Commit 927d9c1c authored by Hoanganhvu123's avatar Hoanganhvu123

update: latest code

parent 1fcace51
......@@ -88,6 +88,13 @@ This ecosystem operates through structured workflows. Always route work through
| `workflow-audit.md` | Auto (from Autopilot) | QA Gatekeeper: security, performance, UI/UX, agent compliance |
| `workflow-autopilot.md` | `"Autopilot: [task]"` | God Mode: classifies intent → routes to Pipeline A/B/C/D |
### Skills Registry (Active)
| Skill | Path | Purpose |
|---|---|---|
| `huashu-design` | `.agent/skills/huashu-design/SKILL.md` | HTML-native hi-fi design, animations, and expert review |
| `frontend-design` | `.agent/skills/frontend-design/SKILL.md` | Production-ready Web UI components |
| `ai-agents-architect` | `.agent/skills/ai-agents-architect/SKILL.md` | Agent system design and orchestration |
### Autopilot Pipelines
```
Pipeline A (Feature): Idea → Evaluate → Build → Audit
......
huashu-design @ 23f60d9b
Subproject commit 23f60d9b4304f20851469987c6e2c92242b94a45
"""
Lead Search Tool - LangChain @tool cho AI goi.
AI sinh keywords + tags -> Tool search -> cascade -> Tra ket qua.
+ Tu dong goi Canifa Stock API -> chi tra SP CON HANG.
+ Doc outfit tu SQLite canifa_ai_dump.sqlite
Architecture:
CO DINH (luon giu): product_type + color + gender + price + size
BIEN DOI (fallback):
Tang 1: CO DINH + keywords (NGRAMBF tren product_name hoac LIKE tren description)
Tang 2: CO DINH + tags (BITMAP hoac LIKE full phrase)
Tang 3: CHI CO DINH (product_type + color + gender + size + price)
Tang 4: Drop gender
Tang 5-6: Price relaxation (1.5x, 2x)
Tang 7: Bo price hoan toan
Lớp này giờ chỉ đóng vai trò Wrapper mỏng (Thin Wrapper).
Toàn bộ logic phức tạp đã được đưa sang ProductSearchEngine.
"""
import json
import logging
import time
from datetime import datetime, timezone
import os as _os
import sqlite3
import httpx
from langchain_core.tools import tool
from pydantic import BaseModel, Field
from common.starrocks_connection import get_db_connection
from .product_mapping import SYNONYM_TO_DB, get_related_lines, resolve_product_line
from .product_search_engine import ProductSearchEngine, LeadSearchInput
logger = logging.getLogger(__name__)
# Optional Langfuse tracing client; degrade gracefully when the SDK is absent.
try:
    from langfuse import get_client as _get_langfuse
    _LANGFUSE_AVAILABLE = True
except ImportError:
    _get_langfuse = None
    _LANGFUSE_AVAILABLE = False
# ═══════════════════════════════════════════════
# Stock API — Canifa stock availability check
# ═══════════════════════════════════════════════
CANIFA_STOCK_API = "https://canifa.com/v1/middleware/stock_get_stock_list_parent"
_STOCK_TIMEOUT = 1.5  # seconds — stock lookup is best-effort, keep it snappy
async def _fetch_stock_batch(base_codes: list[str]) -> tuple[dict[str, list[dict]], float, bool]:
    """Query the Canifa stock API for a batch of base product codes.

    Args:
        base_codes: Base SKU codes (internal_ref_code values) to look up.

    Returns:
        A 3-tuple ``(stock_map, elapsed_ms, timed_out)``:
        ``stock_map`` maps a color code (full SKU minus its "-<size>" suffix)
        to a list of ``{"size", "qty", "status"}`` dicts, ``elapsed_ms`` is
        the call duration in milliseconds, and ``timed_out`` is True when the
        request hit ``_STOCK_TIMEOUT``.  On any error the map is empty —
        callers treat that as "stock unknown", never as a hard failure.

    BUGFIX: the return annotation previously claimed ``dict[str, list[dict]]``
    although every path returns a 3-tuple.
    """
    if not base_codes:
        return {}, 0.0, False
    sku_string = ",".join(base_codes)
    url = f"{CANIFA_STOCK_API}?skus={sku_string}"
    t0 = time.time()
    try:
        async with httpx.AsyncClient(timeout=_STOCK_TIMEOUT) as client:
            resp = await client.get(url)
            resp.raise_for_status()
            data = resp.json()
        elapsed = round((time.time() - t0) * 1000, 1)
        stock_map: dict[str, list[dict]] = {}
        results = data.get("result", [])
        if isinstance(results, list):
            for item in results:
                sku_full = item.get("sku", "")
                qty = item.get("qty", 0) or 0
                if not sku_full:
                    continue
                # SKU format appears to be "<color_code>-<size>"; split off the size.
                parts = sku_full.rsplit("-", 1)
                if len(parts) == 2:
                    color_code, size = parts
                    status = "còn hàng" if qty > 0 else "hết hàng"
                    stock_map.setdefault(color_code, []).append({
                        "size": size, "qty": qty, "status": status
                    })
        logger.info("📦 Stock API: %d base_codes -> %d color_codes | %.0fms", len(base_codes), len(stock_map), elapsed)
        return stock_map, elapsed, False
    except httpx.TimeoutException:
        elapsed = round((time.time() - t0) * 1000, 1)
        logger.warning("⏰ Stock API timeout (%.1fs=%.0fms)", _STOCK_TIMEOUT, elapsed)
        return {}, elapsed, True
    except Exception as e:
        elapsed = round((time.time() - t0) * 1000, 1)
        logger.warning("⚠️ Stock API error (%.0fms): %s", elapsed, e)
        return {}, elapsed, False
async def _enrich_with_stock(products: list[dict]) -> tuple[list[dict], bool, float, bool]:
    """Filter products down to in-stock items via the Canifa stock API.

    Returns ``(products, stock_checked, stock_api_ms, timed_out)``.  When
    the API yields no data the original list is returned untouched; when
    every product turns out to be out of stock, the first five originals
    are kept as a graceful fallback.
    """
    if not products:
        return [], False, 0.0, False
    unique_codes = {p.get("internal_ref_code", "") for p in products if p.get("internal_ref_code")}
    if not unique_codes:
        return products, False, 0.0, False
    stock_map, stock_api_ms, timed_out = await _fetch_stock_batch(list(unique_codes))
    if not stock_map:
        return products, False, stock_api_ms, timed_out
    kept: list[dict] = []
    removed = 0
    for product in products:
        detail = stock_map.get(product.get("product_color_code", ""))
        if detail is None:
            # Color code unknown to the API: neither kept nor counted as dropped.
            continue
        qty_total = sum(entry["qty"] for entry in detail)
        if qty_total > 0:
            product["_stock_detail"] = detail
            product["_total_qty"] = qty_total
            kept.append(product)
        else:
            removed += 1
    logger.info("📦 Stock filter: %d -> %d in-stock, %d dropped | api=%.0fms", len(products), len(kept), removed, stock_api_ms)
    return (kept or products[:5]), True, stock_api_ms, timed_out
# ═══════════════════════════════════════════════
# SQLite local DB path
# ═══════════════════════════════════════════════
from common.constants import SQLITE_DB_PATH as _SQLITE_DB_PATH
# Local SQLite dump used by the outfit/ultra-description enrichment step.
_DB_123_PATH = _SQLITE_DB_PATH
# StarRocks product dimension table queried by every search tier.
TABLE_NAME = "test_db.magento_product_dimension_with_text_embedding"
# Column list shared by all product SELECTs; discount fields are derived.
SELECT_COLUMNS = """
internal_ref_code,
magento_ref_code,
product_color_code,
product_name,
master_color,
product_color_name,
product_image_url_thumbnail,
product_web_url,
sale_price,
original_price,
COALESCE(discount_amount, 0) AS discount_amount,
ROUND(CASE WHEN original_price > 0
THEN ((original_price - sale_price) / original_price * 100)
ELSE 0 END, 0) AS discount_percent,
age_by_product,
gender_by_product,
product_line_vn,
COALESCE(quantity_sold, 0) AS quantity_sold,
COALESCE(is_new_product, 0) AS is_new_product,
size_scale,
description_text,
suggest_items,
similar_items
"""
TAGS_TO_OCCASION: dict[str, str] = {
"occ:di_lam": "di_lam",
"occ:di_choi": "di_choi",
"occ:di_tiec": "di_tiec",
"occ:di_hoc": "di_choi",
"occ:mac_nha": "mac_nha",
"occ:the_thao": "the_thao",
"occ:di_bien": "di_choi",
"occ:du_lich": "di_choi",
"occ:da_ngoai": "di_choi",
"occ:di_ngu": "mac_nha",
"occ:hang_ngay": "hang_ngay",
}
def _resolve_occasion(tags: list[str]) -> str:
for tag in (tags or []):
occ = TAGS_TO_OCCASION.get(tag.lower())
if occ:
return occ
return "hang_ngay"
class LeadSearchInput(BaseModel):
    """Argument schema the LLM fills when calling lead_search_tool.

    Field descriptions are deliberately in Vietnamese — they are prompt
    text shown to the model, not developer documentation.

    NOTE(review): this class shadows the ``LeadSearchInput`` imported from
    ``.product_search_engine`` at the top of the file — confirm which
    definition is canonical and remove the duplicate.
    """

    # Reject unknown keys so the model cannot invent parameters.
    model_config = {"extra": "forbid"}

    # Verbatim keywords lifted from the customer's message (max 2).
    keywords: list[str] = Field(
        default=[],
        description=(
            "Từ khoá NGUYÊN VĂN từ câu hỏi khách. "
            "VD: khách nói '30/4' -> keywords=['30/4']. "
            "Khách nói 'cờ đỏ sao vàng' -> keywords=['cờ đỏ sao vàng']. "
            "Khách nói 'cotton ống rộng' -> keywords=['cotton', 'ống rộng']. "
            "Tối đa 2. Để [] nếu khách không nhắc gì đặc biệt."
        )
    )
    # Inferred intent tags restricted to four fixed, prefixed axes (max 3).
    tags: list[str] = Field(
        default=[],
        description=(
            "AI suy luận ý định khách -> chọn từ 4 TRỤC CỐ ĐỊNH (BẮT BUỘC có prefix!): "
            "Trục 1 (occ:): occ:di_lam, occ:di_choi, occ:di_tiec, occ:di_hoc, occ:mac_nha, occ:the_thao, occ:di_bien, occ:du_lich, occ:da_ngoai, occ:di_ngu. "
            "Trục 2a (wthr:): wthr:mua_he, wthr:mua_dong, wthr:giao_mua, wthr:troi_mua, wthr:troi_nang. "
            "Trục 2b (func:): func:thoang_mat, func:giu_am, func:tham_hut, func:nhanh_kho, func:chong_uv, func:can_gio. "
            "Trục 3 (style:): style:thanh_lich, style:nang_dong, style:basic, style:ca_tinh, style:de_thuong, style:tre_trung, style:toi_gian, style:smart_casual. "
            "Trục 4 (fit:): fit:oversize, fit:slim, fit:regular, fit:wide_leg, fit:cropped, fit:relaxed. "
            "VD: 'đi tiệc' -> tags=['occ:di_tiec']. 'mùa đông' -> tags=['wthr:mua_dong', 'func:giu_am']. "
            "KHÔNG tự nghĩ tag mới! PHẢI giữ prefix! Tối đa 3."
        )
    )
    # Product line as spoken by the user; the tool normalizes synonyms later.
    product_line_vn: list[str] = Field(
        default=[],
        description=(
            "Dòng sản phẩm. CHÍNH XÁC lời user nói: 'áo phông', 'váy liền', 'đồ lót', 'quần jean'... "
            "Tool sẽ tự động chuẩn hoá từ đồng nghĩa (VD: 'áo thun' -> 'Áo phông'). "
            "Nếu khách nói chung chung ('đồ mùa đông', 'đồ tập') hoặc không nhắc, để []. "
        ),
    )
    gender_by_product: str | None = Field(
        default=None,
        description="Giới tính. GIÁ TRỊ HỢP LỆ: women, men, boy, girl, unisex, newborn",
    )
    age_by_product: str | None = Field(
        default=None,
        description="Độ tuổi. GIÁ TRỊ HỢP LỆ: adult, kid, others",
    )
    master_color: str | None = Field(
        default=None,
        description=(
            "Màu sắc. Ghi CHÍNH XÁC từ khách nói: 'trắng', 'đen', 'đỏ'... "
            "VD: 'áo cờ đỏ sao vàng' -> color='đỏ'."
        ),
    )
    # Price bounds in VND, applied to sale_price.
    price_min: int | None = Field(default=None, description="Giá thấp nhất (VND)")
    price_max: int | None = Field(default=None, description="Giá cao nhất (VND)")
    discount_min: int | None = Field(default=None, description="% giảm giá TỐI THIỂU.")
    discount_max: int | None = Field(default=None, description="% giảm giá TỐI ĐA.")
    discovery_mode: str | None = Field(default=None, description="'new' = hàng mới, 'best_seller' = bán chạy.")
    size: str | None = Field(
        default=None,
        description=(
            "Size san pham khach yeu cau. "
            "Nguoi lon: XS, S, M, L, XL, XXL, 3XL, 4XL. "
            "Tre em (chieu cao cm): 80, 86, 92, 98, 104, 110, 116, 122, 128, 134, 140, 152, 164. "
            "Tre so sinh (thang): 1/3, 4/6, 7/9, 10/12, 13/14. "
            "VD: 'ao size L' -> size='L'."
        ),
    )
    # Exact-SKU lookup shortcut: bypasses the cascading search entirely.
    magento_ref_code: str | None = Field(default=None, description="Ma SKU cu the.")
    reasoning: str | None = Field(default=None, description="SUY LUẬN TẠI SAO bạn chọn params này.")
# ======================================================
# SQL Builder
# ======================================================
def _build_fixed_clauses(req: LeadSearchInput, params: list) -> list[str]:
clauses = []
if req.product_line_vn:
lines = []
for line in req.product_line_vn:
if not line: continue
resolved = resolve_product_line(line)
for r in resolved:
expanded = get_related_lines(r)
lines.extend(expanded)
if lines:
placeholders = ", ".join(["%s"] * len(lines))
params.extend(lines)
clauses.append(f"product_line_vn IN ({placeholders})")
if req.gender_by_product:
gender_lower = req.gender_by_product.lower().strip()
genders_to_search = []
if gender_lower in ("women", "nu", "female", "nữ"):
genders_to_search = ["female", "women", "nu", "nữ", "unisex"]
elif gender_lower in ("men", "nam", "male"):
genders_to_search = ["male", "men", "nam", "unisex"]
elif gender_lower in ("boy", "bé trai", "be trai"):
genders_to_search = ["boy", "bé trai", "be trai", "unisex"]
elif gender_lower in ("girl", "bé gái", "be gai"):
genders_to_search = ["girl", "bé gái", "be gai", "unisex"]
else:
genders_to_search = [gender_lower, "unisex"]
placeholders = ", ".join(["%s"] * len(genders_to_search))
params.extend(genders_to_search)
clauses.append(f"gender_by_product IN ({placeholders})")
if req.age_by_product:
age_lower = req.age_by_product.lower().strip()
if age_lower in ("baby", "newborn"):
params.append("kid")
else:
params.append(age_lower)
clauses.append("age_by_product = %s")
if req.master_color:
c_raw = req.master_color.strip()
COLOR_EN_MAP = {
"do": "red", "trang": "white", "den": "black", "xanh": "blue",
"hong": "pink", "tim": "purple", "cam": "orange", "vang": "yellow",
"nau": "brown", "xam": "gray", "be": "beige", "kem": "cream",
"xanh la": "green", "bac": "silver", "vang dong": "gold",
"đo": "red", "đỏ": "red", "trắng": "white", "đen": "black",
"xanh lá": "green", "hồng": "pink", "tím": "purple", "nâu": "brown",
"xám": "gray", "vàng": "yellow", "bạc": "silver"
}
c_key = c_raw.lower().strip()
en_color = COLOR_EN_MAP.get(c_key)
if not en_color:
import unicodedata
c_no_acc = "".join(
ch for ch in unicodedata.normalize("NFD", c_key)
if unicodedata.category(ch) != "Mn"
).strip()
en_color = COLOR_EN_MAP.get(c_no_acc)
c_no_accent = c_key
color_parts = []
color_parts.append("(master_color LIKE %s OR product_color_name LIKE %s)")
params.append(f"%{c_raw}%")
params.append(f"%{c_raw}%")
if c_no_accent != c_raw.lower():
color_parts.append("(LOWER(master_color) LIKE %s OR LOWER(product_color_name) LIKE %s)")
params.append(f"%{c_no_accent}%")
params.append(f"%{c_no_accent}%")
if en_color:
color_parts.append("(LOWER(master_color) LIKE %s OR LOWER(product_color_name) LIKE %s)")
params.append(f"%{en_color}%")
params.append(f"%{en_color}%")
clauses.append("(" + " OR ".join(color_parts) + ")")
if req.price_min is not None:
params.append(req.price_min)
clauses.append("sale_price >= %s")
if req.price_max is not None:
params.append(req.price_max)
clauses.append("sale_price <= %s")
if req.discount_min is not None:
params.append(req.discount_min)
clauses.append("(original_price > 0 AND ((original_price - sale_price) / original_price * 100) >= %s)")
if req.discount_max is not None:
params.append(req.discount_max)
clauses.append("(original_price > 0 AND ((original_price - sale_price) / original_price * 100) <= %s)")
if req.size:
s = req.size.strip().upper()
params.append(s)
clauses.append("FIND_IN_SET(%s, REPLACE(size_scale, '|', ',')) > 0")
if req.discovery_mode:
mode = req.discovery_mode.lower().strip()
if mode == "new":
clauses.append("is_new_product = 1")
elif mode == "best_seller":
clauses.append("quantity_sold > 0")
return clauses
TAG_TO_BITMAP_COL: dict[str, tuple[str, str]] = {
"style:thanh_lich": ("style", "Feminine"),
"style:nang_dong": ("style", "Dynamic"),
"style:basic": ("style", "Basic"),
"style:ca_tinh": ("style", "Street"),
"style:de_thuong": ("style", "Cute"),
"style:tre_trung": ("style", "Trend"),
"style:toi_gian": ("style", "Essential"),
"style:smart_casual":("style", "Smart Casual"),
"fit:oversize": ("fitting", "Oversize"),
"fit:slim": ("fitting", "Slim"),
"fit:regular": ("fitting", "Regular"),
"fit:wide_leg": ("fitting", "Relax"),
"fit:cropped": ("fitting", "Boxy"),
"fit:relaxed": ("fitting", "Relax"),
"wthr:mua_he": ("season_sale", "Summer"),
"wthr:mua_dong": ("season_sale", "Winter"),
"occ:di_bien": ("season_sale", "Summer"),
"occ:du_lich": ("season_sale", "Summer"),
"occ:the_thao": ("style", "Athleisure"),
}
TAG_TO_SEARCH_TEXT: dict[str, str] = {
"occ:di_lam": "đi làm",
"occ:di_choi": "đi chơi",
"occ:di_tiec": "đi tiệc",
"occ:di_hoc": "đi học",
"occ:mac_nha": "mặc nhà",
"occ:da_ngoai": "dã ngoại",
"occ:di_ngu": "ngủ",
"wthr:giao_mua": "giao mùa",
"wthr:troi_mua": "mưa",
"wthr:troi_nang": "nắng",
"func:thoang_mat": "thoáng mát",
"func:giu_am": "giữ ấm",
"func:tham_hut": "thấm hút",
"func:nhanh_kho": "nhanh khô",
"func:chong_uv": "chống UV",
"func:can_gio": "cản gió",
}
def _build_tag_clauses(tags: list[str], params: list) -> str:
if not tags: return ""
bitmap_by_col: dict[str, list[str]] = {}
text_terms: list[str] = []
for tag in tags:
tag_lower = tag.strip().lower()
if tag_lower in TAG_TO_BITMAP_COL:
col, val = TAG_TO_BITMAP_COL[tag_lower]
bitmap_by_col.setdefault(col, []).append(val)
elif tag_lower in TAG_TO_SEARCH_TEXT:
text_terms.append(TAG_TO_SEARCH_TEXT[tag_lower])
else:
text_terms.append(tag_lower.replace(":", " ").replace("_", " "))
all_clauses = []
for col, values in bitmap_by_col.items():
if len(values) == 1:
params.append(values[0])
all_clauses.append(f"{col} = %s")
else:
placeholders = ", ".join(["%s"] * len(values))
params.extend(values)
all_clauses.append(f"{col} IN ({placeholders})")
if text_terms:
term_clauses = []
for term in text_terms:
term = term.strip()
if not term: continue
params.append(f"%{term}%")
params.append(f"%{term}%")
term_clauses.append(f"(LOWER(description_text) LIKE %s OR product_name LIKE %s)")
if term_clauses:
all_clauses.append(f"({' OR '.join(term_clauses)})")
if all_clauses:
return f"({' OR '.join(all_clauses)})"
return ""
def _build_search_clause(search_terms: list[str], params: list) -> str:
term_clauses = []
for term in search_terms:
t = term.strip()
if not t: continue
words = t.split()
if not words: continue
word_clauses = []
for word in words:
if len(word) >= 4:
params.append(f"%{word}%")
word_clauses.append("product_name LIKE %s")
else:
params.append(f"%{word}%")
params.append(f"%{word}%")
word_clauses.append("(product_name LIKE %s OR LOWER(description_text) LIKE %s)")
if word_clauses:
term_clauses.append(f"({' AND '.join(word_clauses)})")
if term_clauses:
return f"({' OR '.join(term_clauses)})"
return ""
def _build_exclusion_clauses(keywords: list[str], params: list) -> list[str]:
clauses = []
kws_str = " ".join(keywords).lower()
if any(k in kws_str for k in ["đông", "lạnh", "winter", "tuyết", "giữ ấm"]):
clauses.append("product_line_vn NOT IN (%s, %s)")
params.extend(["Quần soóc", "Áo ba lỗ"])
if any(k in kws_str for k in ["đi làm", "công sở", "văn phòng", "office"]):
forbidden = ["cartoon", "hoạt hình", "manga", "anime", "demon slayer", "naruto", "disney", "marvel"]
for f in forbidden:
clauses.append("LOWER(description_text) NOT LIKE %s")
params.append(f"%{f}%")
return clauses
def _build_full_query(fixed_clauses: list[str], search_clause: str | None, exclusion_clauses: list[str] | None = None) -> str:
    """Assemble the final SELECT over the product dimension table.

    Args:
        fixed_clauses: Hard filters (product line, gender, price, ...).
        search_clause: Optional keyword/tag fragment, or None/'' to skip.
        exclusion_clauses: Optional NOT-filters.  BUGFIX: annotated
            ``list[str] | None`` — the old annotation claimed ``list[str]``
            while defaulting to None.

    Returns:
        A parameterized SQL string (values travel separately), best sellers
        first, capped at 20 rows.
    """
    all_clauses = list(fixed_clauses)
    if search_clause:
        all_clauses.append(search_clause)
    if exclusion_clauses:
        all_clauses.extend(exclusion_clauses)
    where = " AND ".join(all_clauses) if all_clauses else "1=1"
    return f"""
    SELECT {SELECT_COLUMNS}
    FROM {TABLE_NAME}
    WHERE {where}
    ORDER BY quantity_sold DESC NULLS LAST, sale_price ASC
    LIMIT 20
    """
async def _price_relaxed_search(req: "LeadSearchInput", db, multiplier: float) -> list:
    """Re-run the fixed-clause search with price_max scaled by *multiplier*.

    Temporarily widens ``req.price_max`` and drops ``req.price_min``, then
    restores both fields.  BUGFIX: restoration now happens in a ``finally``
    block so a DB error cannot leak the mutated request back to the caller.
    """
    if req.price_max is None:
        return []
    saved_max, saved_min = req.price_max, req.price_min
    try:
        req.price_max = int(req.price_max * multiplier)
        req.price_min = None
        params: list = []
        fixed = _build_fixed_clauses(req, params)
        sql = _build_full_query(fixed, None)
        return await db.execute_query_async(sql, params=tuple(params))
    finally:
        req.price_max = saved_max
        req.price_min = saved_min
async def _cascading_search(req: "LeadSearchInput", db) -> tuple[list, int, str | None]:
    """Run the 7-tier fallback search described in the module docstring.

    Returns ``(products, tier, fallback_message)``.  ``fallback_message``
    is only set by the price-relaxation tiers so the LLM can explain the
    budget overshoot to the customer.
    """
    ex_params: list = []
    exclusions = _build_exclusion_clauses(req.keywords, ex_params)

    # Tier 1: fixed filters + literal keywords (NGRAMBF index).
    if req.keywords:
        params: list = []
        fixed = _build_fixed_clauses(req, params)
        search = _build_search_clause(req.keywords, params)
        if search:
            sql = _build_full_query(fixed, search, exclusions)
            products = await db.execute_query_async(sql, params=tuple(params + ex_params))
            if products:
                return products, 1, None

    # Tier 2: fixed filters + intent tags (BITMAP columns).
    if req.tags:
        params = []
        fixed = _build_fixed_clauses(req, params)
        search = _build_tag_clauses(req.tags, params)
        if search:
            sql = _build_full_query(fixed, search, exclusions)
            products = await db.execute_query_async(sql, params=tuple(params + ex_params))
            if products:
                return products, 2, None

    # Tier 3: fixed filters only.
    params = []
    fixed = _build_fixed_clauses(req, params)
    sql = _build_full_query(fixed, None, exclusions)
    products = await db.execute_query_async(sql, params=tuple(params + ex_params))
    if products:
        return products, 3, None

    # Tier 4: drop the gender filter (restored even on DB error).
    if req.product_line_vn and req.gender_by_product:
        saved_gender = req.gender_by_product
        req.gender_by_product = None
        try:
            params = []
            fixed = _build_fixed_clauses(req, params)
            sql = _build_full_query(fixed, None, exclusions)
            products = await db.execute_query_async(sql, params=tuple(params + ex_params))
        finally:
            req.gender_by_product = saved_gender
        if products:
            return products, 4, None

    # Shared context for the price-fallback messages.  BUGFIX: these were
    # previously assigned only inside the tier 5-6 branch, so tier 7 raised
    # NameError whenever price_max / product_line_vn were not both set.
    original_max = req.price_max
    product_label = "/".join(req.product_line_vn) if req.product_line_vn else "san pham phu hop"

    # Tiers 5-6: widen the price ceiling (1.5x, then 2x).
    if original_max is not None and req.product_line_vn:
        for multiplier, tier in ((1.5, 5), (2.0, 6)):
            new_max = int(original_max * multiplier)
            products = await _price_relaxed_search(req, db, multiplier)
            if products:
                cheapest_p = min(products, key=lambda p: float(p.get("sale_price") or 999_999_999))
                cheapest = float(cheapest_p.get("sale_price") or 0)
                cheapest_name = cheapest_p.get("product_name", "")
                diff = int(cheapest - original_max)
                msg = (
                    f"FALLBACK_PRICE: Khong co {product_label} duoi {original_max:,.0f}d. "
                    f"Mau re nhat: \"{cheapest_name}\" gia {cheapest:,.0f}d "
                    f"(chi them {diff:,}d so voi budget). "
                    f"Tong co {len(products)} mau trong tam {new_max:,.0f}d. "
                    f"-> HAY goi y SP nay cho khach va noi kheo: them chut xiu la co mau rat dep!"
                )
                return products, tier, msg

    # Tier 7: drop the price filter entirely (restored even on DB error).
    saved_max, saved_min = req.price_max, req.price_min
    req.price_max = None
    req.price_min = None
    try:
        params = []
        fixed = _build_fixed_clauses(req, params)
        sql = _build_full_query(fixed, None, exclusions)
        products = await db.execute_query_async(sql, params=tuple(params + ex_params))
    finally:
        req.price_max = saved_max
        req.price_min = saved_min
    if products:
        if original_max is None:
            # No budget was given, so there is no price story to tell.
            return products, 7, None
        cheapest_p = min(products, key=lambda p: float(p.get("sale_price") or 999_999_999))
        cheapest = float(cheapest_p.get("sale_price") or 0)
        cheapest_name = cheapest_p.get("product_name", "")
        msg = (
            f"FALLBACK_PRICE: Khong co {product_label} duoi {original_max:,.0f}d. "
            f"Mau re nhat hien co: \"{cheapest_name}\" gia {cheapest:,.0f}d. "
            f"Tong co {len(products)} mau. "
            f"-> HAY goi y SP nay va noi: budget them chut la co mau rat dang!"
        )
        return products, 7, msg
    return [], 3, None
async def _enrich_with_outfit(
    products: list[dict],
    db,
    tags: list[str] | None = None,
) -> list[dict]:
    """Attach AI outfit data (ai_description / ai_matches) to the top-3 products.

    Reads the ultra-description table from the local SQLite dump.  Fails
    soft: any I/O problem returns *products* unchanged.  *db* is unused
    here; *tags* is kept for interface stability (the previously computed
    occasion value was never used and has been removed).
    """
    if not products:
        return products
    if not _os.path.exists(_DB_123_PATH):
        return products
    top_products = products[:3]
    anchor_base_codes = [
        (p.get("internal_ref_code") or p.get("magento_ref_code", "").split("-")[0]).strip()
        for p in top_products
    ]
    anchor_base_codes = [c for c in anchor_base_codes if c]
    if not anchor_base_codes:
        return products
    try:
        conn = sqlite3.connect(_DB_123_PATH)
        try:
            conn.row_factory = sqlite3.Row
            placeholders = ",".join(["?"] * len(anchor_base_codes))
            ultra_rows = conn.execute(
                f"""
                SELECT base_ref_code, magento_ref_code, ai_matches, clean_description
                FROM pg__dashboard_canifa__ultra_descriptions
                WHERE base_ref_code IN ({placeholders})
                """,
                anchor_base_codes,
            ).fetchall()
        finally:
            # BUGFIX: close the connection even when the query raises.
            conn.close()
    except Exception as e:
        logger.error("❌ SQLite ultra_desc read error: %s", e)
        return products
    ultra_map: dict[str, dict] = {}
    for row in ultra_rows:
        ultra_map[row["base_ref_code"]] = {
            "ai_matches": row["ai_matches"],
            "clean_description": row["clean_description"],
        }
    for p in top_products:
        anchor_magento = p.get("magento_ref_code", "")
        base = (p.get("internal_ref_code") or anchor_magento.split("-")[0]).strip()
        ultra = ultra_map.get(base)
        if not ultra or not ultra["ai_matches"]:
            continue
        if ultra["clean_description"]:
            p["ai_description"] = ultra["clean_description"]
        try:
            p["ai_matches"] = json.loads(ultra["ai_matches"])
        except (json.JSONDecodeError, TypeError):
            continue
    return products
async def _format_products(products: list, db) -> list[dict]:
formatted = []
# ── Fetch Suggest/Similar from DB ──
all_related_skus: set = set()
raw_parsed_map: dict = {}
def parse_item_list(raw_val) -> list[str]:
if not raw_val: return []
if isinstance(raw_val, list): return [str(x) for x in raw_val[:3]]
if isinstance(raw_val, str):
try:
parsed = json.loads(raw_val)
if isinstance(parsed, list): return [str(x) for x in parsed[:3]]
except Exception: pass
return []
for p in products[:3]:
sku = p.get("magento_ref_code", "")
sug_skus = parse_item_list(p.get("suggest_items"))[:2]
sim_skus = parse_item_list(p.get("similar_items"))[:3]
raw_parsed_map[sku] = {"suggest": sug_skus, "similar": sim_skus}
all_related_skus.update(sug_skus)
all_related_skus.update(sim_skus)
sku_lookup: dict = {}
if all_related_skus:
phs = ", ".join(["%s"] * len(all_related_skus))
q = f"SELECT magento_ref_code, product_name, sale_price FROM {TABLE_NAME} WHERE magento_ref_code IN ({phs})"
related_products = await db.execute_query_async(q, params=tuple(all_related_skus))
for rp in related_products:
rpsku = rp.get("magento_ref_code")
if rpsku:
sku_lookup[rpsku] = {
"sku": rpsku,
"name": rp.get("product_name"),
"price": int(rp.get("sale_price") or 0),
}
for p in products[:3]:
sale = float(p.get("sale_price") or 0)
orig = float(p.get("original_price") or 0)
has_discount = sale < orig and orig > 0
disc_pct = int(p.get("discount_percent") or 0)
main_sku = p.get("magento_ref_code", "")
parsed_rel = raw_parsed_map.get(main_sku, {"suggest": [], "similar": []})
rich_suggest = [sku_lookup[s] for s in parsed_rel["suggest"] if s in sku_lookup]
rich_similar = [sku_lookup[s] for s in parsed_rel["similar"] if s in sku_lookup]
desc = (p.get("description_text") or "").strip()
desc_short = desc[:200] + "..." if len(desc) > 200 else desc
item = {
"sku": main_sku,
"name": p.get("product_name", ""),
"price": int(sale),
"original_price": int(orig),
"discount": f"-{disc_pct}%" if has_discount else None,
"color": p.get("master_color", ""),
"gender": p.get("gender_by_product", ""),
"product_line": p.get("product_line_vn", ""),
"image": p.get("product_image_url_thumbnail", ""),
"url": p.get("product_web_url", ""),
"sizes": p.get("size_scale", ""),
"description": desc_short,
"suggest_items": rich_suggest,
"similar_items": rich_similar,
}
if "in_stock" in p:
item["in_stock"] = p["in_stock"]
item["total_qty"] = p["total_qty"]
item["stock"] = p["stock"]
if "ai_description" in p:
item["ai_description"] = p["ai_description"]
if "ai_matches" in p:
item["ai_matches"] = p["ai_matches"]
formatted.append(item)
return formatted
def _build_sku_query(code: str) -> tuple[str, list]:
    """Exact / prefix lookup of a product by SKU (magento or internal code).

    Returns (sql, params) — the SQL matches the normalized code exactly on
    either code column, or as a prefix of magento_ref_code.
    """
    normalized = code.strip().upper()
    sql = f"""
    SELECT {SELECT_COLUMNS}
    FROM {TABLE_NAME}
    WHERE (UPPER(magento_ref_code) = %s
    OR UPPER(internal_ref_code) = %s
    OR UPPER(magento_ref_code) LIKE %s)
    ORDER BY quantity_sold DESC NULLS LAST
    LIMIT 20
    """
    return sql, [normalized, normalized, f"{normalized}%"]
engine = ProductSearchEngine()
@tool(args_schema=LeadSearchInput)
async def lead_search_tool(
......@@ -784,8 +33,6 @@ async def lead_search_tool(
"""
Tim kiem san pham CANIFA theo tags va tu khoa. Toi uu hoa de chay cuc nhanh bang BITMAP index.
"""
start = time.time()
req = LeadSearchInput(
keywords=keywords or [],
tags=tags or [],
......@@ -802,71 +49,10 @@ async def lead_search_tool(
magento_ref_code=magento_ref_code,
)
db = get_db_connection()
try:
fallback_msg = None
if req.magento_ref_code:
sql, params = _build_sku_query(req.magento_ref_code)
products = await db.execute_query_async(sql, params=tuple(params))
tier = 0
else:
products, tier, fallback_msg = await _cascading_search(req, db)
# Kiem tra ton kho (chi lay SP con hang)
products, stock_checked, stock_api_ms, timed_out = await _enrich_with_stock(products)
# Inject thong tin outfit neu co
products = await _enrich_with_outfit(products, db, tags=tags)
# Danh dau de formatter lay them data
for p in products:
if stock_checked and p.get("_stock_detail"):
p["in_stock"] = True
p["total_qty"] = p.get("_total_qty")
p["stock"] = p.get("_stock_detail")
formatted = await _format_products(products, db)
elapsed_ms = round((time.time() - start) * 1000, 2)
_search_modes = [
"sku_lookup",
"keywords_ngrambf",
"tags_bitmap",
"hard_filters_only",
"drop_gender",
"price_1.5x",
"price_2x",
"price_unlimited",
]
logger.info(
"[LEAD SEARCH] REQUEST | tags=%s | kw=%s | line=%s | price=%s~%s | size=%s",
tags, keywords, product_line_vn, price_min, price_max, size
)
logger.info(
"[LEAD SEARCH] RESULT | tier=%d (%s) | results=%d | time=%.0fms | stock=%.0fms",
tier, _search_modes[min(tier, 7)], len(products), elapsed_ms, stock_api_ms
)
result = {
"status": "success",
"count": len(products),
"tier": tier,
"search_mode": _search_modes[min(tier, 7)],
"elapsed_ms": elapsed_ms,
"reasoning": reasoning or "",
"keywords_used": keywords or [],
"tags_used": tags or [],
"products": formatted,
}
if fallback_msg:
result["fallback_message"] = fallback_msg
return json.dumps(result, ensure_ascii=False, default=str)
# Gọi engine để xử lý logic dual-query / cascade
result_dict = await engine.search(req, reasoning=reasoning)
return json.dumps(result_dict, ensure_ascii=False, default=str)
except Exception as e:
logger.error("Lead search tool error: %s", e, exc_info=True)
return json.dumps({"status": "error", "message": str(e)})
"""
Lead Search Tool - LangChain @tool cho AI goi.
AI sinh keywords + tags -> Tool search -> cascade -> Tra ket qua.
+ Tu dong goi Canifa Stock API -> chi tra SP CON HANG.
+ Doc outfit tu SQLite canifa_ai_dump.sqlite
Architecture:
CO DINH (luon giu): product_type + color + gender + price + size
BIEN DOI (fallback):
Tang 1: CO DINH + keywords (NGRAMBF tren product_name hoac LIKE tren description)
Tang 2: CO DINH + tags (BITMAP hoac LIKE full phrase)
Tang 3: CHI CO DINH (product_type + color + gender + size + price)
Tang 4: Drop gender
Tang 5-6: Price relaxation (1.5x, 2x)
Tang 7: Bo price hoan toan
"""
import json
import logging
import time
from datetime import datetime, timezone
import os as _os
import sqlite3
import httpx
from pydantic import BaseModel, Field
from common.starrocks_connection import get_db_connection
from .product_mapping import SYNONYM_TO_DB, get_related_lines, resolve_product_line
logger = logging.getLogger(__name__)
# Optional Langfuse tracing client; degrade gracefully when the SDK is absent.
try:
    from langfuse import get_client as _get_langfuse
    _LANGFUSE_AVAILABLE = True
except ImportError:
    _get_langfuse = None
    _LANGFUSE_AVAILABLE = False
# ═══════════════════════════════════════════════
# Stock API — Canifa stock availability check
# ═══════════════════════════════════════════════
CANIFA_STOCK_API = "https://canifa.com/v1/middleware/stock_get_stock_list_parent"
_STOCK_TIMEOUT = 1.5  # seconds — stock lookup is best-effort, keep it snappy
async def _fetch_stock_batch(base_codes: list[str]) -> tuple[dict[str, list[dict]], float, bool]:
    """Query the Canifa stock API for a batch of base product codes.

    Args:
        base_codes: Base SKU codes (internal_ref_code values) to look up.

    Returns:
        A 3-tuple ``(stock_map, elapsed_ms, timed_out)``:
        ``stock_map`` maps a color code (full SKU minus its "-<size>" suffix)
        to a list of ``{"size", "qty", "status"}`` dicts, ``elapsed_ms`` is
        the call duration in milliseconds, and ``timed_out`` is True when the
        request hit ``_STOCK_TIMEOUT``.  On any error the map is empty —
        callers treat that as "stock unknown", never as a hard failure.

    BUGFIX: the return annotation previously claimed ``dict[str, list[dict]]``
    although every path returns a 3-tuple.
    """
    if not base_codes:
        return {}, 0.0, False
    sku_string = ",".join(base_codes)
    url = f"{CANIFA_STOCK_API}?skus={sku_string}"
    t0 = time.time()
    try:
        async with httpx.AsyncClient(timeout=_STOCK_TIMEOUT) as client:
            resp = await client.get(url)
            resp.raise_for_status()
            data = resp.json()
        elapsed = round((time.time() - t0) * 1000, 1)
        stock_map: dict[str, list[dict]] = {}
        results = data.get("result", [])
        if isinstance(results, list):
            for item in results:
                sku_full = item.get("sku", "")
                qty = item.get("qty", 0) or 0
                if not sku_full:
                    continue
                # SKU format appears to be "<color_code>-<size>"; split off the size.
                parts = sku_full.rsplit("-", 1)
                if len(parts) == 2:
                    color_code, size = parts
                    status = "còn hàng" if qty > 0 else "hết hàng"
                    stock_map.setdefault(color_code, []).append({
                        "size": size, "qty": qty, "status": status
                    })
        logger.info("📦 Stock API: %d base_codes -> %d color_codes | %.0fms", len(base_codes), len(stock_map), elapsed)
        return stock_map, elapsed, False
    except httpx.TimeoutException:
        elapsed = round((time.time() - t0) * 1000, 1)
        logger.warning("⏰ Stock API timeout (%.1fs=%.0fms)", _STOCK_TIMEOUT, elapsed)
        return {}, elapsed, True
    except Exception as e:
        elapsed = round((time.time() - t0) * 1000, 1)
        logger.warning("⚠️ Stock API error (%.0fms): %s", elapsed, e)
        return {}, elapsed, False
async def _enrich_with_stock(products: list[dict]) -> tuple[list[dict], bool, float, bool]:
    """Filter products down to in-stock items via the Canifa stock API.

    Returns ``(products, stock_checked, stock_api_ms, timed_out)``.  When
    the API yields no data the original list is returned untouched; when
    every product turns out to be out of stock, the first five originals
    are kept as a graceful fallback.
    """
    if not products:
        return [], False, 0.0, False
    unique_codes = {p.get("internal_ref_code", "") for p in products if p.get("internal_ref_code")}
    if not unique_codes:
        return products, False, 0.0, False
    stock_map, stock_api_ms, timed_out = await _fetch_stock_batch(list(unique_codes))
    if not stock_map:
        return products, False, stock_api_ms, timed_out
    kept: list[dict] = []
    removed = 0
    for product in products:
        detail = stock_map.get(product.get("product_color_code", ""))
        if detail is None:
            # Color code unknown to the API: neither kept nor counted as dropped.
            continue
        qty_total = sum(entry["qty"] for entry in detail)
        if qty_total > 0:
            product["_stock_detail"] = detail
            product["_total_qty"] = qty_total
            kept.append(product)
        else:
            removed += 1
    logger.info("📦 Stock filter: %d -> %d in-stock, %d dropped | api=%.0fms", len(products), len(kept), removed, stock_api_ms)
    return (kept or products[:5]), True, stock_api_ms, timed_out
# ═══════════════════════════════════════════════
# SQLite local DB path
# ═══════════════════════════════════════════════
from common.constants import SQLITE_DB_PATH as _SQLITE_DB_PATH
# Local SQLite dump used by the outfit/ultra-description enrichment step.
_DB_123_PATH = _SQLITE_DB_PATH
# StarRocks product dimension table queried by every search tier.
TABLE_NAME = "test_db.magento_product_dimension_with_text_embedding"
# Column list shared by all product SELECTs; discount fields are derived.
SELECT_COLUMNS = """
internal_ref_code,
magento_ref_code,
product_color_code,
product_name,
master_color,
product_color_name,
product_image_url_thumbnail,
product_web_url,
sale_price,
original_price,
COALESCE(discount_amount, 0) AS discount_amount,
ROUND(CASE WHEN original_price > 0
THEN ((original_price - sale_price) / original_price * 100)
ELSE 0 END, 0) AS discount_percent,
age_by_product,
gender_by_product,
product_line_vn,
COALESCE(quantity_sold, 0) AS quantity_sold,
COALESCE(is_new_product, 0) AS is_new_product,
size_scale,
description_text,
suggest_items,
similar_items
"""
TAGS_TO_OCCASION: dict[str, str] = {
"occ:di_lam": "di_lam",
"occ:di_choi": "di_choi",
"occ:di_tiec": "di_tiec",
"occ:di_hoc": "di_choi",
"occ:mac_nha": "mac_nha",
"occ:the_thao": "the_thao",
"occ:di_bien": "di_choi",
"occ:du_lich": "di_choi",
"occ:da_ngoai": "di_choi",
"occ:di_ngu": "mac_nha",
"occ:hang_ngay": "hang_ngay",
}
def _resolve_occasion(tags: list[str]) -> str:
    """Return the occasion bucket for the first tag found in TAGS_TO_OCCASION.

    Tags are matched case-insensitively; falls back to "hang_ngay"
    (everyday wear) when ``tags`` is empty/None or nothing matches.
    """
    matches = (TAGS_TO_OCCASION.get(tag.lower()) for tag in (tags or []))
    return next((occ for occ in matches if occ), "hang_ngay")
# Tool-call schema the LLM fills in when invoking the lead search.
# NOTE: the Vietnamese Field descriptions are prompt-engineering surface that
# is shown to the model verbatim — they are deliberately left untouched.
class LeadSearchInput(BaseModel):
    model_config = {"extra": "forbid"}  # reject unknown keys from the LLM
    # Literal keywords lifted verbatim from the customer's message (max 2).
    keywords: list[str] = Field(
        default=[],
        description=(
            "Từ khoá NGUYÊN VĂN từ câu hỏi khách. "
            "VD: khách nói '30/4' -> keywords=['30/4']. "
            "Khách nói 'cờ đỏ sao vàng' -> keywords=['cờ đỏ sao vàng']. "
            "Khách nói 'cotton ống rộng' -> keywords=['cotton', 'ống rộng']. "
            "Tối đa 2. Để [] nếu khách không nhắc gì đặc biệt."
        )
    )
    # AI-inferred intent tags; must use one of the 4 fixed prefixed axes (max 3).
    tags: list[str] = Field(
        default=[],
        description=(
            "AI suy luận ý định khách -> chọn từ 4 TRỤC CỐ ĐỊNH (BẮT BUỘC có prefix!): "
            "Trục 1 (occ:): occ:di_lam, occ:di_choi, occ:di_tiec, occ:di_hoc, occ:mac_nha, occ:the_thao, occ:di_bien, occ:du_lich, occ:da_ngoai, occ:di_ngu. "
            "Trục 2a (wthr:): wthr:mua_he, wthr:mua_dong, wthr:giao_mua, wthr:troi_mua, wthr:troi_nang. "
            "Trục 2b (func:): func:thoang_mat, func:giu_am, func:tham_hut, func:nhanh_kho, func:chong_uv, func:can_gio. "
            "Trục 3 (style:): style:thanh_lich, style:nang_dong, style:basic, style:ca_tinh, style:de_thuong, style:tre_trung, style:toi_gian, style:smart_casual. "
            "Trục 4 (fit:): fit:oversize, fit:slim, fit:regular, fit:wide_leg, fit:cropped, fit:relaxed. "
            "VD: 'đi tiệc' -> tags=['occ:di_tiec']. 'mùa đông' -> tags=['wthr:mua_dong', 'func:giu_am']. "
            "KHÔNG tự nghĩ tag mới! PHẢI giữ prefix! Tối đa 3."
        )
    )
    # Product line(s) as the user said them; synonyms are normalized by the tool.
    product_line_vn: list[str] = Field(
        default=[],
        description=(
            "Dòng sản phẩm. CHÍNH XÁC lời user nói: 'áo phông', 'váy liền', 'đồ lót', 'quần jean'... "
            "Tool sẽ tự động chuẩn hoá từ đồng nghĩa (VD: 'áo thun' -> 'Áo phông'). "
            "Nếu khách nói chung chung ('đồ mùa đông', 'đồ tập') hoặc không nhắc, để []. "
        ),
    )
    # Gender filter (women/men/boy/girl/unisex/newborn).
    gender_by_product: str | None = Field(
        default=None,
        description="Giới tính. GIÁ TRỊ HỢP LỆ: women, men, boy, girl, unisex, newborn",
    )
    # Age group filter (adult/kid/others).
    age_by_product: str | None = Field(
        default=None,
        description="Độ tuổi. GIÁ TRỊ HỢP LỆ: adult, kid, others",
    )
    # Color exactly as the customer phrased it (Vietnamese accepted).
    master_color: str | None = Field(
        default=None,
        description=(
            "Màu sắc. Ghi CHÍNH XÁC từ khách nói: 'trắng', 'đen', 'đỏ'... "
            "VD: 'áo cờ đỏ sao vàng' -> color='đỏ'."
        ),
    )
    # Price bounds in VND and discount-percent bounds.
    price_min: int | None = Field(default=None, description="Giá thấp nhất (VND)")
    price_max: int | None = Field(default=None, description="Giá cao nhất (VND)")
    discount_min: int | None = Field(default=None, description="% giảm giá TỐI THIỂU.")
    discount_max: int | None = Field(default=None, description="% giảm giá TỐI ĐA.")
    # Discovery filter: "new" or "best_seller".
    discovery_mode: str | None = Field(default=None, description="'new' = hàng mới, 'best_seller' = bán chạy.")
    # Requested size (adult letter sizes, kid heights in cm, newborn months).
    size: str | None = Field(
        default=None,
        description=(
            "Size san pham khach yeu cau. "
            "Nguoi lon: XS, S, M, L, XL, XXL, 3XL, 4XL. "
            "Tre em (chieu cao cm): 80, 86, 92, 98, 104, 110, 116, 122, 128, 134, 140, 152, 164. "
            "Tre so sinh (thang): 1/3, 4/6, 7/9, 10/12, 13/14. "
            "VD: 'ao size L' -> size='L'."
        ),
    )
    # Explicit SKU lookup short-circuits the whole search cascade.
    magento_ref_code: str | None = Field(default=None, description="Ma SKU cu the.")
    # Free-text rationale from the model; echoed back in the tool result.
    reasoning: str | None = Field(default=None, description="SUY LUẬN TẠI SAO bạn chọn params này.")
# ======================================================
# SQL Builder
# ======================================================
def _build_fixed_clauses(req: LeadSearchInput, params: list) -> list[str]:
    """Build the "fixed" WHERE clauses that every search tier keeps.

    Covers product line (with synonym/related-line expansion), gender
    (locale variants + always-matching "unisex"), age, color (raw,
    accent-stripped and English forms), price range, discount range,
    size and discovery mode.

    Bind values are appended to ``params`` in the same order as the %s
    placeholders inside the returned clause strings.

    Returns:
        List of SQL clause strings to be AND-joined by the caller.
    """
    clauses = []
    if req.product_line_vn:
        lines = []
        for line in req.product_line_vn:
            if not line: continue
            resolved = resolve_product_line(line)
            for r in resolved:
                expanded = get_related_lines(r)
                lines.extend(expanded)
        if lines:
            placeholders = ", ".join(["%s"] * len(lines))
            params.extend(lines)
            clauses.append(f"product_line_vn IN ({placeholders})")
    if req.gender_by_product:
        gender_lower = req.gender_by_product.lower().strip()
        genders_to_search = []
        # Every gender bucket also includes "unisex" so unisex items match.
        if gender_lower in ("women", "nu", "female", "nữ"):
            genders_to_search = ["female", "women", "nu", "nữ", "unisex"]
        elif gender_lower in ("men", "nam", "male"):
            genders_to_search = ["male", "men", "nam", "unisex"]
        elif gender_lower in ("boy", "bé trai", "be trai"):
            genders_to_search = ["boy", "bé trai", "be trai", "unisex"]
        elif gender_lower in ("girl", "bé gái", "be gai"):
            genders_to_search = ["girl", "bé gái", "be gai", "unisex"]
        else:
            genders_to_search = [gender_lower, "unisex"]
        placeholders = ", ".join(["%s"] * len(genders_to_search))
        params.extend(genders_to_search)
        clauses.append(f"gender_by_product IN ({placeholders})")
    if req.age_by_product:
        age_lower = req.age_by_product.lower().strip()
        # The table only knows "kid"; fold baby/newborn into it.
        if age_lower in ("baby", "newborn"):
            params.append("kid")
        else:
            params.append(age_lower)
        clauses.append("age_by_product = %s")
    if req.master_color:
        c_raw = req.master_color.strip()
        COLOR_EN_MAP = {
            "do": "red", "trang": "white", "den": "black", "xanh": "blue",
            "hong": "pink", "tim": "purple", "cam": "orange", "vang": "yellow",
            "nau": "brown", "xam": "gray", "be": "beige", "kem": "cream",
            "xanh la": "green", "bac": "silver", "vang dong": "gold",
            "đo": "red", "đỏ": "red", "trắng": "white", "đen": "black",
            "xanh lá": "green", "hồng": "pink", "tím": "purple", "nâu": "brown",
            "xám": "gray", "vàng": "yellow", "bạc": "silver"
        }
        c_key = c_raw.lower().strip()
        en_color = COLOR_EN_MAP.get(c_key)
        # BUGFIX: always derive the accent-stripped form. The old code threw
        # it away (`c_no_accent = c_key`), so the accent-free LIKE branch
        # below could never fire (c_key always equals c_raw.lower()).
        import unicodedata
        c_no_accent = "".join(
            ch for ch in unicodedata.normalize("NFD", c_key)
            if unicodedata.category(ch) != "Mn"
        ).strip()
        if not en_color:
            en_color = COLOR_EN_MAP.get(c_no_accent)
        color_parts = []
        # Match the customer's literal wording first.
        color_parts.append("(master_color LIKE %s OR product_color_name LIKE %s)")
        params.append(f"%{c_raw}%")
        params.append(f"%{c_raw}%")
        # Accent-free variant for rows stored without diacritics.
        if c_no_accent != c_key:
            color_parts.append("(LOWER(master_color) LIKE %s OR LOWER(product_color_name) LIKE %s)")
            params.append(f"%{c_no_accent}%")
            params.append(f"%{c_no_accent}%")
        # English translation for rows stored in English.
        if en_color:
            color_parts.append("(LOWER(master_color) LIKE %s OR LOWER(product_color_name) LIKE %s)")
            params.append(f"%{en_color}%")
            params.append(f"%{en_color}%")
        clauses.append("(" + " OR ".join(color_parts) + ")")
    if req.price_min is not None:
        params.append(req.price_min)
        clauses.append("sale_price >= %s")
    if req.price_max is not None:
        params.append(req.price_max)
        clauses.append("sale_price <= %s")
    if req.discount_min is not None:
        params.append(req.discount_min)
        clauses.append("(original_price > 0 AND ((original_price - sale_price) / original_price * 100) >= %s)")
    if req.discount_max is not None:
        params.append(req.discount_max)
        clauses.append("(original_price > 0 AND ((original_price - sale_price) / original_price * 100) <= %s)")
    if req.size:
        s = req.size.strip().upper()
        params.append(s)
        # size_scale is pipe-delimited; normalize to commas for FIND_IN_SET.
        clauses.append("FIND_IN_SET(%s, REPLACE(size_scale, '|', ',')) > 0")
    if req.discovery_mode:
        mode = req.discovery_mode.lower().strip()
        if mode == "new":
            clauses.append("is_new_product = 1")
        elif mode == "best_seller":
            clauses.append("quantity_sold > 0")
    return clauses
# Tags that map onto a BITMAP-indexed column: tag -> (column, stored value).
# Several distinct tags intentionally collapse to the same column value
# (e.g. fit:wide_leg and fit:relaxed both -> "Relax").
TAG_TO_BITMAP_COL: dict[str, tuple[str, str]] = {
    "style:thanh_lich": ("style", "Feminine"),
    "style:nang_dong": ("style", "Dynamic"),
    "style:basic": ("style", "Basic"),
    "style:ca_tinh": ("style", "Street"),
    "style:de_thuong": ("style", "Cute"),
    "style:tre_trung": ("style", "Trend"),
    "style:toi_gian": ("style", "Essential"),
    "style:smart_casual":("style", "Smart Casual"),
    "fit:oversize": ("fitting", "Oversize"),
    "fit:slim": ("fitting", "Slim"),
    "fit:regular": ("fitting", "Regular"),
    "fit:wide_leg": ("fitting", "Relax"),
    "fit:cropped": ("fitting", "Boxy"),
    "fit:relaxed": ("fitting", "Relax"),
    "wthr:mua_he": ("season_sale", "Summer"),
    "wthr:mua_dong": ("season_sale", "Winter"),
    "occ:di_bien": ("season_sale", "Summer"),
    "occ:du_lich": ("season_sale", "Summer"),
    "occ:the_thao": ("style", "Athleisure"),
}
# Tags without a bitmap column: tag -> Vietnamese phrase probed with LIKE
# against description_text / product_name.
TAG_TO_SEARCH_TEXT: dict[str, str] = {
    "occ:di_lam": "đi làm",
    "occ:di_choi": "đi chơi",
    "occ:di_tiec": "đi tiệc",
    "occ:di_hoc": "đi học",
    "occ:mac_nha": "mặc nhà",
    "occ:da_ngoai": "dã ngoại",
    "occ:di_ngu": "ngủ",
    "wthr:giao_mua": "giao mùa",
    "wthr:troi_mua": "mưa",
    "wthr:troi_nang": "nắng",
    "func:thoang_mat": "thoáng mát",
    "func:giu_am": "giữ ấm",
    "func:tham_hut": "thấm hút",
    "func:nhanh_kho": "nhanh khô",
    "func:chong_uv": "chống UV",
    "func:can_gio": "cản gió",
}
def _build_tag_clauses(tags: list[str], params: list) -> str:
    """Translate AI-inferred tags into one OR-group of SQL predicates.

    Tags with a bitmap-column mapping become `=` / `IN` clauses; all other
    tags fall back to LIKE probes over description_text / product_name
    (using TAG_TO_SEARCH_TEXT phrases, or the de-prefixed tag itself).
    Bind values are appended to ``params`` in clause order.

    Returns "" when ``tags`` is empty or yields no usable clause.
    """
    if not tags:
        return ""
    bitmap_values: dict[str, list[str]] = {}
    like_terms: list[str] = []
    for raw_tag in tags:
        key = raw_tag.strip().lower()
        mapping = TAG_TO_BITMAP_COL.get(key)
        if mapping is not None:
            column, value = mapping
            bitmap_values.setdefault(column, []).append(value)
        else:
            # Known text tag -> curated phrase; unknown tag -> best-effort text.
            like_terms.append(
                TAG_TO_SEARCH_TEXT.get(key, key.replace(":", " ").replace("_", " "))
            )
    pieces: list[str] = []
    for column, values in bitmap_values.items():
        if len(values) == 1:
            params.append(values[0])
            pieces.append(f"{column} = %s")
        else:
            params.extend(values)
            pieces.append(f"{column} IN ({', '.join(['%s'] * len(values))})")
    probe_clauses: list[str] = []
    for raw_term in like_terms:
        term = raw_term.strip()
        if not term:
            continue
        params.extend([f"%{term}%", f"%{term}%"])
        probe_clauses.append("(LOWER(description_text) LIKE %s OR product_name LIKE %s)")
    if probe_clauses:
        pieces.append(f"({' OR '.join(probe_clauses)})")
    return f"({' OR '.join(pieces)})" if pieces else ""
def _build_search_clause(search_terms: list[str], params: list) -> str:
term_clauses = []
for term in search_terms:
t = term.strip()
if not t: continue
words = t.split()
if not words: continue
word_clauses = []
for word in words:
if len(word) >= 4:
params.append(f"%{word}%")
word_clauses.append("product_name LIKE %s")
else:
params.append(f"%{word}%")
params.append(f"%{word}%")
word_clauses.append("(product_name LIKE %s OR LOWER(description_text) LIKE %s)")
if word_clauses:
term_clauses.append(f"({' AND '.join(word_clauses)})")
if term_clauses:
return f"({' OR '.join(term_clauses)})"
return ""
def _build_exclusion_clauses(keywords: list[str], params: list) -> list[str]:
clauses = []
kws_str = " ".join(keywords).lower()
if any(k in kws_str for k in ["đông", "lạnh", "winter", "tuyết", "giữ ấm"]):
clauses.append("product_line_vn NOT IN (%s, %s)")
params.extend(["Quần soóc", "Áo ba lỗ"])
if any(k in kws_str for k in ["đi làm", "công sở", "văn phòng", "office"]):
forbidden = ["cartoon", "hoạt hình", "manga", "anime", "demon slayer", "naruto", "disney", "marvel"]
for f in forbidden:
clauses.append("LOWER(description_text) NOT LIKE %s")
params.append(f"%{f}%")
return clauses
def _build_full_query(
    fixed_clauses: list[str],
    search_clause: str | None,
    exclusion_clauses: list[str] | None = None,
) -> str:
    """Assemble the final SELECT over the product dimension table.

    AND-joins fixed + optional search + optional exclusion clauses
    (falling back to ``1=1`` when there are none), ranks by sales volume
    then ascending price, and caps the result at 20 rows.

    Note: the annotation of ``exclusion_clauses`` was corrected to
    ``list[str] | None`` — the old ``list[str] = None`` lied to type checkers.
    """
    all_clauses = list(fixed_clauses)
    if search_clause:
        all_clauses.append(search_clause)
    if exclusion_clauses:
        all_clauses.extend(exclusion_clauses)
    where = " AND ".join(all_clauses) if all_clauses else "1=1"
    return f"""
    SELECT {SELECT_COLUMNS}
    FROM {TABLE_NAME}
    WHERE {where}
    ORDER BY quantity_sold DESC NULLS LAST, sale_price ASC
    LIMIT 20
    """
async def _price_relaxed_search(req: LeadSearchInput, db, multiplier: float) -> list:
    """Re-run the fixed-clause search with the price ceiling widened.

    Temporarily sets ``req.price_max *= multiplier`` and drops
    ``req.price_min``, then restores both — now inside try/finally, so a
    failing query can no longer leave the caller's ``req`` mutated.

    Returns the raw product rows ([] when ``req.price_max`` is unset).
    """
    if req.price_max is None:
        return []
    saved_max, saved_min = req.price_max, req.price_min
    req.price_max = int(req.price_max * multiplier)
    req.price_min = None
    try:
        params: list = []
        fixed = _build_fixed_clauses(req, params)
        sql = _build_full_query(fixed, None)
        return await db.execute_query_async(sql, params=tuple(params))
    finally:
        # Always restore caller-visible state, even on query errors.
        req.price_max = saved_max
        req.price_min = saved_min
async def _cascading_search(req: LeadSearchInput, db) -> tuple[list, int, str | None]:
    """Legacy sequential fallback search (tiers 1-7).

    Progressively relaxes constraints until something matches:
    keywords -> tags -> hard filters only -> drop gender -> widen price.
    Temporarily mutates ``req`` for tiers 4-7 and restores it afterwards.

    Returns:
        (products, tier, fallback_message) — fallback_message is a coaching
        hint for the agent when the price budget had to be relaxed.
    """
    ex_params = []
    exclusions = _build_exclusion_clauses(req.keywords, ex_params)
    # Tier 1: fixed filters + raw keywords (NGRAMBF index / LIKE)
    if req.keywords:
        params = []
        fixed = _build_fixed_clauses(req, params)
        search = _build_search_clause(req.keywords, params)
        if search:
            sql = _build_full_query(fixed, search, exclusions)
            products = await db.execute_query_async(sql, params=tuple(params + ex_params))
            if products:
                return products, 1, None
    # Tier 2: fixed filters + AI tags (BITMAP index / LIKE)
    if req.tags:
        params = []
        fixed = _build_fixed_clauses(req, params)
        search = _build_tag_clauses(req.tags, params)
        if search:
            sql = _build_full_query(fixed, search, exclusions)
            products = await db.execute_query_async(sql, params=tuple(params + ex_params))
            if products:
                return products, 2, None
    # Tier 3: fixed filters only
    params = []
    fixed = _build_fixed_clauses(req, params)
    sql = _build_full_query(fixed, None, exclusions)
    products = await db.execute_query_async(sql, params=tuple(params + ex_params))
    if products:
        return products, 3, None
    # Tier 4: drop the gender filter (keep product line)
    if req.product_line_vn and req.gender_by_product:
        saved_gender = req.gender_by_product
        req.gender_by_product = None
        params = []
        fixed = _build_fixed_clauses(req, params)
        sql = _build_full_query(fixed, None, exclusions)
        products = await db.execute_query_async(sql, params=tuple(params + ex_params))
        req.gender_by_product = saved_gender  # restore caller-visible state
        if products:
            return products, 4, None
    # Tiers 5-7: progressively relax the price ceiling
    if req.price_max is not None and req.product_line_vn:
        original_max = req.price_max
        product_label = "/".join(req.product_line_vn)
        for multiplier, tier, label in [(1.5, 5, "1.5x"), (2.0, 6, "2x")]:
            new_max = int(original_max * multiplier)
            products = await _price_relaxed_search(req, db, multiplier)
            if products:
                cheapest_p = min(products, key=lambda p: float(p.get("sale_price") or 999_999_999))
                cheapest = float(cheapest_p.get("sale_price") or 0)
                cheapest_name = cheapest_p.get("product_name", "")
                diff = int(cheapest - original_max)
                # Coaching message: nudge the agent to upsell gently past budget.
                msg = (
                    f"FALLBACK_PRICE: Khong co {product_label} duoi {original_max:,.0f}d. "
                    f"Mau re nhat: \"{cheapest_name}\" gia {cheapest:,.0f}d "
                    f"(chi them {diff:,}d so voi budget). "
                    f"Tong co {len(products)} mau trong tam {new_max:,.0f}d. "
                    f"-> HAY goi y SP nay cho khach va noi kheo: them chut xiu la co mau rat dep!"
                )
                return products, tier, msg
        # Tier 7: drop the price filter entirely
        saved_max = req.price_max
        saved_min = req.price_min
        req.price_max = None
        req.price_min = None
        params = []
        fixed = _build_fixed_clauses(req, params)
        sql = _build_full_query(fixed, None, exclusions)
        products = await db.execute_query_async(sql, params=tuple(params + ex_params))
        req.price_max = saved_max
        req.price_min = saved_min
        if products:
            cheapest_p = min(products, key=lambda p: float(p.get("sale_price") or 999_999_999))
            cheapest = float(cheapest_p.get("sale_price") or 0)
            cheapest_name = cheapest_p.get("product_name", "")
            msg = (
                f"FALLBACK_PRICE: Khong co {product_label} duoi {original_max:,.0f}d. "
                f"Mau re nhat hien co: \"{cheapest_name}\" gia {cheapest:,.0f}d. "
                f"Tong co {len(products)} mau. "
                f"-> HAY goi y SP nay va noi: budget them chut la co mau rat dang!"
            )
            return products, 7, msg
    return [], 3, None
async def _enrich_with_outfit(
    products: list[dict],
    db,
    tags: list[str] | None = None,
) -> list[dict]:
    """Attach AI outfit data (ai_description / ai_matches) to the top-3 products.

    Reads pre-computed "ultra descriptions" from the local SQLite dump.
    Best-effort: any SQLite failure is logged and the products are returned
    untouched. Mutates the product dicts in place and returns the same list.
    ``db`` is accepted for interface parity but not used in this body.
    """
    if not products:
        return products
    # No local dump on this host -> skip enrichment silently.
    if not _os.path.exists(_DB_123_PATH):
        return products
    top_products = products[:3]
    # Anchor on the base ref code; fall back to the SKU prefix before "-".
    anchor_base_codes = [
        (p.get("internal_ref_code") or p.get("magento_ref_code", "").split("-")[0]).strip()
        for p in top_products
    ]
    anchor_base_codes = [c for c in anchor_base_codes if c]
    if not anchor_base_codes:
        return products
    # NOTE(review): `occasion` is computed but not used anywhere below —
    # confirm whether the outfit lookup was meant to filter by it.
    occasion = _resolve_occasion(tags or [])
    try:
        conn = sqlite3.connect(_DB_123_PATH)
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        placeholders = ",".join(["?"] * len(anchor_base_codes))
        ultra_rows = cursor.execute(
            f"""
            SELECT base_ref_code, magento_ref_code, ai_matches, clean_description
            FROM pg__dashboard_canifa__ultra_descriptions
            WHERE base_ref_code IN ({placeholders})
            """,
            anchor_base_codes,
        ).fetchall()
        conn.close()
    except Exception as e:
        # Never fail the search because of the optional local dump.
        logger.error("❌ SQLite ultra_desc read error: %s", e)
        return products
    ultra_map: dict[str, dict] = {}
    for row in ultra_rows:
        ultra_map[row["base_ref_code"]] = {
            "ai_matches": row["ai_matches"],
            "clean_description": row["clean_description"],
        }
    for p in top_products:
        anchor_magento = p.get("magento_ref_code", "")
        base = (p.get("internal_ref_code") or anchor_magento.split("-")[0]).strip()
        ultra = ultra_map.get(base)
        if not ultra or not ultra["ai_matches"]:
            continue
        if ultra["clean_description"]:
            p["ai_description"] = ultra["clean_description"]
        try:
            ai_matches_json = json.loads(ultra["ai_matches"])
            p["ai_matches"] = ai_matches_json
        except (json.JSONDecodeError, TypeError):
            # Malformed JSON in the dump -> skip this product's matches.
            continue
    return products
async def _format_products(products: list, db) -> list[dict]:
    """Shape the top-3 products into compact dicts for the agent.

    Batch-resolves suggest/similar SKUs against the product table in one
    IN-query so related items carry {sku, name, price}, truncates long
    descriptions to 200 chars, and passes through stock / AI-enrichment
    fields when the upstream enrichment steps added them.
    """
    formatted = []
    # ── Fetch Suggest/Similar from DB ──
    all_related_skus: set = set()
    raw_parsed_map: dict = {}
    def parse_item_list(raw_val) -> list[str]:
        # Accepts a JSON-encoded list or a real list; returns at most 3 SKUs.
        if not raw_val: return []
        if isinstance(raw_val, list): return [str(x) for x in raw_val[:3]]
        if isinstance(raw_val, str):
            try:
                parsed = json.loads(raw_val)
                if isinstance(parsed, list): return [str(x) for x in parsed[:3]]
            except Exception: pass
        return []
    for p in products[:3]:
        sku = p.get("magento_ref_code", "")
        sug_skus = parse_item_list(p.get("suggest_items"))[:2]
        sim_skus = parse_item_list(p.get("similar_items"))[:3]
        raw_parsed_map[sku] = {"suggest": sug_skus, "similar": sim_skus}
        all_related_skus.update(sug_skus)
        all_related_skus.update(sim_skus)
    sku_lookup: dict = {}
    if all_related_skus:
        # One batched query for every related SKU of all top products.
        phs = ", ".join(["%s"] * len(all_related_skus))
        q = f"SELECT magento_ref_code, product_name, sale_price FROM {TABLE_NAME} WHERE magento_ref_code IN ({phs})"
        related_products = await db.execute_query_async(q, params=tuple(all_related_skus))
        for rp in related_products:
            rpsku = rp.get("magento_ref_code")
            if rpsku:
                sku_lookup[rpsku] = {
                    "sku": rpsku,
                    "name": rp.get("product_name"),
                    "price": int(rp.get("sale_price") or 0),
                }
    for p in products[:3]:
        sale = float(p.get("sale_price") or 0)
        orig = float(p.get("original_price") or 0)
        has_discount = sale < orig and orig > 0
        disc_pct = int(p.get("discount_percent") or 0)
        main_sku = p.get("magento_ref_code", "")
        parsed_rel = raw_parsed_map.get(main_sku, {"suggest": [], "similar": []})
        # Only keep related SKUs that actually resolved in the batch query.
        rich_suggest = [sku_lookup[s] for s in parsed_rel["suggest"] if s in sku_lookup]
        rich_similar = [sku_lookup[s] for s in parsed_rel["similar"] if s in sku_lookup]
        desc = (p.get("description_text") or "").strip()
        desc_short = desc[:200] + "..." if len(desc) > 200 else desc
        item = {
            "sku": main_sku,
            "name": p.get("product_name", ""),
            "price": int(sale),
            "original_price": int(orig),
            "discount": f"-{disc_pct}%" if has_discount else None,
            "color": p.get("master_color", ""),
            "gender": p.get("gender_by_product", ""),
            "product_line": p.get("product_line_vn", ""),
            "image": p.get("product_image_url_thumbnail", ""),
            "url": p.get("product_web_url", ""),
            "sizes": p.get("size_scale", ""),
            "description": desc_short,
            "suggest_items": rich_suggest,
            "similar_items": rich_similar,
        }
        # Copy through fields added by stock/outfit enrichment, if present.
        if "in_stock" in p:
            item["in_stock"] = p["in_stock"]
            item["total_qty"] = p["total_qty"]
            item["stock"] = p["stock"]
        if "ai_description" in p:
            item["ai_description"] = p["ai_description"]
        if "ai_matches" in p:
            item["ai_matches"] = p["ai_matches"]
        formatted.append(item)
    return formatted
def _build_sku_query(code: str) -> tuple[str, list]:
    """Build an exact/prefix SKU lookup query plus its bind parameters.

    The uppercased code is matched against magento_ref_code,
    internal_ref_code, or any magento_ref_code starting with it.
    """
    normalized = code.strip().upper()
    sql = f"""
    SELECT {SELECT_COLUMNS}
    FROM {TABLE_NAME}
    WHERE (UPPER(magento_ref_code) = %s
        OR UPPER(internal_ref_code) = %s
        OR UPPER(magento_ref_code) LIKE %s)
    ORDER BY quantity_sold DESC NULLS LAST
    LIMIT 20
    """
    return sql, [normalized, normalized, f"{normalized}%"]
class ProductSearchEngine:
    """
    Standalone product search service.

    Dual-query architecture:
        Query 1 (RAW): direct LIKE on the user's literal keywords
        Query 2 (AI):  BITMAP/LIKE on AI-inferred tags
        -> merge + dedup by SKU, RAW matches first
        -> if both come back empty -> cascade fallback (tiers 3-7)
    """
    # ── Dual-query: run RAW + AI concurrently, merge the results ──
    async def _dual_query_search(
        self, req: LeadSearchInput, db
    ) -> tuple[list, int, str | None]:
        """
        Run two queries concurrently:
          1) RAW keywords -> LIKE on product_name/description
          2) AI tags      -> BITMAP index + LIKE
        Merge + dedup, RAW matches take precedence.
        If both are empty -> delegate to _fallback_cascade (tier 3+).
        """
        import asyncio
        ex_params_raw = []
        exclusions_raw = _build_exclusion_clauses(req.keywords, ex_params_raw)
        ex_params_ai = list(ex_params_raw)  # copy
        # ── Build Query 1: RAW keywords ──
        raw_coro = None
        if req.keywords:
            params_raw = []
            fixed_raw = _build_fixed_clauses(req, params_raw)
            search_raw = _build_search_clause(req.keywords, params_raw)
            if search_raw:
                sql_raw = _build_full_query(fixed_raw, search_raw, exclusions_raw)
                raw_coro = db.execute_query_async(
                    sql_raw, params=tuple(params_raw + ex_params_raw)
                )
        # ── Build Query 2: AI tags ──
        ai_coro = None
        if req.tags:
            params_ai = []
            fixed_ai = _build_fixed_clauses(req, params_ai)
            search_ai = _build_tag_clauses(req.tags, params_ai)
            if search_ai:
                sql_ai = _build_full_query(fixed_ai, search_ai, exclusions_raw)
                ai_coro = db.execute_query_async(
                    sql_ai, params=tuple(params_ai + ex_params_ai)
                )
        # ── Run both concurrently ──
        raw_products = []
        ai_products = []
        if raw_coro and ai_coro:
            raw_products, ai_products = await asyncio.gather(raw_coro, ai_coro)
        elif raw_coro:
            raw_products = await raw_coro
        elif ai_coro:
            ai_products = await ai_coro
        logger.info(
            "[DUAL QUERY] RAW=%d results | AI=%d results",
            len(raw_products), len(ai_products),
        )
        # ── Merge + dedup (RAW first) ──
        if raw_products or ai_products:
            seen_skus: set[str] = set()
            merged: list[dict] = []
            # RAW match = higher priority -> added first
            for p in raw_products:
                sku = p.get("magento_ref_code", "")
                if sku and sku not in seen_skus:
                    p["_search_source"] = "raw"
                    seen_skus.add(sku)
                    merged.append(p)
            # AI match = supplementary
            for p in ai_products:
                sku = p.get("magento_ref_code", "")
                if sku and sku not in seen_skus:
                    p["_search_source"] = "ai"
                    seen_skus.add(sku)
                    merged.append(p)
            if merged:
                # Label the tier by which source(s) produced hits
                if raw_products and ai_products:
                    tier = 1  # dual hit
                elif raw_products:
                    tier = 1  # raw only
                else:
                    tier = 2  # ai only
                return merged[:20], tier, None
        # ── Both empty -> cascade fallback tier 3+ ──
        return await self._fallback_cascade(req, db)
    async def _fallback_cascade(
        self, req: LeadSearchInput, db
    ) -> tuple[list, int, str | None]:
        """Cascade fallback when the dual query is empty (tiers 3-7)."""
        ex_params = []
        exclusions = _build_exclusion_clauses(req.keywords, ex_params)
        # Tier 3: fixed clauses only
        params = []
        fixed = _build_fixed_clauses(req, params)
        sql = _build_full_query(fixed, None, exclusions)
        products = await db.execute_query_async(sql, params=tuple(params + ex_params))
        if products:
            return products, 3, None
        # Tier 4: drop gender (restore it afterwards)
        if req.product_line_vn and req.gender_by_product:
            saved = req.gender_by_product
            req.gender_by_product = None
            params = []
            fixed = _build_fixed_clauses(req, params)
            sql = _build_full_query(fixed, None, exclusions)
            products = await db.execute_query_async(sql, params=tuple(params + ex_params))
            req.gender_by_product = saved
            if products:
                return products, 4, None
        # Tiers 5-7: progressively relax the price ceiling
        if req.price_max is not None and req.product_line_vn:
            original_max = req.price_max
            product_label = "/".join(req.product_line_vn)
            for multiplier, tier_num in [(1.5, 5), (2.0, 6)]:
                new_max = int(original_max * multiplier)
                products = await _price_relaxed_search(req, db, multiplier)
                if products:
                    cheapest_p = min(products, key=lambda p: float(p.get("sale_price") or 999_999_999))
                    cheapest = float(cheapest_p.get("sale_price") or 0)
                    cheapest_name = cheapest_p.get("product_name", "")
                    diff = int(cheapest - original_max)
                    # Coaching hint for the agent: upsell gently past budget.
                    msg = (
                        f"FALLBACK_PRICE: Khong co {product_label} duoi {original_max:,.0f}d. "
                        f'Mau re nhat: "{cheapest_name}" gia {cheapest:,.0f}d '
                        f"(chi them {diff:,}d so voi budget). "
                        f"Tong co {len(products)} mau trong tam {new_max:,.0f}d. "
                        f"-> HAY goi y SP nay cho khach va noi kheo: them chut xiu la co mau rat dep!"
                    )
                    return products, tier_num, msg
            # Tier 7: drop the price filter entirely (restore afterwards)
            saved_max, saved_min = req.price_max, req.price_min
            req.price_max = req.price_min = None
            params = []
            fixed = _build_fixed_clauses(req, params)
            sql = _build_full_query(fixed, None, exclusions)
            products = await db.execute_query_async(sql, params=tuple(params + ex_params))
            req.price_max, req.price_min = saved_max, saved_min
            if products:
                cheapest_p = min(products, key=lambda p: float(p.get("sale_price") or 999_999_999))
                cheapest = float(cheapest_p.get("sale_price") or 0)
                cheapest_name = cheapest_p.get("product_name", "")
                msg = (
                    f"FALLBACK_PRICE: Khong co {product_label} duoi {original_max:,.0f}d. "
                    f'Mau re nhat hien co: "{cheapest_name}" gia {cheapest:,.0f}d. '
                    f"Tong co {len(products)} mau. "
                    f"-> HAY goi y SP nay va noi: budget them chut la co mau rat dang!"
                )
                return products, 7, msg
        return [], 3, None
    async def search(self, req: LeadSearchInput, reasoning: str | None = None) -> dict:
        """
        Entry point: dual-query search + enrichment pipeline.

        Returns a dict with status, result count, tier, timing and the
        formatted top products; on any exception returns
        {"status": "error", "message": ...}.
        """
        start = time.time()
        db = get_db_connection()
        try:
            fallback_msg = None
            if req.magento_ref_code:
                # Direct SKU lookup bypasses the whole search cascade (tier 0).
                sql, params = _build_sku_query(req.magento_ref_code)
                products = await db.execute_query_async(sql, params=tuple(params))
                tier = 0
            else:
                # ★ Dual-query instead of the sequential cascade ★
                products, tier, fallback_msg = await self._dual_query_search(req, db)
            # Enrichment pipeline (unchanged from the legacy flow)
            products, stock_checked, stock_api_ms, timed_out = await _enrich_with_stock(products)
            products = await _enrich_with_outfit(products, db, tags=req.tags)
            for p in products:
                if stock_checked and p.get("_stock_detail"):
                    p["in_stock"] = True
                    p["total_qty"] = p.get("_total_qty")
                    p["stock"] = p.get("_stock_detail")
            formatted = await _format_products(products, db)
            elapsed_ms = round((time.time() - start) * 1000, 2)
            # Human-readable label per tier index (0-7)
            _search_modes = [
                "sku_lookup",
                "keywords_ngrambf",
                "tags_bitmap",
                "hard_filters_only",
                "drop_gender",
                "price_1.5x",
                "price_2x",
                "price_unlimited",
            ]
            logger.info(
                "[LEAD SEARCH] REQUEST | tags=%s | kw=%s | line=%s | price=%s~%s | size=%s",
                req.tags, req.keywords, req.product_line_vn, req.price_min, req.price_max, req.size,
            )
            logger.info(
                "[LEAD SEARCH] RESULT | tier=%d (%s) | results=%d | time=%.0fms | stock=%.0fms",
                tier, _search_modes[min(tier, 7)], len(products), elapsed_ms, stock_api_ms,
            )
            result = {
                "status": "success",
                "count": len(products),
                "tier": tier,
                "search_mode": _search_modes[min(tier, 7)],
                "elapsed_ms": elapsed_ms,
                "reasoning": reasoning or "",
                "keywords_used": req.keywords or [],
                "tags_used": req.tags or [],
                "products": formatted,
            }
            if fallback_msg:
                result["fallback_message"] = fallback_msg
            return result
        except Exception as e:
            logger.error("Lead search tool error: %s", e, exc_info=True)
            return {"status": "error", "message": str(e)}
......@@ -15,7 +15,7 @@ from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from common.starrocks_connection import get_db_connection
from agent.lead_stage_agent.lead_search_tool import (
from agent.lead_stage_agent.product_search_engine import (
LeadSearchInput,
_build_fixed_clauses,
_build_search_clause,
......
"""
gemini_cli_wrapper.py — Class wrapper gọi Gemini CLI từ Python.
Dùng cho:
1. Lead Flow: thay thế LangChain model → gọi Gemini CLI trực tiếp
2. Test Runner: sinh câu hỏi + đánh giá response
Yêu cầu: `gemini` CLI đã cài và có API key.
"""
import asyncio
import json
import logging
import os
import re
import shutil
import subprocess
import tempfile
logger = logging.getLogger(__name__)
class GeminiCLI:
    """
    Wrapper around the `gemini` CLI command.

    Can be plugged in anywhere an AI answer is needed:
    - Lead Flow (replacing LangChain)
    - Test Runner (question generation + response grading)
    - Any module that needs an LLM

    Usage:
        gemini = GeminiCLI()
        # Text response
        answer = await gemini.ask("Tư vấn áo đi biển")
        # JSON response
        data = await gemini.ask_json("Parse câu hỏi thành JSON: ...")
        # Sync version
        answer = gemini.ask_sync("Hello")
    """
    def __init__(self, model: str = "gemini-2.0-flash", timeout: int = 120):
        # NOTE(review): `model` is stored but never passed to the CLI
        # invocation below — the CLI runs with its own default model.
        # Confirm whether a `-m/--model` flag should be added.
        self.model = model
        self.timeout = timeout          # seconds per CLI call
        self._cli_path: str | None = None  # resolved lazily by `cli_path`
    @property
    def cli_path(self) -> str:
        """Lazily resolve and cache the path of the `gemini` executable.

        Raises:
            FileNotFoundError: when the CLI is not on PATH.
        """
        if self._cli_path is None:
            self._cli_path = shutil.which("gemini")
            if not self._cli_path:
                # FIX: the install hint pointed at a non-existent
                # "@anthropic-ai" package; the Gemini CLI is Google's.
                raise FileNotFoundError(
                    "Gemini CLI không tìm thấy trên PATH. "
                    "Cài đặt: npm install -g @google/gemini-cli"
                )
            logger.info("[GeminiCLI] Found: %s", self._cli_path)
        return self._cli_path
    @staticmethod
    def _subprocess_env() -> dict:
        """Environment for CLI subprocesses: inherit the parent env but
        disable ANSI color so output stays machine-parseable."""
        return {**os.environ, "NO_COLOR": "1", "TERM": "dumb", "FORCE_COLOR": "0"}
    # ── ASYNC ──
    async def ask(self, prompt: str) -> str:
        """Run the Gemini CLI asynchronously and return its text response.

        The prompt is passed via stdin to avoid "Command line is too long"
        errors on Windows; cwd is a temp dir so the CLI does not pick up
        project context.

        Raises:
            RuntimeError: when the CLI exits non-zero with no usable output.
            asyncio.TimeoutError: after `self.timeout` seconds (the
                subprocess is killed first, so it cannot leak).
        """
        proc = await asyncio.create_subprocess_exec(
            self.cli_path,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=self._subprocess_env(),
            cwd=tempfile.gettempdir(),
        )
        try:
            stdout, stderr = await asyncio.wait_for(
                proc.communicate(input=prompt.encode("utf-8")), timeout=self.timeout
            )
        except asyncio.TimeoutError:
            # FIX: previously the subprocess was leaked on timeout.
            proc.kill()
            raise
        output = stdout.decode("utf-8", errors="replace").strip()
        err = stderr.decode("utf-8", errors="replace").strip()
        # The CLI may exit non-zero yet still produce output (warnings only):
        # trust stdout when present.
        if output:
            if err:
                logger.debug("[GeminiCLI] stderr (ignored): %s", err[:200])
            return output
        if proc.returncode != 0:
            logger.error("[GeminiCLI] Error (code=%d): %s", proc.returncode, err)
            raise RuntimeError(f"Gemini CLI failed: {err}")
        logger.debug("[GeminiCLI] Empty response from CLI")
        return output
    async def ask_json(self, prompt: str) -> dict:
        """Run the Gemini CLI and parse the response as JSON."""
        raw = await self.ask(prompt)
        return self._parse_json(raw)
    # ── SYNC ──
    def ask_sync(self, prompt: str) -> str:
        """Synchronous variant using subprocess.run.

        Now uses the same sanitized env / temp cwd as `ask`, and the same
        tolerance for non-zero exits that still produced output.

        Raises:
            RuntimeError: non-zero exit with empty stdout.
            subprocess.TimeoutExpired: after `self.timeout` seconds.
        """
        result = subprocess.run(
            [self.cli_path],
            input=prompt,
            capture_output=True,
            text=True,
            timeout=self.timeout,
            env=self._subprocess_env(),
            cwd=tempfile.gettempdir(),
        )
        output = result.stdout.strip()
        if output:
            return output
        if result.returncode != 0:
            raise RuntimeError(f"Gemini CLI failed: {result.stderr}")
        return output
    def ask_json_sync(self, prompt: str) -> dict:
        """Synchronous variant returning parsed JSON."""
        raw = self.ask_sync(prompt)
        return self._parse_json(raw)
    # ── JSON Parser ──
    @staticmethod
    def _parse_json(raw: str) -> dict:
        """
        Locate and parse JSON inside raw Gemini CLI output.

        Supports: ```json ... ``` fenced blocks, bare JSON, or the first
        { ... } span embedded in surrounding text. Falls back to
        {"raw_text": raw} when nothing parses.
        """
        # 1. Fenced code block
        json_match = re.search(r"```(?:json)?\s*\n([\s\S]*?)\n```", raw)
        if json_match:
            try:
                return json.loads(json_match.group(1).strip())
            except json.JSONDecodeError:
                pass
        # 2. Direct parse
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            pass
        # 3. First { ... } span in the text
        brace_match = re.search(r"\{[\s\S]*\}", raw)
        if brace_match:
            try:
                return json.loads(brace_match.group())
            except json.JSONDecodeError:
                pass
        logger.warning("[GeminiCLI] Cannot parse JSON: %.200s", raw)
        return {"raw_text": raw}
# ═══════════════════════════════════════════════
# LangChain BaseChatModel Adapter
# ═══════════════════════════════════════════════
from typing import Any, Optional
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.runnables import RunnableLambda
from pydantic import PrivateAttr
class ChatGeminiCLI(BaseChatModel):
    """
    LangChain BaseChatModel adapter that wraps the Gemini CLI subprocess.

    Authentication is delegated to the Gemini CLI's own OAuth login, so no
    GOOGLE_API_KEY is required. Structured output is supported via
    with_structured_output(), implemented as JSON-in-prompt because the CLI
    exposes no native function-calling interface.

    Usage:
        llm = ChatGeminiCLI(model="gemini-2.0-flash")
        result = await llm.ainvoke([HumanMessage(content="hello")])

        # Structured output (replacement for function_calling)
        structured = llm.with_structured_output(MyPydanticModel)
        obj = await structured.ainvoke(messages)
    """

    model: str = "gemini-2.0-flash"  # model name passed through to the CLI
    timeout: int = 120  # subprocess timeout in seconds (forwarded to GeminiCLI)
    temperature: float = 0  # accepted for interface compat; not forwarded to the CLI here
    max_output_tokens: int = 1500  # accepted for interface compat; not forwarded to the CLI here
    streaming: bool = False  # CLI does not support streaming; param accepted for compat

    _cli: GeminiCLI = PrivateAttr()
    model_config = {"arbitrary_types_allowed": True}

    def __init__(self, **kwargs):
        # Compat: llm_factory may pass either 'model' or 'model_name'.
        if "model_name" in kwargs and "model" not in kwargs:
            kwargs["model"] = kwargs.pop("model_name")
        elif "model_name" in kwargs:
            kwargs.pop("model_name")
        # Drop params that are meaningless for the CLI transport.
        for drop_key in ("google_api_key", "response_mime_type", "api_key", "model_kwargs"):
            kwargs.pop(drop_key, None)
        super().__init__(**kwargs)
        self._cli = GeminiCLI(model=self.model, timeout=self.timeout)
        logger.info("✅ [ChatGeminiCLI] Initialized | model=%s | OAuth via Gemini CLI", self.model)

    @property
    def _llm_type(self) -> str:
        """Identifier used by LangChain for callbacks/serialization."""
        return "gemini-cli"

    def _format_messages(self, messages: list[BaseMessage]) -> str:
        """Flatten LangChain messages into one prompt string for the CLI."""
        parts = []
        for msg in messages:
            if isinstance(msg, SystemMessage):
                parts.append(f"[System Instructions]\n{msg.content}")
            elif isinstance(msg, HumanMessage):
                parts.append(f"[User]\n{msg.content}")
            elif isinstance(msg, AIMessage):
                parts.append(f"[Assistant]\n{msg.content}")
            else:
                # Fallback for tool/function/other message types.
                parts.append(f"[{msg.type}]\n{msg.content}")
        return "\n\n".join(parts)

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Any = None,
        **kwargs,
    ) -> ChatResult:
        """Sync generation via Gemini CLI subprocess."""
        prompt = self._format_messages(messages)
        response = self._cli.ask_sync(prompt)
        return ChatResult(
            generations=[ChatGeneration(message=AIMessage(content=response))]
        )

    async def _agenerate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Any = None,
        **kwargs,
    ) -> ChatResult:
        """Async generation via Gemini CLI subprocess."""
        prompt = self._format_messages(messages)
        response = await self._cli.ask(prompt)
        return ChatResult(
            generations=[ChatGeneration(message=AIMessage(content=response))]
        )

    def with_structured_output(self, schema, *, method=None, include_raw=False, **kwargs):
        """
        Return a Runnable that injects the JSON schema into the prompt and
        parses the CLI output into the given Pydantic model.

        Replaces function_calling with JSON-in-prompt — works regardless of
        the `method` param. `include_raw` is accepted for interface compat,
        but the validated model instance is always returned.

        Raises:
            pydantic.ValidationError: when the CLI output cannot be validated
                against `schema` (logged with a 500-char raw excerpt first).
        """
        json_schema = schema.model_json_schema()
        schema_str = json.dumps(json_schema, indent=2, ensure_ascii=False)
        outer = self

        async def _ainvoke(messages, config=None):
            schema_instruction = (
                "\n\n=== OUTPUT FORMAT (BẮT BUỘC) ===\n"
                "Trả lời CHÍNH XÁC bằng 1 JSON object hợp lệ theo schema dưới.\n"
                "KHÔNG markdown, KHÔNG giải thích, KHÔNG code fences. CHỈ raw JSON.\n"
                f"Schema:\n{schema_str}"
            )
            # Append the schema instruction to an existing system message, or
            # prepend a new one; work on a copy so callers' lists stay intact.
            augmented = list(messages)
            if augmented and isinstance(augmented[0], SystemMessage):
                augmented[0] = SystemMessage(
                    content=augmented[0].content + schema_instruction
                )
            else:
                augmented.insert(0, SystemMessage(content=schema_instruction))
            prompt = outer._format_messages(augmented)
            raw = await outer._cli.ask(prompt)
            parsed = GeminiCLI._parse_json(raw)
            try:
                return schema.model_validate(parsed)
            except Exception as e:
                logger.error(
                    "❌ [ChatGeminiCLI] Pydantic validation failed: %s\nRaw: %.500s",
                    e, raw,
                )
                raise

        def _invoke(messages, config=None):
            # FIX: asyncio.get_event_loop().run_until_complete() is deprecated
            # since Python 3.10 and raises on 3.12+ when the current thread has
            # no event loop. asyncio.run() creates and properly closes a fresh
            # loop for sync callers. (Like the old code, this must not be
            # called from inside a running loop — use the async path there.)
            return asyncio.run(_ainvoke(messages, config))

        return RunnableLambda(func=_invoke, afunc=_ainvoke)
......@@ -81,10 +81,19 @@ class LLMFactory:
raise
def _create_gemini(self, model_name: str, streaming: bool, json_mode: bool, api_key: str | None) -> BaseChatModel:
"""Create Google Gemini model instance. Always uses GOOGLE_API_KEY (ignores api_key param which may be OpenAI key)."""
"""Create Google Gemini model instance. Uses GOOGLE_API_KEY if available, otherwise falls back to Gemini CLI (OAuth)."""
key = GOOGLE_API_KEY
if not key:
raise ValueError("GOOGLE_API_KEY is required for Gemini models. Set it in .env")
# Fallback: dùng Gemini CLI (OAuth local) thay vì crash
logger.info("🔄 No GOOGLE_API_KEY — falling back to ChatGeminiCLI (OAuth)")
from common.gemini_cli_wrapper import ChatGeminiCLI
llm = ChatGeminiCLI(
model=model_name,
temperature=0,
max_output_tokens=1500,
streaming=streaming,
)
return llm
llm_kwargs = {
"model": model_name,
......
import json
import logging
from agent.helper import extract_product_ids
from langchain_core.messages import ToolMessage
# Mock ToolMessage payload from sku_search_tool (current buggy format:
# the image URL lives under "products[].image" / "sku_color" is present,
# and there is no "thumbnail_image_url" key).
sku_search_output = {
    "status": "success",
    "products": [
        {
            "sku": "6TS25S003",
            "sku_color": "6TS25S003-SB055",
            "name": "Áo thun nam",
            "image": "https://canifa.com/image1.jpg",
            "url": "https://canifa.com/p1"
        }
    ]
}

# Mock ToolMessage payload from data_retrieval_tool (working format:
# results[] items already carry "thumbnail_image_url", which is what
# extract_product_ids is expected to surface — see test_extraction below).
data_retrieval_output = {
    "status": "success",
    "results": [
        {
            "sku": "6TS25S003",
            "name": "Áo thun nam",
            "thumbnail_image_url": "https://canifa.com/image1.jpg",
            "url": "https://canifa.com/p1"
        }
    ]
}
def test_extraction():
    """Manual smoke test: run extract_product_ids over both tool output shapes.

    Prints the extracted products for each mock payload and a SUCCESS/FAILED
    verdict depending on whether `thumbnail_image_url` survives extraction.
    """
    print("Testing SKU Search Tool Extraction...")
    sku_msg = ToolMessage(content=json.dumps(sku_search_output), tool_call_id="1")
    sku_products = extract_product_ids([sku_msg])
    print(f"Extracted from SKU Search: {json.dumps(sku_products, indent=2, ensure_ascii=False)}")
    # Verdict hinges on thumbnail_image_url being populated after extraction.
    sku_ok = bool(sku_products) and bool(sku_products[0].get("thumbnail_image_url"))
    print(
        "✅ SKU Search Extraction: SUCCESS"
        if sku_ok
        else "❌ SKU Search Extraction: FAILED (thumbnail_image_url missing)"
    )

    print("\nTesting Data Retrieval Tool Extraction...")
    data_msg = ToolMessage(content=json.dumps(data_retrieval_output), tool_call_id="2")
    data_products = extract_product_ids([data_msg])
    print(f"Extracted from Data Retrieval: {json.dumps(data_products, indent=2, ensure_ascii=False)}")
    data_ok = bool(data_products) and bool(data_products[0].get("thumbnail_image_url"))
    print(
        "✅ Data Retrieval Extraction: SUCCESS"
        if data_ok
        else "❌ Data Retrieval Extraction: FAILED"
    )


if __name__ == "__main__":
    test_extraction()
B<svg width="83" height="44" viewBox="0 0 83 44" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M46.5503 30.4271V13.5729H48.8924V30.4271H46.5503ZM34.6931 30.4271V13.5729C36.3033 13.5729 37.9136 13.5729 39.5238 13.5729C41.5732 13.5729 43.1834 15.2136 43.1834 17.3017V30.4271H40.9877V17.3017C40.9877 16.5559 40.4021 15.8102 39.5238 15.8102C38.6455 15.8102 37.9136 15.8102 37.0353 15.8102V30.4271H34.6931ZM16.9806 30.4271C14.9312 30.4271 13.321 28.7864 13.321 26.6983V17.4508C13.321 15.3627 14.9312 13.722 16.9806 13.722H20.3474V16.1085H16.9806C16.2487 16.1085 15.5168 16.7051 15.5168 17.6V26.8475C15.5168 27.5932 16.1023 28.339 16.9806 28.339H20.3474V30.4333H16.9609L16.9806 30.4271ZM29.1305 21.9254V17.3017C29.1305 16.5559 28.545 15.8102 27.6667 15.8102H26.642C25.9101 15.8102 25.1781 16.4068 25.1781 17.3017V21.9254H29.1305ZM67.3369 24.3119H63.3845V30.4271H61.0423V17.3017C61.0423 15.2136 62.6526 13.5729 64.7019 13.5729H65.7266C67.776 13.5729 69.3862 15.2136 69.3862 17.3017V30.4271H67.3369V24.3119ZM63.5309 21.9254H67.3369V17.3017C67.3369 16.5559 66.7513 15.8102 65.873 15.8102H64.8483C64.1164 15.8102 63.3845 16.4068 63.3845 17.3017V21.9254H63.5309ZM54.6014 21.9254H58.7002V24.3119H54.6014V30.4271H52.2593V24.3119V21.9254V17.3017C52.2593 15.2136 53.8695 13.5729 55.9189 13.5729H58.7002V15.9593H55.9189C55.1869 15.9593 54.455 16.5559 54.455 17.4508V21.9254H54.6014ZM22.836 30.4271V17.3017C22.836 15.2136 24.4462 13.5729 26.4956 13.5729H27.5203C29.5697 13.5729 31.1799 15.2136 31.1799 17.3017V30.4271H28.8377V24.3119H24.8854V30.4271H22.836ZM0 0H83V44H0V0Z" fill="#E2231A"></path></svg>
This source diff could not be displayed because it is too large. You can view the blob instead.
<!DOCTYPE html>
<html lang="vi">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Canifa Chatbot Concept v2</title>
<script src="https://unpkg.com/react@18/umd/react.development.js"></script>
<script src="https://unpkg.com/react-dom@18/umd/react-dom.development.js"></script>
<script src="https://unpkg.com/@babel/standalone/babel.min.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
<link href="https://fonts.googleapis.com/css2?family=Montserrat:wght@400;500;600;700&display=swap" rel="stylesheet">
<style>
body { font-family: 'Montserrat', sans-serif; background: #F5F5F5; }
.canifa-red { background-color: #E2231A; }
.canifa-text-red { color: #E2231A; }
.glass-header { background: rgba(255, 255, 255, 0.9); backdrop-filter: blur(10px); }
.message-ai { background: #FFFFFF; border: 1px solid #E5E7EB; border-radius: 20px 20px 20px 4px; }
.message-user { background: #E2231A; color: white; border-radius: 20px 20px 4px 20px; }
.custom-shadow { box-shadow: 0 10px 25px -5px rgba(0, 0, 0, 0.1), 0 8px 10px -6px rgba(0, 0, 0, 0.1); }
.typing-dot { width: 4px; height: 4px; background: #E2231A; border-radius: 50%; animation: blink 1.4s infinite both; }
.typing-dot:nth-child(2) { animation-delay: 0.2s; }
.typing-dot:nth-child(3) { animation-delay: 0.4s; }
@keyframes blink { 0%, 80%, 100% { opacity: 0.2; } 40% { opacity: 1; } }
</style>
</head>
<body>
<div id="root"></div>
<script type="text/babel">
const { useState, useEffect, useRef } = React;
// Mock Icons — inline SVG components so the mockup needs no icon library.
// Send arrow (paper plane), 20x20, stroked with currentColor (button color).
const SendIcon = () => (
    <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round"><path d="m22 2-7 20-4-9-9-4Z"/><path d="M22 2 11 13"/></svg>
);
// Calendar glyph, 14x14, rendered inside the date badges in AI messages.
const CalendarIcon = () => (
    <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round"><rect width="18" height="18" x="3" y="4" rx="2" ry="2"/><line x1="16" x2="16" y1="2" y2="6"/><line x1="8" x2="8" y1="2" y2="6"/><line x1="3" x2="21" y1="10" y2="10"/></svg>
);
// Root chat mockup: a fixed demo transcript plus a composer footer.
// AI message text is scanned for ISO dates (2026-MM-DD), which are rendered
// as calendar badges instead of plain text.
const App = () => {
    // Seed transcript; setMessages exists but nothing mutates it in this mockup.
    const [messages, setMessages] = useState([
        { role: 'ai', content: 'Chào bạn! Mình là CuCu, trợ lý ảo của Canifa. Bạn cần hỗ trợ gì về trang phục hay đơn hàng không? 😊' },
        { role: 'user', content: 'Cho mình xem các mẫu áo thun mới nhất' },
        { role: 'ai', content: 'Dưới đây là một số mẫu áo thun mới nhất trong bộ sưu tập **Trạm Hè Đa Sắc 2026**:\n\n1. Áo thun Cotton USA in hình (2026-04-25)\n2. Áo thun Canifa S phong cách Streetwear (2026-04-28)\n3. Áo Polo thoáng khí cho gia đình (2026-04-29)\n\nBạn có muốn xem chi tiết mẫu nào không?' }
    ]);
    // NOTE(review): input state is not wired to the <input> below
    // (no value/onChange) — this is a static visual mockup.
    const [input, setInput] = useState('');

    return (
        <div className="flex flex-col h-screen max-w-md mx-auto bg-white border-x custom-shadow relative">
            {/* Header */}
            <header className="glass-header border-b p-4 flex items-center gap-3 sticky top-0 z-10">
                <div className="w-10 h-10 canifa-red rounded-lg flex items-center justify-center overflow-hidden">
                    <img src="canifa-brand/logo.svg" alt="Canifa" className="w-8 h-8" />
                </div>
                <div>
                    <h1 className="font-bold text-sm">CuCu Assistant</h1>
                    <div className="flex items-center gap-1">
                        <span className="w-2 h-2 bg-green-500 rounded-full"></span>
                        <span className="text-[10px] text-gray-500 uppercase tracking-wider font-semibold">Fashion for All</span>
                    </div>
                </div>
            </header>
            {/* Messages — split on newlines, then split each line on 2026-MM-DD
                dates so matched parts render as clickable-looking badges */}
            <main className="flex-1 overflow-y-auto p-4 space-y-6">
                {messages.map((msg, i) => (
                    <div key={i} className={`flex ${msg.role === 'user' ? 'justify-end' : 'justify-start'}`}>
                        <div className={`max-w-[85%] p-4 text-sm leading-relaxed shadow-sm ${msg.role === 'user' ? 'message-user' : 'message-ai'}`}>
                            <div className="whitespace-pre-wrap">
                                {msg.content.split('\n').map((line, li) => (
                                    <p key={li} className={li > 0 ? 'mt-2' : ''}>
                                        {line.split(/(2026-\d{2}-\d{2})/).map((part, pi) => (
                                            part.match(/^\d{4}-\d{2}-\d{2}$/) ? (
                                                <span key={pi} className="inline-flex items-center gap-1 px-2 py-0.5 bg-gray-100 canifa-text-red rounded-full text-[11px] font-bold cursor-pointer hover:bg-gray-200 transition-colors">
                                                    <CalendarIcon /> {part}
                                                </span>
                                            ) : part
                                        ))}
                                    </p>
                                ))}
                            </div>
                        </div>
                    </div>
                ))}
                {/* Typing indicator — always visible in this static mockup */}
                <div className="flex justify-start">
                    <div className="message-ai p-4 flex items-center gap-1">
                        <div className="typing-dot"></div>
                        <div className="typing-dot"></div>
                        <div className="typing-dot"></div>
                    </div>
                </div>
            </main>
            {/* Input */}
            <footer className="p-4 bg-white border-t">
                <div className="flex items-center gap-2 bg-gray-100 rounded-2xl p-1 pr-2 border focus-within:border-red-300 focus-within:ring-2 focus-within:ring-red-100 transition-all">
                    <input
                        type="text"
                        placeholder="Hỏi CuCu về thời trang..."
                        className="flex-1 bg-transparent border-none outline-none px-4 py-2 text-sm"
                    />
                    <button className="w-10 h-10 canifa-red text-white rounded-xl flex items-center justify-center hover:opacity-90 transition-all shadow-md">
                        <SendIcon />
                    </button>
                </div>
                <div className="text-[9px] text-center text-gray-400 mt-3 uppercase tracking-tighter">
                    © 2026 CANIFA - Powered by Huashu Design
                </div>
            </footer>
        </div>
    );
};

// Mount the app into #root (React 18 createRoot API).
const root = ReactDOM.createRoot(document.getElementById('root'));
root.render(<App />);
</script>
</body>
</html>
<!DOCTYPE html>
<html lang="vi">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Canifa Intelligence Dashboard 2026</title>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
<link href="https://fonts.googleapis.com/css2?family=Montserrat:wght@300;400;600;800&family=Space+Grotesk:wght@300;500;700&display=swap" rel="stylesheet">
<style>
:root {
--accent: #E2231A;
--bg: #0B0E14;
--card-bg: rgba(255, 255, 255, 0.03);
--card-border: rgba(255, 255, 255, 0.08);
--neon-glow: 0 0 15px rgba(226, 35, 26, 0.4);
}
body {
font-family: 'Montserrat', sans-serif;
background-color: var(--bg);
color: #FFFFFF;
overflow-x: hidden;
}
.font-heading { font-family: 'Space Grotesk', sans-serif; }
.bento-grid {
display: grid;
grid-template-columns: repeat(4, 1fr);
grid-auto-rows: minmax(160px, auto);
gap: 1.5rem;
}
.glass-card {
background: var(--card-bg);
backdrop-filter: blur(12px);
border: 1px solid var(--card-border);
border-radius: 28px;
padding: 2rem;
transition: all 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.275);
position: relative;
overflow: hidden;
}
.glass-card:hover {
border-color: var(--accent);
box-shadow: var(--neon-glow);
transform: scale(1.02);
}
.glass-card::before {
content: '';
position: absolute;
top: 0; left: -100%;
width: 100%; height: 100%;
background: linear-gradient(90deg, transparent, rgba(255,255,255,0.05), transparent);
transition: 0.5s;
}
.glass-card:hover::before { left: 100%; }
.neon-text {
color: var(--accent);
text-shadow: 0 0 8px rgba(226, 35, 26, 0.3);
}
.stat-huge {
font-size: 3.5rem;
font-weight: 800;
line-height: 1;
background: linear-gradient(to bottom, #fff, #888);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
}
.glow-dot {
width: 8px; height: 8px;
background: var(--accent);
border-radius: 50%;
box-shadow: 0 0 10px var(--accent);
animation: pulse 2s infinite;
}
@keyframes pulse {
0% { transform: scale(1); opacity: 1; }
50% { transform: scale(1.5); opacity: 0.5; }
100% { transform: scale(1); opacity: 1; }
}
.radar-box { height: 350px; width: 100%; }
/* Custom Scrollbar */
::-webkit-scrollbar { width: 6px; }
::-webkit-scrollbar-track { background: #0B0E14; }
::-webkit-scrollbar-thumb { background: #333; border-radius: 10px; }
::-webkit-scrollbar-thumb:hover { background: var(--accent); }
.badge-cyber {
background: rgba(226, 35, 26, 0.1);
border: 1px solid rgba(226, 35, 26, 0.3);
color: var(--accent);
padding: 4px 12px;
border-radius: 100px;
font-size: 10px;
font-weight: 700;
text-transform: uppercase;
letter-spacing: 0.1em;
}
</style>
</head>
<body class="p-6 md:p-12">
<div class="max-w-7xl mx-auto space-y-10">
<!-- Navbar -->
<nav class="flex justify-between items-center animate-fade-in">
<div class="flex items-center gap-4">
<div class="p-2 bg-white rounded-xl">
<img src="canifa-brand/logo.svg" alt="Canifa" class="w-16">
</div>
<div class="h-8 w-[1px] bg-white/10"></div>
<span class="font-heading font-bold text-xs uppercase tracking-[0.3em] text-gray-500">Intelligence Unit</span>
</div>
<div class="flex items-center gap-6 text-[10px] font-bold uppercase tracking-widest text-gray-400">
<span class="flex items-center gap-2"><div class="glow-dot"></div> System Live</span>
<span>Q1 2026 Analysis</span>
</div>
</nav>
<!-- Hero Section -->
<div class="relative py-10">
<h1 class="font-heading text-6xl md:text-8xl font-black tracking-tighter mb-4">
DATA <span class="neon-text">MANIFEST</span>
</h1>
<p class="text-gray-500 max-w-xl text-lg font-light leading-relaxed">
Phân tích cảm biến trải nghiệm khách hàng thời gian thực. <br/>
Hệ thống CuCu AI đã xử lý <span class="text-white font-medium">128,492</span> điểm dữ liệu trong 24h qua.
</p>
</div>
<!-- Bento Grid -->
<div class="bento-grid">
<!-- Main Stats -->
<div class="glass-card col-span-4 md:col-span-2 row-span-2 flex flex-col justify-between">
<div>
<div class="badge-cyber mb-6">Customer Sentiment Index</div>
<div class="stat-huge">98.2<span class="text-2xl text-gray-600 font-light ml-2">/100</span></div>
</div>
<div class="space-y-6">
<div class="h-24 w-full">
<canvas id="miniTrendChart"></canvas>
</div>
<div class="flex justify-between text-[10px] font-bold text-gray-500 uppercase tracking-widest">
<span>Jan 2026</span>
<span>Peak Performance Achieved</span>
<span>Mar 2026</span>
</div>
</div>
</div>
<!-- Experience Radar -->
<div class="glass-card col-span-4 md:col-span-2 row-span-3">
<div class="flex justify-between items-center mb-10">
<div class="badge-cyber">5-Axis Experience</div>
<span class="text-[10px] text-gray-500 font-mono">CORE_METRIC.REAI</span>
</div>
<div class="radar-box">
<canvas id="radarChart"></canvas>
</div>
</div>
<!-- Small KPI 1 -->
<div class="glass-card flex flex-col justify-between">
<span class="text-[10px] uppercase tracking-widest font-bold text-gray-500">NPS score</span>
<div class="text-4xl font-black neon-text">74</div>
<div class="text-[10px] text-green-500 font-bold">+5.4% WoW</div>
</div>
<!-- Small KPI 2 -->
<div class="glass-card flex flex-col justify-between">
<span class="text-[10px] uppercase tracking-widest font-bold text-gray-500">AI Accuracy</span>
<div class="text-4xl font-black text-white">99.1%</div>
<div class="text-[10px] text-gray-600 font-bold italic">Neural Network Validated</div>
</div>
<!-- Feedback Tags -->
<div class="glass-card col-span-4 md:col-span-2">
<div class="badge-cyber mb-6">Top Keywords</div>
<div class="flex flex-wrap gap-3">
<span class="px-4 py-2 bg-white/5 rounded-full text-xs font-semibold hover:bg-red-600/20 cursor-pointer transition-colors border border-white/10">#Chat-lieu-mem</span>
<span class="px-4 py-2 bg-white/5 rounded-full text-xs font-semibold hover:bg-red-600/20 cursor-pointer transition-colors border border-white/10">#Phuc-vu-tan-tam</span>
<span class="px-4 py-2 bg-white/5 rounded-full text-xs font-semibold hover:bg-red-600/20 cursor-pointer transition-colors border border-white/10">#Giao-hang-nhanh</span>
<span class="px-4 py-2 bg-white/5 rounded-full text-xs font-semibold hover:bg-red-600/20 cursor-pointer transition-colors border border-white/10">#Canifa-S-Ngon</span>
<span class="px-4 py-2 bg-white/5 rounded-full text-xs font-semibold hover:bg-red-600/20 cursor-pointer transition-colors border border-white/10">#Mau-sac-dep</span>
</div>
</div>
<!-- Sentiment Breakdown -->
<div class="glass-card col-span-4 flex items-center justify-between gap-12">
<div class="flex-1 space-y-4">
<div class="badge-cyber">Sentiment Distribution</div>
<div class="grid grid-cols-3 gap-8">
<div>
<div class="text-[10px] text-gray-500 uppercase font-bold mb-1">Positive</div>
<div class="text-3xl font-bold">82%</div>
<div class="w-full h-1 bg-white/5 mt-2 rounded-full overflow-hidden">
<div class="h-full bg-red-600" style="width: 82%"></div>
</div>
</div>
<div>
<div class="text-[10px] text-gray-500 uppercase font-bold mb-1">Neutral</div>
<div class="text-3xl font-bold">14%</div>
<div class="w-full h-1 bg-white/5 mt-2 rounded-full overflow-hidden">
<div class="h-full bg-gray-600" style="width: 14%"></div>
</div>
</div>
<div>
<div class="text-[10px] text-gray-500 uppercase font-bold mb-1">Critical</div>
<div class="text-3xl font-bold">4%</div>
<div class="w-full h-1 bg-white/5 mt-2 rounded-full overflow-hidden">
<div class="h-full bg-white" style="width: 4%"></div>
</div>
</div>
</div>
</div>
<div class="w-32 h-32">
<canvas id="sentimentDonut"></canvas>
</div>
</div>
</div>
<!-- Footer -->
<footer class="pt-20 pb-10 flex justify-between items-end border-t border-white/5">
<div class="space-y-4">
<div class="text-xs font-bold tracking-widest text-gray-600 uppercase">CANIFA DIGITAL ECOSYSTEM</div>
<div class="text-5xl font-black opacity-20 font-heading">JOY FOR ALL.</div>
</div>
<div class="text-right space-y-2">
<div class="text-[10px] font-mono text-red-600/60">SYSTEM_AUTH: 2b62470-FIX</div>
<div class="text-[10px] font-bold text-gray-500">PROUDLY POWERED BY HUASHU DESIGN</div>
</div>
</footer>
</div>
<script>
// Brand palette for all charts on this page.
const RED = '#E2231A';
// NOTE(review): DARK is never referenced in this script — presumably kept for
// palette parity with the insights page; confirm before removing.
const DARK = '#333F48';
// Global Chart.js defaults: muted tick color, heading font.
Chart.defaults.color = '#555';
Chart.defaults.font.family = 'Space Grotesk';

// 1. Mini Trend — sparkline under the sentiment index card.
// Axes and legend are hidden; the fill is a red-to-transparent gradient
// rebuilt from the chart's own canvas context on each render.
new Chart(document.getElementById('miniTrendChart'), {
    type: 'line',
    data: {
        labels: Array(20).fill(''),
        datasets: [{
            data: [65, 78, 72, 85, 80, 92, 88, 95, 92, 98, 95, 99, 96, 100, 98, 97, 100, 98, 99, 100],
            borderColor: RED,
            borderWidth: 3,
            pointRadius: 0,
            tension: 0.4,
            fill: true,
            backgroundColor: (ctx) => {
                const gradient = ctx.chart.ctx.createLinearGradient(0, 0, 0, 100);
                gradient.addColorStop(0, 'rgba(226, 35, 26, 0.2)');
                gradient.addColorStop(1, 'transparent');
                return gradient;
            }
        }]
    },
    options: {
        maintainAspectRatio: false,
        plugins: { legend: { display: false } },
        scales: {
            x: { display: false },
            y: { display: false }
        }
    }
});

// 2. Radar — "Canifa S" vs market average across 5 experience axes.
// Market average is drawn dashed with no points; scale starts at 50 so
// differences near the top of the range stay visible.
new Chart(document.getElementById('radarChart'), {
    type: 'radar',
    data: {
        labels: ['CHẤT LƯỢNG', 'DỊCH VỤ', 'GIÁ CẢ', 'KHÔNG GIAN', 'SHIP'],
        datasets: [{
            label: 'Canifa S',
            data: [95, 85, 90, 92, 88],
            borderColor: RED,
            backgroundColor: 'rgba(226, 35, 26, 0.1)',
            pointBackgroundColor: RED,
            borderWidth: 2
        }, {
            label: 'Market Avg',
            data: [80, 75, 85, 70, 75],
            borderColor: '#444',
            borderDash: [5, 5],
            borderWidth: 1,
            pointRadius: 0
        }]
    },
    options: {
        maintainAspectRatio: false,
        scales: {
            r: {
                grid: { color: 'rgba(255,255,255,0.05)' },
                angleLines: { color: 'rgba(255,255,255,0.05)' },
                pointLabels: { color: '#888', font: { size: 10, weight: 'bold' } },
                ticks: { display: false },
                suggestedMin: 50
            }
        },
        plugins: { legend: { display: false } }
    }
});

// 3. Sentiment Donut — positive/neutral/critical ring (82/14/4), thin 85%
// cutout, no legend (labels live in the adjacent HTML breakdown).
new Chart(document.getElementById('sentimentDonut'), {
    type: 'doughnut',
    data: {
        datasets: [{
            data: [82, 14, 4],
            backgroundColor: [RED, '#333', '#fff'],
            borderWidth: 0,
            hoverOffset: 10
        }]
    },
    options: {
        cutout: '85%',
        plugins: { legend: { display: false } }
    }
});
</script>
</body>
</html>
<!DOCTYPE html>
<html lang="vi">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Canifa Feedback Insights 2026</title>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
<link href="https://fonts.googleapis.com/css2?family=Montserrat:wght@400;600;700;800&display=swap" rel="stylesheet">
<style>
:root {
--canifa-red: #E2231A;
--canifa-dark: #333F48;
--canifa-light: #F5F5F5;
}
body { font-family: 'Montserrat', sans-serif; background-color: var(--canifa-light); color: var(--canifa-dark); }
.gradient-bg { background: linear-gradient(135deg, #E2231A 0%, #B91C1C 100%); }
.card { background: white; border-radius: 24px; box-shadow: 0 4px 20px rgba(0,0,0,0.05); transition: transform 0.3s ease; }
.card:hover { transform: translateY(-5px); }
.stat-value { font-size: 2.5rem; font-weight: 800; color: var(--canifa-red); }
.chart-container { position: relative; height: 300px; width: 100%; }
.radar-container { height: 400px; }
/* Custom Scrollbar */
::-webkit-scrollbar { width: 8px; }
::-webkit-scrollbar-track { background: #f1f1f1; }
::-webkit-scrollbar-thumb { background: #E2231A; border-radius: 10px; }
.animate-fade-up { animation: fadeUp 0.8s ease-out forwards; opacity: 0; }
@keyframes fadeUp {
from { opacity: 0; transform: translateY(20px); }
to { opacity: 1; transform: translateY(0); }
}
</style>
</head>
<body class="p-4 md:p-8">
<div class="max-w-6xl mx-auto space-y-8">
<!-- Header Hero -->
<header class="gradient-bg rounded-[32px] p-8 md:p-12 text-white relative overflow-hidden shadow-2xl animate-fade-up">
<div class="relative z-10 flex flex-col md:flex-row justify-between items-start md:items-center gap-6">
<div class="space-y-4">
<div class="bg-white/20 backdrop-blur-md px-4 py-1 rounded-full inline-block text-sm font-semibold tracking-widest uppercase">
Q1 2026 Strategy
</div>
<h1 class="text-4xl md:text-6xl font-extrabold leading-tight">
Canifa <br/> <span class="text-white/80">Feedback Insights</span>
</h1>
<p class="text-lg text-white/90 max-w-md italic">
"Lắng nghe từng phản hồi, dệt nên niềm vui cho mọi gia đình Việt."
</p>
</div>
<div class="bg-white p-4 rounded-2xl shadow-xl">
<img src="canifa-brand/logo.svg" alt="Canifa Logo" class="w-24 md:w-32">
</div>
</div>
<!-- Decorative circle -->
<div class="absolute -right-20 -bottom-20 w-64 h-64 bg-white/10 rounded-full blur-3xl"></div>
</header>
<!-- KPI Grid -->
<div class="grid grid-cols-1 md:grid-cols-4 gap-6 animate-fade-up" style="animation-delay: 0.2s">
<div class="card p-6 flex flex-col justify-between">
<span class="text-xs uppercase font-bold text-gray-400 tracking-wider">Hài lòng chung</span>
<div class="stat-value">4.8<span class="text-sm font-normal text-gray-400 ml-1">/ 5.0</span></div>
<div class="text-green-500 text-xs font-bold">↑ 12% so với Q4</div>
</div>
<div class="card p-6 flex flex-col justify-between">
<span class="text-xs uppercase font-bold text-gray-400 tracking-wider">Tổng Feedback</span>
<div class="stat-value">12.4K</div>
<div class="text-gray-400 text-xs">Từ 23 cửa hàng toàn quốc</div>
</div>
<div class="card p-6 flex flex-col justify-between">
<span class="text-xs uppercase font-bold text-gray-400 tracking-wider">Tỷ lệ Phản hồi</span>
<div class="stat-value">94%</div>
<div class="text-green-500 text-xs font-bold">Bot xử lý 85%</div>
</div>
<div class="card p-6 flex flex-col justify-between">
<span class="text-xs uppercase font-bold text-gray-400 tracking-wider">NPS Score</span>
<div class="stat-value">72</div>
<div class="text-green-500 text-xs font-bold">Mức xuất sắc</div>
</div>
</div>
<!-- Main Insights Section -->
<div class="grid grid-cols-1 lg:grid-cols-3 gap-8">
<!-- Radar Chart: 5-Star Breakdown -->
<div class="card p-8 lg:col-span-2 animate-fade-up" style="animation-delay: 0.4s">
<div class="flex justify-between items-center mb-8">
<h2 class="text-xl font-bold border-l-4 border-red-600 pl-4 uppercase tracking-tight">Trải nghiệm 5 Chiều</h2>
<span class="text-xs text-gray-400 font-medium italic">Dữ liệu từ AI Phân tích</span>
</div>
<div class="radar-container">
<canvas id="radarChart"></canvas>
</div>
</div>
<!-- Sentiment Distribution -->
<div class="card p-8 animate-fade-up" style="animation-delay: 0.6s">
<h2 class="text-xl font-bold border-l-4 border-red-600 pl-4 mb-8 uppercase tracking-tight">Sắc thái Cảm xúc</h2>
<div class="chart-container">
<canvas id="sentimentChart"></canvas>
</div>
<div class="mt-8 space-y-4">
<div class="flex items-center justify-between">
<span class="text-sm font-medium">Tích cực</span>
<span class="text-sm font-bold text-green-600">78%</span>
</div>
<div class="w-full bg-gray-100 rounded-full h-1.5">
<div class="bg-green-500 h-1.5 rounded-full" style="width: 78%"></div>
</div>
<div class="flex items-center justify-between">
<span class="text-sm font-medium">Trung lập</span>
<span class="text-sm font-bold text-gray-600">15%</span>
</div>
<div class="w-full bg-gray-100 rounded-full h-1.5">
<div class="bg-gray-400 h-1.5 rounded-full" style="width: 15%"></div>
</div>
<div class="flex items-center justify-between">
<span class="text-sm font-medium">Tiêu cực</span>
<span class="text-sm font-bold text-red-600">7%</span>
</div>
<div class="w-full bg-gray-100 rounded-full h-1.5">
<div class="bg-red-500 h-1.5 rounded-full" style="width: 7%"></div>
</div>
</div>
</div>
</div>
<!-- Key Trends Section -->
<div class="card p-8 animate-fade-up" style="animation-delay: 0.8s">
<h2 class="text-xl font-bold border-l-4 border-red-600 pl-4 mb-8 uppercase tracking-tight">Xu hướng Feedback 30 ngày qua</h2>
<div class="h-[300px]">
<canvas id="trendChart"></canvas>
</div>
</div>
<!-- Footer -->
<footer class="flex flex-col md:flex-row justify-between items-center py-8 text-gray-400 text-xs font-medium uppercase tracking-widest border-t border-gray-200">
<div>CANIFA Business Intelligence Division</div>
<div class="mt-4 md:mt-0 italic">Bảo mật nội bộ - © 2026 CANIFA</div>
<div class="mt-4 md:mt-0">Powered by Huashu Design</div>
</footer>
</div>
<script>
// Charts Initialization — Canifa brand palette shared by all three charts.
const canifaRed = '#E2231A';
const canifaDark = '#333F48';

// 1. Radar Chart — actual Q1 scores vs targets on a 0–5 scale across five
// experience dimensions; target series drawn dashed for contrast.
new Chart(document.getElementById('radarChart'), {
    type: 'radar',
    data: {
        labels: ['Chất lượng SP', 'Thái độ PV', 'Giá cả', 'Không gian', 'Chính sách Đổi trả'],
        datasets: [{
            label: 'Thực tế Q1',
            data: [4.8, 4.2, 4.5, 4.9, 4.0],
            fill: true,
            backgroundColor: 'rgba(226, 35, 26, 0.2)',
            borderColor: canifaRed,
            pointBackgroundColor: canifaRed,
            pointBorderColor: '#fff',
            pointHoverBackgroundColor: '#fff',
            pointHoverBorderColor: canifaRed
        }, {
            label: 'Mục tiêu',
            data: [4.5, 4.5, 4.0, 4.5, 4.5],
            fill: true,
            backgroundColor: 'rgba(51, 63, 72, 0.05)',
            borderColor: canifaDark,
            borderDash: [5, 5],
            pointBackgroundColor: canifaDark,
        }]
    },
    options: {
        elements: { line: { borderWidth: 3 } },
        scales: {
            r: {
                angleLines: { display: true },
                suggestedMin: 0,
                suggestedMax: 5,
                ticks: { stepSize: 1 }
            }
        },
        plugins: { legend: { position: 'bottom' } }
    }
});

// 2. Sentiment Doughnut — 78/15/7 split; legend hidden because the labels
// are rendered as progress bars in the surrounding HTML.
new Chart(document.getElementById('sentimentChart'), {
    type: 'doughnut',
    data: {
        labels: ['Tích cực', 'Trung lập', 'Tiêu cực'],
        datasets: [{
            data: [78, 15, 7],
            backgroundColor: [canifaRed, canifaDark, '#E5E7EB'],
            hoverOffset: 10,
            borderWidth: 0
        }]
    },
    options: {
        cutout: '80%',
        plugins: { legend: { display: false } },
        maintainAspectRatio: false
    }
});

// 3. Trend Line Chart — weekly positive feedback vs complaints over 4 weeks;
// complaints drawn thinner and dashed, gridlines suppressed on both axes.
new Chart(document.getElementById('trendChart'), {
    type: 'line',
    data: {
        labels: ['Tuần 1', 'Tuần 2', 'Tuần 3', 'Tuần 4'],
        datasets: [{
            label: 'Feedback tích cực',
            data: [2100, 2400, 2200, 2800],
            borderColor: canifaRed,
            tension: 0.4,
            fill: false,
            borderWidth: 4
        }, {
            label: 'Khiếu nại',
            data: [150, 120, 180, 90],
            borderColor: canifaDark,
            tension: 0.4,
            fill: false,
            borderWidth: 2,
            borderDash: [5, 5]
        }]
    },
    options: {
        maintainAspectRatio: false,
        scales: {
            y: { beginAtZero: true, grid: { display: false } },
            x: { grid: { display: false } }
        },
        plugins: { legend: { position: 'top', align: 'end' } }
    }
});
</script>
</body>
</html>
"""
Gemini CLI Lead Flow Test — 100 câu hỏi đa dạng.
Chạy khi server đang up: uvicorn server:app --host 0.0.0.0 --port 5000
Usage:
cd backend
python tests/gemini_lead_flow_test.py
python tests/gemini_lead_flow_test.py 20 # chỉ chạy 20 câu
"""
import asyncio
import json
import logging
import os
import sys
import time
import httpx
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common.gemini_cli_wrapper import GeminiCLI
# Message-only log format: this is an interactive CLI runner, so timestamps
# and level names would just add noise to the per-question output.
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)
# ═══════════════════════════════════════════════
# Config
# ═══════════════════════════════════════════════
# Endpoint of the locally running lead-flow chat API (see usage notes above:
# the server must already be up on port 5000).
API_URL = "http://localhost:5000/api/agent/chat-lead-flow"
# Fixed device id so all traffic from this runner is attributable to it.
DEVICE_ID = "gemini-loop-tester"
# Shared Gemini CLI client, used as the answer evaluator.
gemini = GeminiCLI()
# ═══════════════════════════════════════════════
# 100 diverse test questions
# ═══════════════════════════════════════════════
# Test corpus grouped by shopping intent (season, occasion, sport, color,
# size, price, styling advice).  Each entry is sent verbatim to the API as a
# customer query, so the Vietnamese text must stay exactly as written.
# NOTE(review): the header says 100 questions but the list currently holds
# 95 — confirm whether five more were intended.
TEST_QUESTIONS: list[str] = [
    # ── SUMMER ──
    "có áo mặc mùa hè thoáng mát không",
    "tìm áo phông nam mùa hè dưới 300k",
    "váy đi biển cho nữ có không bạn",
    "quần short nam đi chơi hè",
    "áo ba lỗ tập gym mùa hè",
    "đồ bơi trẻ em có không",
    "áo thun cotton thoáng mát cho bé trai",
    "váy liền đi dạo phố mùa hè",
    "quần linen nữ mặc hè thoải mái",
    "áo sơ mi ngắn tay nam mùa hè",
    # ── WINTER ──
    "áo khoác mùa đông cho nữ",
    "áo giữ ấm cho bé gái 5 tuổi",
    "áo len nam dưới 500k",
    "áo phao trẻ em mùa đông",
    "quần jean dày mặc đông cho nam",
    "áo nỉ hoodie nữ giữ ấm",
    "set đồ mùa đông cho bé sơ sinh",
    "áo khoác gió nam đi làm mùa đông",
    "váy len dài mặc đông thanh lịch",
    "đồ ngủ mùa đông ấm áp",
    # ── WORK / OFFICE WEAR ──
    "áo sơ mi nữ mặc đi làm",
    "quần tây nam công sở",
    "váy công sở thanh lịch dưới 600k",
    "áo polo nam đi làm smart casual",
    "set đồ công sở nữ mùa hè",
    "áo blazer nữ mặc văn phòng",
    "quần âu nữ ống đứng đi làm",
    "sơ mi trắng nam công sở size L",
    "đồ đi làm kín đáo mà vẫn đẹp",
    "trang phục họp quan trọng cho nữ",
    # ── CASUAL / STREETWEAR ──
    "đồ đi chơi cuối tuần cho nữ",
    "áo oversize nam đi dạo phố",
    "váy hoa đi picnic",
    "quần jogger nam đi chơi",
    "set đồ đi cafe cho nữ",
    "áo croptop nữ năng động",
    "quần short jean nữ đi chơi hè",
    "đồ đi chơi tết cho bé trai 3 tuổi",
    "áo khoác bomber nam cá tính",
    "đầm maxi đi du lịch",
    # ── PARTY ──
    "váy đi tiệc cuối năm cho nữ",
    "áo sơ mi nam đi tiệc",
    "đầm dạ hội thanh lịch dưới 1 triệu",
    "trang phục đi đám cưới cho nam",
    "váy liền đi sinh nhật bạn",
    "áo vest nữ đi tiệc sang trọng",
    "đồ đi tiệc cho bé gái",
    "sơ mi cổ đức nam đi tiệc",
    "váy đen đi event",
    "set đồ đi gala cho nữ",
    # ── SPORTS ──
    "áo thể thao nam chạy bộ",
    "quần legging nữ tập yoga",
    "set đồ tập gym cho nam",
    "áo thun thể thao bé trai",
    "quần short thể thao nữ",
    "áo khoác gió thể thao chạy bộ",
    "đồ bơi nam tập bơi",
    "áo polo thể thao nam",
    "quần jogger nữ tập gym",
    "set thể thao trẻ em đi học",
    # ── LOUNGEWEAR ──
    "đồ mặc nhà thoải mái cho nữ",
    "pyjama nam cotton mát",
    "đồ bộ mặc nhà cho bé gái",
    "áo ngủ nữ dễ thương",
    "quần short mặc nhà nam",
    "set đồ ngủ đôi couple",
    "đồ bộ mặc nhà nữ giá rẻ",
    "pyjama trẻ em dễ thương",
    "áo 2 dây mặc nhà nữ",
    "đồ ngủ cotton cho bé sơ sinh",
    # ── BY COLOR ──
    "áo phông trắng basic nam",
    "váy đỏ đi tiệc nữ",
    "quần jean xanh đậm nam",
    "áo len hồng pastel nữ",
    "đồ đen full set cho nam",
    "áo khoác be nữ mùa thu",
    "váy vàng đi chơi",
    "áo xám basic oversize",
    "quần trắng nữ ống rộng",
    "sơ mi xanh nhạt nam",
    # ── BY SIZE ──
    "áo phông nam size XXL",
    "váy nữ size XS nhỏ nhắn",
    "quần jean bé trai cao 140cm",
    "áo khoác nữ size 3XL",
    "đồ sơ sinh 0-3 tháng",
    # ── BY PRICE ──
    "áo dưới 200k cho nam",
    "váy đẹp dưới 400k",
    "đồ sale giảm giá 50%",
    "quần jean nam giá rẻ nhất",
    "set đồ dưới 500k cho nữ",
    # ── OUTFIT ADVICE ──
    "mặc gì đi phỏng vấn xin việc",
    "gợi ý phối đồ đi hẹn hò",
    "mặc gì ngày 30/4 đẹp",
    "trang phục đi du lịch Đà Lạt mùa đông",
    "phối đồ đi biển Nha Trang",
]
# ═══════════════════════════════════════════════
# Evaluator prompt for Gemini CLI
# ═══════════════════════════════════════════════
# Grading rubric sent to the Gemini evaluator.  Placeholders are filled via
# str.format(): question, response, product_count, product_list.  The model
# is told to reply in a fixed "VERDICT / SCORE / REASON" layout, which the
# runner parses line by line — keep the Vietnamese text byte-identical.
EVALUATOR_PROMPT: str = """\
Bạn là chuyên gia thời trang đánh giá chatbot tư vấn CANIFA.
CÂU HỎI KHÁCH: "{question}"
TRẢ LỜI CHATBOT:
{response}
SẢN PHẨM TÌM ĐƯỢC: {product_count} sản phẩm
{product_list}
ĐÁNH GIÁ theo 4 tiêu chí (mỗi cái YES/NO):
1. RELEVANT: Sản phẩm có phù hợp câu hỏi? (hỏi váy mà ra áo = NO)
2. NATURAL: Câu trả lời tự nhiên, thân thiện?
3. HELPFUL: Có tư vấn phối đồ hoặc gợi ý hữu ích?
4. ACCURATE: Thông tin giá, size, màu có chính xác?
TRẢ LỜI CHÍNH XÁC FORMAT:
VERDICT: PASS hoặc FAIL
SCORE: X/4
REASON: <1 dòng ngắn gọn>
"""
async def call_lead_flow_api(question: str, conversation_id: str) -> dict:
    """POST one customer question to the running lead-flow API.

    Returns the decoded JSON response body.  Raises an httpx error on
    connection failure or a non-2xx status (via raise_for_status).
    """
    payload = {
        "user_query": question,
        "device_id": DEVICE_ID,
        "conversation_id": conversation_id,
    }
    async with httpx.AsyncClient(timeout=30) as http:
        response = await http.post(API_URL, json=payload)
        response.raise_for_status()
        return response.json()
def _parse_verdict(verdict_text: str) -> tuple[str, str, str]:
    """Parse the evaluator's reply into (verdict, score, reason).

    Only the value after ``VERDICT:`` decides PASS/FAIL.  The previous
    implementation searched the whole reply for the substring "PASS", so a
    FAIL whose REASON mentioned the word "pass" (or that echoed the
    prompt's "VERDICT: PASS hoặc FAIL" format line) was misclassified as
    PASS.  Defaults to ("FAIL", "?", "") when the expected lines are
    missing — e.g. the "VERDICT: UNKNOWN" sentinel stays a FAIL.
    """
    verdict, score, reason = "FAIL", "?", ""
    for line in verdict_text.splitlines():
        upper = line.upper()
        if "VERDICT:" in upper:
            verdict = "PASS" if "PASS" in upper.split("VERDICT:", 1)[-1] else "FAIL"
        if "SCORE:" in upper:
            score = line.split(":")[-1].strip()
        if "REASON:" in upper:
            reason = line.split(":", 1)[-1].strip()
    return verdict, score, reason


async def run_one_test(idx: int, question: str) -> dict:
    """Run one test case: query the lead-flow API, then grade the reply.

    Args:
        idx: 1-based test number; used for logging and the conversation id.
        question: customer query, sent verbatim to the API.

    Returns:
        A result record dict; on API failure it carries verdict="ERROR".
    """
    t0 = time.time()
    conv_id = f"test-loop-{idx}"
    logger.info(f"\n{'─'*60}")
    logger.info(f"[{idx:03d}] 🧑 {question}")
    # 1. Call the lead-flow API; an unreachable server becomes an ERROR record.
    try:
        result = await call_lead_flow_api(question, conv_id)
    except Exception as e:
        logger.error(f" 💥 API Error: {e}")
        return {"idx": idx, "question": question, "verdict": "ERROR", "reason": str(e)}
    # Truncate the bot answer to 300 chars — enough context for grading.
    ai_response = result.get("ai_response", "")[:300]
    products = result.get("products", [])
    product_count = len(products)
    product_names = [p.get("name", "?") for p in products[:5]]
    logger.info(f" 📦 {product_count} sản phẩm | response: {ai_response[:80]}...")
    for name in product_names[:3]:
        logger.info(f" → {name}")
    # 2. Ask Gemini CLI to grade the answer; only the top-5 products are
    # listed to keep the evaluator prompt short.
    product_list = "\n".join([
        f" - {p.get('name','?')} | {p.get('price',0):,}đ | {p.get('color','?')} | {p.get('product_line','?')}"
        for p in products[:5]
    ]) or " (không có sản phẩm)"
    eval_prompt = EVALUATOR_PROMPT.format(
        question=question,
        response=ai_response,
        product_count=product_count,
        product_list=product_list,
    )
    try:
        verdict_text = await gemini.ask(eval_prompt)
    except Exception as e:
        logger.warning(f" ⚠️ Evaluator error: {e}")
        # Sentinel reply: parses to a FAIL with no score/reason.
        verdict_text = "VERDICT: UNKNOWN"
    verdict, score, reason = _parse_verdict(verdict_text)
    elapsed = round(time.time() - t0, 1)
    icon = "✅" if verdict == "PASS" else "❌"
    logger.info(f" {icon} {verdict} ({score}) | {reason} [{elapsed}s]")
    return {
        "idx": idx,
        "question": question,
        "product_count": product_count,
        "top_products": product_names,
        "ai_response": ai_response,
        "verdict": verdict,
        "score": score,
        "reason": reason,
        "elapsed_s": elapsed,
    }
async def main(max_rounds: int | None = None):
    """Run the question loop, print a pass/fail summary, save results JSON.

    Args:
        max_rounds: optional cap on how many questions to run (None = all).
    """
    questions = TEST_QUESTIONS[:max_rounds] if max_rounds else TEST_QUESTIONS
    total = len(questions)
    logger.info(f"🚀 Gemini CLI Lead Flow Test — {total} câu hỏi")
    logger.info(f" API: {API_URL}")
    logger.info(f" Evaluator: GeminiCLI (gemini-2.0-flash)")
    results = []
    for number, question in enumerate(questions, 1):
        try:
            results.append(await run_one_test(number, question))
        except Exception as exc:
            logger.error(f"[{number:03d}] 💥 Fatal: {exc}")
            results.append(
                {"idx": number, "question": question, "verdict": "ERROR", "reason": str(exc)}
            )
        # Throttle between questions to stay under the Gemini CLI rate limit.
        await asyncio.sleep(1)
    # ── Summary ──
    tally = {"PASS": 0, "FAIL": 0, "ERROR": 0}
    for record in results:
        verdict = record.get("verdict")
        if verdict in tally:
            tally[verdict] += 1
    passed, failed, errors = tally["PASS"], tally["FAIL"], tally["ERROR"]
    avg_time = sum(record.get("elapsed_s", 0) for record in results) / max(len(results), 1)
    logger.info(f"\n{'═'*60}")
    logger.info(f"📊 KẾT QUẢ: {passed} PASS ✅ | {failed} FAIL ❌ | {errors} ERROR 💥")
    logger.info(f" Tổng: {total} câu | Avg: {avg_time:.1f}s/câu")
    logger.info(f" Pass rate: {passed/max(total,1)*100:.0f}%")
    logger.info(f"{'═'*60}")
    # List every FAIL case with the evaluator's one-line reason.
    failed_cases = [record for record in results if record.get("verdict") == "FAIL"]
    if failed_cases:
        logger.info(f"\n❌ FAIL CASES ({len(failed_cases)}):")
        for case in failed_cases:
            logger.info(f" [{case['idx']:03d}] {case['question']}")
            logger.info(f" → {case.get('reason', '?')}")
    # Persist the full records next to this script for offline inspection.
    out_path = os.path.join(os.path.dirname(__file__), "gemini_test_results.json")
    with open(out_path, "w", encoding="utf-8") as fh:
        json.dump(results, fh, ensure_ascii=False, indent=2)
    logger.info(f"\n💾 Full results: {out_path}")
if __name__ == "__main__":
    # Optional CLI argument caps the number of questions, e.g. `python ... 20`.
    limit = int(sys.argv[1]) if len(sys.argv) > 1 else None
    asyncio.run(main(limit))
[
{
"idx": 1,
"question": "có áo mặc mùa hè thoáng mát không",
"verdict": "ERROR",
"reason": "All connection attempts failed"
},
{
"idx": 2,
"question": "tìm áo phông nam mùa hè dưới 300k",
"verdict": "ERROR",
"reason": "All connection attempts failed"
},
{
"idx": 3,
"question": "váy đi biển cho nữ có không bạn",
"verdict": "ERROR",
"reason": "All connection attempts failed"
},
{
"idx": 4,
"question": "quần short nam đi chơi hè",
"verdict": "ERROR",
"reason": "All connection attempts failed"
},
{
"idx": 5,
"question": "áo ba lỗ tập gym mùa hè",
"verdict": "ERROR",
"reason": "All connection attempts failed"
}
]
\ No newline at end of file
# Canifa · Brand Spec
> 采集日期:2026-04-30
> 资产来源:canifa.com 官网提取
> 资产完整度:完整
## 🎯 核心资产(一等公民)
### Logo
- 主版本:`backend/static/canifa-brand/logo.svg`
- 使用场景:Chatbot Header, Watermark, App Launcher
- 视觉特征:经典的 "CANIFA" 无衬线字标,置于红色圆角矩形块中。
### 品牌气质
- 核心关键词:Fashion for All, Gia đình, Năng động, Tin cậy.
- 2026 夏季主题:Trạm Hè Đa Sắc (Vibrant Summer Station)
## 🎨 辅助资产
### 色板
- **Canifa Red (Primary)**: `#E2231A` (Logo & CTA)
- **Deep Navy/Black**: `#333F48` (Typography & Secondary elements)
- **Summer Yellow/Lime**: `#C4FF1C` (Highlighter / New Campaign)
- **White**: `#FFFFFF` (Background)
- **Light Gray**: `#F5F5F5` (Container background)
### 字型
- **Display**: Sans-serif (Clean, modern like Montserrat or Inter)
- **Body**: Sans-serif (Highly readable)
### 交互签名
- 极简、流畅、圆角适中(8px - 12px)。
- 强调 "Joy"(Niềm vui):通过轻柔的转场动效传达愉悦感。
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment