Commit 85d77ce6 authored by Vũ Hoàng Anh's avatar Vũ Hoàng Anh

refactor: refactor entire codebase

parent 5deb9f3e
...@@ -36,6 +36,9 @@ backend/.venv/ ...@@ -36,6 +36,9 @@ backend/.venv/
backend/__pycache__/ backend/__pycache__/
backend/*.pyc backend/*.pyc
# Preference folder (development/temporary)
preference/
# OS # OS
.DS_Store .DS_Store
Thumbs.db Thumbs.db
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
.PHONY: up down restart logs build ps clean setup-nginx monitor-up monitor-down .PHONY: up down restart logs build ps clean setup-nginx monitor-up monitor-down
up: up:
suod docker compose up -d --build sudo docker compose up -d --build
down: down:
docker-compose down docker-compose down
......
...@@ -46,47 +46,56 @@ async def chat_controller( ...@@ -46,47 +46,56 @@ async def chat_controller(
# Init ConversationManager (Singleton) # Init ConversationManager (Singleton)
memory = await get_conversation_manager() memory = await get_conversation_manager()
# LOAD HISTORY & Prepare State # LOAD HISTORY & Prepare State (Optimize: history logic remains solid)
history_dicts = await memory.get_chat_history(user_id, limit=20) history_dicts = await memory.get_chat_history(user_id, limit=20)
# Convert to BaseMessage objects
history = [] history = []
for h in reversed(history_dicts): for h in reversed(history_dicts):
if h["is_human"]: msg_cls = HumanMessage if h["is_human"] else AIMessage
history.append(HumanMessage(content=h["message"])) history.append(msg_cls(content=h["message"]))
else:
history.append(AIMessage(content=h["message"]))
initial_state, exec_config = _prepare_execution_context( initial_state, exec_config = _prepare_execution_context(
query=query, user_id=user_id, history=history, images=images query=query, user_id=user_id, history=history, images=images
) )
try: try:
# TỐI ƯU: Chạy Graph
result = await graph.ainvoke(initial_state, config=exec_config) result = await graph.ainvoke(initial_state, config=exec_config)
# logger.info(f"Answer result from ai: {result}")
# take ai message from result # TỐI ƯU: Extract IDs từ Tool Messages một lần duy nhất
final_ai_message = result.get("ai_response") all_product_ids = _extract_product_ids(result.get("messages", []))
# Extract product IDs from tool messages # TỐI ƯU: Xử lý AI Response
product_ids = _extract_product_ids(result.get("messages", [])) ai_raw_content = result.get("ai_response").content if result.get("ai_response") else ""
logger.info(f"💾 [RAW AI OUTPUT]:\n{ai_raw_content}")
# Save to DB in background after response is sent # Chỉ parse JSON một lần để lấy Explicit IDs từ AI (Nếu có)
try:
# Vì json_mode=True, OpenAI sẽ nhả raw JSON, không cần regex rườm rà
ai_json = json.loads(ai_raw_content)
explicit_ids = ai_json.get("product_ids", [])
if explicit_ids:
all_product_ids = list(set(all_product_ids + explicit_ids))
except (json.JSONDecodeError, Exception):
# Nếu AI trả về text thường (hiếm khi xảy ra trong JSON mode) thì ignore
pass
# BACKGROUND TASK: Lưu history nhanh gọn
background_tasks.add_task( background_tasks.add_task(
_handle_post_chat_async, _handle_post_chat_async,
memory=memory, memory=memory,
user_id=user_id, user_id=user_id,
human_query=query, human_query=query,
ai_msg=final_ai_message, ai_msg=AIMessage(content=ai_raw_content),
) )
logger.info(f"✅ Request completed for user {user_id} with {len(product_ids)} products")
return { return {
"ai_response": final_ai_message.content if final_ai_message else "", "ai_response": ai_raw_content,
"product_ids": product_ids, "product_ids": all_product_ids,
} }
except Exception as e: except Exception as e:
logger.error(f"💥 Chat error: {e}", exc_info=True) logger.error(f"💥 Chat error for user {user_id}: {e}", exc_info=True)
raise raise
......
...@@ -45,7 +45,7 @@ class CANIFAGraph: ...@@ -45,7 +45,7 @@ class CANIFAGraph:
self.collection_tools = get_collection_tools() # Vẫn lấy list name để routing self.collection_tools = get_collection_tools() # Vẫn lấy list name để routing
self.retrieval_tools = self.all_tools self.retrieval_tools = self.all_tools
self.llm_with_tools = self.llm.bind_tools(self.all_tools) self.llm_with_tools = self.llm.bind_tools(self.all_tools, strict=True)
self.system_prompt = get_system_prompt() self.system_prompt = get_system_prompt()
self.prompt_template = ChatPromptTemplate.from_messages( self.prompt_template = ChatPromptTemplate.from_messages(
[ [
......
This diff is collapsed.
...@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__) ...@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)
@tool @tool
async def collect_customer_info(name: str, phone: str, email: str | None = None) -> str: async def collect_customer_info(name: str, phone: str, email: str | None) -> str:
""" """
Sử dụng tool này để ghi lại thông tin khách hàng khi họ muốn tư vấn sâu hơn, Sử dụng tool này để ghi lại thông tin khách hàng khi họ muốn tư vấn sâu hơn,
nhận khuyến mãi hoặc đăng ký mua hàng. nhận khuyến mãi hoặc đăng ký mua hàng.
......
This diff is collapsed.
import logging import logging
from common.embedding_service import create_embedding from common.embedding_service import create_embedding_async
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
...@@ -14,7 +14,7 @@ def _get_where_clauses(params) -> list[str]: ...@@ -14,7 +14,7 @@ def _get_where_clauses(params) -> list[str]:
"""Xây dựng danh sách các điều kiện lọc từ params.""" """Xây dựng danh sách các điều kiện lọc từ params."""
clauses = [] clauses = []
clauses.extend(_get_price_clauses(params)) clauses.extend(_get_price_clauses(params))
clauses.extend(_get_exact_match_clauses(params)) clauses.extend(_get_metadata_clauses(params))
clauses.extend(_get_special_clauses(params)) clauses.extend(_get_special_clauses(params))
return clauses return clauses
...@@ -31,12 +31,23 @@ def _get_price_clauses(params) -> list[str]: ...@@ -31,12 +31,23 @@ def _get_price_clauses(params) -> list[str]:
return clauses return clauses
def _get_exact_match_clauses(params) -> list[str]: def _get_metadata_clauses(params) -> list[str]:
"""Các trường lọc chính xác (Exact match).""" """Xây dựng điều kiện lọc từ metadata (Phối hợp Exact và Partial)."""
clauses = [] clauses = []
exact_filters = [
# 1. Exact Match (Giới tính, Độ tuổi) - Các trường này cần độ chính xác tuyệt đối
exact_fields = [
("gender_by_product", "gender_by_product"), ("gender_by_product", "gender_by_product"),
("age_by_product", "age_by_product"), ("age_by_product", "age_by_product"),
]
for param_name, col_name in exact_fields:
val = getattr(params, param_name, None)
if val:
clauses.append(f"{col_name} = '{_escape(val)}'")
# 2. Partial Match (LIKE) - Giúp map text linh hoạt hơn (Chất liệu, Dòng SP, Phong cách...)
# Cái này giúp map: "Yarn" -> "Yarn - Sợi", "Knit" -> "Knit - Dệt Kim"
partial_fields = [
("season", "season"), ("season", "season"),
("material_group", "material_group"), ("material_group", "material_group"),
("product_line_vn", "product_line_vn"), ("product_line_vn", "product_line_vn"),
...@@ -45,21 +56,24 @@ def _get_exact_match_clauses(params) -> list[str]: ...@@ -45,21 +56,24 @@ def _get_exact_match_clauses(params) -> list[str]:
("form_neckline", "form_neckline"), ("form_neckline", "form_neckline"),
("form_sleeve", "form_sleeve"), ("form_sleeve", "form_sleeve"),
] ]
for param_name, col_name in exact_filters: for param_name, col_name in partial_fields:
val = getattr(params, param_name, None) val = getattr(params, param_name, None)
if val: if val:
clauses.append(f"{col_name} = '{_escape(val)}'") v = _escape(val).lower()
# Dùng LOWER + LIKE để cân mọi loại ký tự thừa hoặc hoa/thường
clauses.append(f"LOWER({col_name}) LIKE '%{v}%'")
return clauses return clauses
def _get_special_clauses(params) -> list[str]: def _get_special_clauses(params) -> list[str]:
"""Các trường hợp đặc biệt: Mã sản phẩm, Màu sắc.""" """Các trường hợp đặc biệt: Mã sản phẩm, Màu sắc."""
clauses = [] clauses = []
# Mã sản phẩm # Mã sản phẩm / SKU
ref_code = getattr(params, "internal_ref_code", None) m_code = getattr(params, "magento_ref_code", None)
if ref_code: if m_code:
r = _escape(ref_code) m = _escape(m_code)
clauses.append(f"(internal_ref_code = '{r}' OR magento_ref_code = '{r}')") clauses.append(f"(magento_ref_code = '{m}' OR internal_ref_code = '{m}')")
# Màu sắc # Màu sắc
color = getattr(params, "master_color", None) color = getattr(params, "master_color", None)
...@@ -69,7 +83,7 @@ def _get_special_clauses(params) -> list[str]: ...@@ -69,7 +83,7 @@ def _get_special_clauses(params) -> list[str]:
return clauses return clauses
def build_starrocks_query(params, query_vector: list[float] | None = None) -> str: async def build_starrocks_query(params, query_vector: list[float] | None = None) -> str:
""" """
Build SQL Hybrid tối ưu: Build SQL Hybrid tối ưu:
1. Pre-filtering (Metadata) 1. Pre-filtering (Metadata)
...@@ -80,7 +94,7 @@ def build_starrocks_query(params, query_vector: list[float] | None = None) -> st ...@@ -80,7 +94,7 @@ def build_starrocks_query(params, query_vector: list[float] | None = None) -> st
# --- Process vector in query field --- # --- Process vector in query field ---
query_text = getattr(params, "query", None) query_text = getattr(params, "query", None)
if query_text and query_vector is None: if query_text and query_vector is None:
query_vector = create_embedding(query_text) query_vector = await create_embedding_async(query_text)
# --- Build filter clauses --- # --- Build filter clauses ---
where_clauses = _get_where_clauses(params) where_clauses = _get_where_clauses(params)
...@@ -144,7 +158,7 @@ def build_starrocks_query(params, query_vector: list[float] | None = None) -> st ...@@ -144,7 +158,7 @@ def build_starrocks_query(params, query_vector: list[float] | None = None) -> st
FROM top_sku_candidates FROM top_sku_candidates
GROUP BY internal_ref_code GROUP BY internal_ref_code
ORDER BY max_score DESC ORDER BY max_score DESC
LIMIT 5 LIMIT 10
""" # noqa: S608 """ # noqa: S608
else: else:
# FALLBACK: Keyword search - MAXIMALLY OPTIMIZED (No CTE overhead) # FALLBACK: Keyword search - MAXIMALLY OPTIMIZED (No CTE overhead)
...@@ -181,7 +195,7 @@ def build_starrocks_query(params, query_vector: list[float] | None = None) -> st ...@@ -181,7 +195,7 @@ def build_starrocks_query(params, query_vector: list[float] | None = None) -> st
GROUP BY internal_ref_code GROUP BY internal_ref_code
HAVING COUNT(*) > 0 HAVING COUNT(*) > 0
ORDER BY sale_price ASC ORDER BY sale_price ASC
LIMIT 5 LIMIT 10
""" # noqa: S608 """ # noqa: S608
logger.info(f"📊 Query Mode: {'Vector' if query_vector else 'Keyword'}") logger.info(f"📊 Query Mode: {'Vector' if query_vector else 'Keyword'}")
......
"""
Test API Routes - Tất cả endpoints cho testing (isolated)
KHÔNG ĐỘNG VÀO chatbot_route.py chính!
"""
import asyncio
import logging
import random
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
from agent.models import QueryRequest
from common.load_test_manager import get_load_test_manager
router = APIRouter(prefix="/test", tags=["Testing & Load Test"])
logger = logging.getLogger(__name__)
# ==================== MOCK CHAT ENDPOINT ====================
@router.post("/chat-mock", summary="Mock Chat API (for Load Testing)")
async def mock_chat(req: QueryRequest):
    """
    MOCK endpoint used to measure performance WITHOUT paying for OpenAI.
    Produces a canned reply after a randomized artificial delay.

    ⚠️ FOR LOAD TESTING ONLY!
    """
    # Simulate the latency profile of the real API (100-500 ms).
    await asyncio.sleep(random.uniform(0.1, 0.5))

    # Canned AI replies (Vietnamese, mirroring the real assistant's tone).
    canned_replies = [
        "Dạ em đã tìm được một số mẫu áo sơ mi nam đẹp cho anh/chị ạ. Anh/chị có thể xem các sản phẩm sau đây.",
        "Em xin gợi ý một số mẫu áo thun nam phù hợp với yêu cầu của anh/chị.",
        "Dạ, em có tìm thấy một số mẫu quần jean nam trong khoảng giá anh/chị yêu cầu ạ.",
        "Em xin giới thiệu các mẫu áo khoác nam đang có khuyến mãi tốt ạ.",
        "Anh/chị có thể tham khảo các mẫu giày thể thao nam đang được ưa chuộng nhất.",
    ]

    # Fabricate 2-5 fake product identifiers.
    fake_ids = [
        f"MOCK_PROD_{random.randint(1000, 9999)}"
        for _ in range(random.randint(2, 5))
    ]

    return {
        "status": "success",
        "ai_response": random.choice(canned_replies),
        "product_ids": fake_ids,
        "_mock": True,  # marker so callers can tell this is a mock response
        "_latency_ms": random.randint(100, 500)
    }
@router.post("/db-search", summary="DB Search Mock (Test StarRocks Performance)")
async def mock_db_search(req: QueryRequest):
    """
    Endpoint for benchmarking StarRocks DB query PERFORMANCE.
    Supports Multi-Search (parallel queries).
    """
    from agent.tools.data_retrieval_tool import data_retrieval_tool

    import json

    try:
        # Fire a fixed two-query multi-search; the tool resolves them in parallel.
        payload = {
            "searches": [
                {
                    "keywords": "áo sơ mi",
                    "gender_by_product": "male",
                    "price_max": 500000
                },
                {
                    "keywords": "quần jean",
                    "gender_by_product": "male",
                    "price_max": 800000
                }
            ]
        }
        tool_result = await data_retrieval_tool.ainvoke(payload)

        result_data = json.loads(tool_result)

        # Flatten product IDs across every search result.
        all_product_ids = []
        if result_data.get("status") == "success":
            for res in result_data.get("results", []):
                all_product_ids.extend(
                    p.get("internal_ref_code", "") for p in res.get("products", [])
                )

        return {
            "status": "success",
            "ai_response": "Kết quả Multi-Search Parallel từ DB",
            "product_ids": list(set(all_product_ids)),
            "_db_test": True,
            "_queries_count": len(result_data.get("results", [])),
            "_total_products": len(all_product_ids)
        }
    except Exception as e:
        logger.error(f"DB multi-search error: {e}")
        return {
            "status": "error",
            "ai_response": f"Lỗi: {str(e)}",
            "product_ids": [],
            "_error": str(e)
        }
# ==================== LOAD TEST CONTROL ====================
class StartTestRequest(BaseModel):
    """Request body for starting a load-test run.

    Field ``description`` strings are rendered into the OpenAPI schema and are
    user-facing, so the original Vietnamese text is intentionally kept as-is.
    """
    target_url: str = Field(default="http://localhost:5000", description="Base URL của target")
    num_users: int = Field(default=10, ge=1, le=1000, description="Số lượng concurrent users")
    spawn_rate: int = Field(default=2, ge=1, le=100, description="Tốc độ spawn users (users/second)")
    duration_seconds: int = Field(default=60, ge=10, le=600, description="Thời gian chạy test (giây)")
    test_type: str = Field(default="chat_mock", description="chat_mock | chat_real | history")
@router.post("/loadtest/start", summary="Bắt đầu Load Test")
async def start_load_test(req: StartTestRequest):
    """
    Start a load test with the supplied configuration.

    **test_type options:**
    - `chat_mock`: Test mock chat API (KHÔNG tốn tiền) ⭐ Khuyên dùng
    - `chat_real`: Test real chat API (TỐN TIỀN OpenAI!)
    - `history`: Test history API (không tốn tiền LLM)

    Returns a success envelope with the manager's start-test result.
    Raises HTTP 400 when the manager reports a config error, HTTP 500
    on any unexpected failure.
    """
    try:
        manager = get_load_test_manager()
        config_dict = req.model_dump()
        result = manager.start_test(config_dict)

        if "error" in result:
            raise HTTPException(status_code=400, detail=result["error"])

        return {
            "status": "success",
            "message": "Load test started",
            "data": result
        }
    except HTTPException:
        # BUG FIX: HTTPException subclasses Exception, so the 400 raised above
        # was previously swallowed by the generic handler below and re-raised
        # as a 500. Re-raise it untouched so the client sees the real status.
        raise
    except Exception as e:
        logger.error(f"Error starting load test: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/loadtest/stop", summary="Dừng Load Test")
async def stop_load_test():
    """
    Stop the currently-running load test.

    Returns a success envelope with the manager's stop-test result.
    Raises HTTP 400 when the manager reports an error (e.g. no test
    running), HTTP 500 on any unexpected failure.
    """
    try:
        manager = get_load_test_manager()
        result = manager.stop_test()

        if "error" in result:
            raise HTTPException(status_code=400, detail=result["error"])

        return {
            "status": "success",
            "message": "Load test stopped",
            "data": result
        }
    except HTTPException:
        # BUG FIX: without this clause the 400 above was caught by the generic
        # Exception handler and converted into a 500, hiding the real error.
        raise
    except Exception as e:
        logger.error(f"Error stopping load test: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/loadtest/metrics", summary="Lấy Metrics Realtime")
async def get_load_test_metrics():
    """
    Return realtime metrics for the running load test.
    The frontend polls this endpoint every 2 seconds.
    """
    try:
        snapshot = get_load_test_manager().get_metrics()
        return {"status": "success", "data": snapshot}
    except Exception as e:
        logger.error(f"Error getting metrics: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/loadtest/status", summary="Check Test Status")
async def get_load_test_status():
    """Report whether a load test is currently running."""
    try:
        manager = get_load_test_manager()
        payload = {
            "is_running": manager.is_running(),
            "current_status": manager.status,
        }
        return {"status": "success", "data": payload}
    except Exception as e:
        logger.error(f"Error getting status: {e}")
        raise HTTPException(status_code=500, detail=str(e))
...@@ -64,26 +64,9 @@ class LLMFactory: ...@@ -64,26 +64,9 @@ class LLMFactory:
json_mode: bool = False, json_mode: bool = False,
api_key: str | None = None, api_key: str | None = None,
) -> BaseChatModel: ) -> BaseChatModel:
""" """Create and cache a new OpenAI LLM instance."""
Create and cache a new OpenAI LLM instance.
Args:
model_name: Clean model identifier
streaming: Enable streaming
json_mode: Enable JSON mode
api_key: Optional API key override
Returns:
Configured LLM instance
Raises:
ValueError: If API key is missing
"""
try: try:
llm = self._create_openai(model_name, streaming, api_key) llm = self._create_openai(model_name, streaming, json_mode, api_key)
if json_mode:
llm = self._enable_json_mode(llm, model_name)
cache_key = (model_name, streaming, json_mode, api_key) cache_key = (model_name, streaming, json_mode, api_key)
self._cache[cache_key] = llm self._cache[cache_key] = llm
...@@ -93,19 +76,26 @@ class LLMFactory: ...@@ -93,19 +76,26 @@ class LLMFactory:
logger.error(f"❌ Failed to create model {model_name}: {e}") logger.error(f"❌ Failed to create model {model_name}: {e}")
raise raise
def _create_openai(self, model_name: str, streaming: bool, api_key: str | None) -> BaseChatModel: def _create_openai(self, model_name: str, streaming: bool, json_mode: bool, api_key: str | None) -> BaseChatModel:
"""Create OpenAI model instance.""" """Create OpenAI model instance."""
key = api_key or OPENAI_API_KEY key = api_key or OPENAI_API_KEY
if not key: if not key:
raise ValueError("OPENAI_API_KEY is required") raise ValueError("OPENAI_API_KEY is required")
llm = ChatOpenAI( llm_kwargs = {
model=model_name, "model": model_name,
streaming=streaming, "streaming": streaming,
api_key=key, "api_key": key,
temperature=0, "temperature": 0,
) }
# Nếu bật json_mode, tiêm trực tiếp vào constructor
if json_mode:
llm_kwargs["model_kwargs"] = {"response_format": {"type": "json_object"}}
logger.info(f"⚙️ Initializing OpenAI in JSON mode: {model_name}")
llm = ChatOpenAI(**llm_kwargs)
logger.info(f"✅ Created OpenAI: {model_name}") logger.info(f"✅ Created OpenAI: {model_name}")
return llm return llm
......
This diff is collapsed.
This diff is collapsed.
...@@ -4,6 +4,7 @@ Based on chatbot-rsa pattern ...@@ -4,6 +4,7 @@ Based on chatbot-rsa pattern
""" """
import logging import logging
import asyncio
from typing import Any from typing import Any
import aiomysql import aiomysql
...@@ -109,11 +110,15 @@ class StarRocksConnection: ...@@ -109,11 +110,15 @@ class StarRocksConnection:
# Async pool shared # Async pool shared
_shared_pool = None _shared_pool = None
_pool_lock = asyncio.Lock()
async def get_pool(self): async def get_pool(self):
""" """
Get or create shared async connection pool Get or create shared async connection pool (Thread-safe singleton)
""" """
if StarRocksConnection._shared_pool is None:
async with StarRocksConnection._pool_lock:
# Double-check inside lock to prevent multiple pools
if StarRocksConnection._shared_pool is None: if StarRocksConnection._shared_pool is None:
logger.info(f"🔌 Creating Async Pool to {self.host}:{self.port}...") logger.info(f"🔌 Creating Async Pool to {self.host}:{self.port}...")
StarRocksConnection._shared_pool = await aiomysql.create_pool( StarRocksConnection._shared_pool = await aiomysql.create_pool(
...@@ -124,8 +129,8 @@ class StarRocksConnection: ...@@ -124,8 +129,8 @@ class StarRocksConnection:
db=self.database, db=self.database,
charset="utf8mb4", charset="utf8mb4",
cursorclass=aiomysql.DictCursor, cursorclass=aiomysql.DictCursor,
minsize=100, minsize=10, # Sẵn sàng 10 kết nối ngay lập tức (Cực nhanh cho Prod)
maxsize=200, # Max Ping: Mở 2000 slot - Đủ cân 500k users (với cơ chế pooling) maxsize=50, # Tối đa 50 kết nối (Đủ cân hàng nghìn users, an toàn trên Windows)
connect_timeout=10, connect_timeout=10,
) )
return StarRocksConnection._shared_pool return StarRocksConnection._shared_pool
......
...@@ -84,7 +84,7 @@ LANGFUSE_PUBLIC_KEY: str | None = os.getenv("LANGFUSE_PUBLIC_KEY") ...@@ -84,7 +84,7 @@ LANGFUSE_PUBLIC_KEY: str | None = os.getenv("LANGFUSE_PUBLIC_KEY")
LANGFUSE_BASE_URL: str | None = os.getenv("LANGFUSE_BASE_URL", "https://cloud.langfuse.com") LANGFUSE_BASE_URL: str | None = os.getenv("LANGFUSE_BASE_URL", "https://cloud.langfuse.com")
# ====================== LANGSMITH CONFIGURATION ====================== # ====================== LANGSMITH CONFIGURATION ======================
LANGSMITH_TRACING = os.getenv("LANGSMITH_TRACING", "true") LANGSMITH_TRACING = os.getenv("LANGSMITH_TRACING", "false")
LANGSMITH_ENDPOINT = os.getenv("LANGSMITH_ENDPOINT", "https://api.smith.langchain.com") LANGSMITH_ENDPOINT = os.getenv("LANGSMITH_ENDPOINT", "https://api.smith.langchain.com")
LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY") LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")
LANGSMITH_PROJECT = os.getenv("LANGSMITH_PROJECT") LANGSMITH_PROJECT = os.getenv("LANGSMITH_PROJECT")
......
...@@ -172,43 +172,44 @@ async def startup_event(): ...@@ -172,43 +172,44 @@ async def startup_event():
@app.post("/test/db-search") @app.post("/test/db-search")
async def test_db_search(request: SearchRequest): async def test_db_search(request: SearchRequest):
""" """
Test StarRocks DB Search. Test StarRocks DB Multi-Search Parallel.
KHÔNG GỌI OPENAI - Chỉ test DB.
""" """
import asyncio
start_time = time.time() start_time = time.time()
try: try:
params = MockParams(query_text=request.query, limit=request.limit) # Giả lập Multi-Search với 2 query song song
sql = build_starrocks_query(params) params1 = MockParams(query_text=request.query)
params2 = MockParams(query_text=request.query + " nam") # Truy vấn phái sinh
# Launch parallel task creation
tasks = [build_starrocks_query(params1), build_starrocks_query(params2)]
sqls = await asyncio.gather(*tasks)
db = StarRocksConnection() db = StarRocksConnection()
products = await db.execute_query_async(sql)
# Parallel DB fetching
# Filter fields db_tasks = [db.execute_query_async(sql) for sql in sqls]
limited_products = products[:5] results = await asyncio.gather(*db_tasks)
ALLOWED_FIELDS = {
"product_name", # Trích xuất và làm sạch dữ liệu
"sale_price", ALLOWED_FIELDS = {"product_name", "sale_price", "internal_ref_code", "product_image_url_thumbnail"}
"original_price", all_products = []
"product_image_url_thumbnail", for products in results:
"product_web_url", clean = [{k: v for k, v in p.items() if k in ALLOWED_FIELDS} for p in products[:5]]
"master_color", all_products.extend(clean)
"product_color_name",
"material",
"internal_ref_code",
}
clean_products = [{k: v for k, v in p.items() if k in ALLOWED_FIELDS} for p in limited_products]
process_time = time.time() - start_time process_time = time.time() - start_time
return { return {
"status": "success", "status": "success",
"count": len(clean_products), "count": len(all_products),
"process_time_seconds": round(process_time, 4), "process_time_seconds": round(process_time, 4),
"products": clean_products, "products": all_products,
"_queries_run": len(sqls)
} }
except Exception as e: except Exception as e:
logger.error(f"DB Search Error: {e}") logger.error(f"DB Multi-Search Error: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
......
...@@ -10,6 +10,10 @@ if platform.system() == "Windows": ...@@ -10,6 +10,10 @@ if platform.system() == "Windows":
print("🔧 Windows detected: Applying SelectorEventLoopPolicy globally...") print("🔧 Windows detected: Applying SelectorEventLoopPolicy globally...")
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Tạm thời tắt LangChain Tracing để tránh lỗi recursion (Đúng ý bro)
os.environ["LANGCHAIN_TRACING_V2"] = "false"
os.environ["LANGCHAIN_API_KEY"] = ""
# Sau khi fix xong mới import tiếp # Sau khi fix xong mới import tiếp
import logging import logging
...@@ -21,6 +25,7 @@ from fastapi.staticfiles import StaticFiles # Import cái này để mount HTML ...@@ -21,6 +25,7 @@ from fastapi.staticfiles import StaticFiles # Import cái này để mount HTML
# Updated APIs (Import sau cùng để DB nhận cấu hình fix ở trên) # Updated APIs (Import sau cùng để DB nhận cấu hình fix ở trên)
from api.chatbot_route import router as chatbot_router from api.chatbot_route import router as chatbot_router
from api.conservation_route import router as conservation_router from api.conservation_route import router as conservation_router
from api.test_route import router as test_router # ← Test API (isolated)
from config import PORT from config import PORT
# Configure Logging # Configure Logging
...@@ -46,6 +51,7 @@ app.add_middleware( ...@@ -46,6 +51,7 @@ app.add_middleware(
app.include_router(conservation_router) app.include_router(conservation_router)
app.include_router(chatbot_router, prefix="/api/agent") app.include_router(chatbot_router, prefix="/api/agent")
app.include_router(test_router, prefix="/api") # ← Test routes
# ========================================== # ==========================================
# 🟢 ĐOẠN MOUNT STATIC HTML CỦA BRO ĐÂY 🟢 # 🟢 ĐOẠN MOUNT STATIC HTML CỦA BRO ĐÂY 🟢
......
...@@ -8,11 +8,56 @@ ...@@ -8,11 +8,56 @@
<style> <style>
body { body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif; font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
margin: 0;
padding: 0;
background-color: #1e1e1e;
color: #e0e0e0;
}
/* Navigation Header */
.nav-header {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
padding: 15px 30px;
display: flex;
justify-content: space-between;
align-items: center;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.3);
}
.nav-header h1 {
margin: 0;
color: white;
font-size: 1.5em;
}
.nav-links {
display: flex;
gap: 15px;
}
.nav-links a {
color: white;
text-decoration: none;
padding: 8px 16px;
border-radius: 6px;
background: rgba(255, 255, 255, 0.2);
transition: all 0.3s;
font-weight: 500;
}
.nav-links a:hover {
background: rgba(255, 255, 255, 0.3);
transform: translateY(-2px);
}
.nav-links a.active {
background: rgba(255, 255, 255, 0.4);
}
.main-content {
max-width: 900px; max-width: 900px;
margin: 0 auto; margin: 0 auto;
padding: 20px; padding: 20px;
background-color: #1e1e1e;
color: #e0e0e0;
} }
.container { .container {
...@@ -217,6 +262,16 @@ ...@@ -217,6 +262,16 @@
</head> </head>
<body> <body>
<!-- Navigation Header -->
<div class="nav-header">
<h1>🤖 Canifa AI System</h1>
<div class="nav-links">
<a href="/static/index.html" class="active">💬 Chatbot</a>
<a href="/static/loadtest.html">🔬 Load Test</a>
</div>
</div>
<div class="main-content">
<div class="container"> <div class="container">
<div class="header"> <div class="header">
<h2>🤖 Canifa AI Chat</h2> <h2>🤖 Canifa AI Chat</h2>
...@@ -451,6 +506,7 @@ ...@@ -451,6 +506,7 @@
document.getElementById('messagesArea').innerHTML = ''; document.getElementById('messagesArea').innerHTML = '';
} }
</script> </script>
</div> <!-- Close main-content -->
</body> </body>
</html> </html>
\ No newline at end of file
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment