Commit c3cabf65 authored by anhvh

update : format some files

parent 01f2f0ab
Pipeline #3311 passed with stage in 23 seconds
@@ -19,7 +19,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
COPY . .
# Expose port 5000 (server port)
-EXPOSE 5000
+EXPOSE 5001
# Run the server with uvicorn
-CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "5000"]
+CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "5001"]
# """
# Fashion Q&A Agent Controller
# Switched to LangSmith for tracing (configured via environment variables).
# """
# import asyncio
# import json
# import logging
# import uuid
# from fastapi import BackgroundTasks, HTTPException
# from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
# from langchain_core.runnables import RunnableConfig
# from common.conversation_manager import ConversationManager, get_conversation_manager
# from common.llm_factory import create_llm
# from config import DEFAULT_MODEL
# from .graph import build_graph
# from .models import AgentState, get_config
# from .tools.get_tools import get_all_tools
# logger = logging.getLogger(__name__)
# async def chat_controller(
# query: str,
# user_id: str,
# background_tasks: BackgroundTasks,
# model_name: str = DEFAULT_MODEL,
# images: list[str] | None = None,
# ) -> dict:
# """
# Controller main logic for non-streaming chat requests.
# TEMPORARILY BYPASS LANGGRAPH for debugging.
# """
# logger.info(f"▶️ Starting chat_controller with model: {model_name} for user: {user_id}")
# # 🔧 TEMPORARY: Direct LLM call bypassing LangGraph
# logger.info("🔧 [DEBUG] BYPASSING LangGraph - calling LLM directly")
# try:
# llm = create_llm(model_name=model_name, streaming=False, json_mode=True)
# # Simple direct call
# from langchain_core.messages import HumanMessage, SystemMessage
# messages = [
# SystemMessage(content="You are a helpful fashion assistant. Respond in JSON format with 'response' field."),
# HumanMessage(content=query)
# ]
# logger.info("🔧 [DEBUG] Invoking LLM directly...")
# response = await asyncio.wait_for(
# llm.ainvoke(messages),
# timeout=30.0
# )
# logger.info(f"🔧 [DEBUG] LLM response received: {response.content[:100]}")
# return {
# "ai_response": response.content,
# "product_ids": [],
# }
# except asyncio.TimeoutError:
# logger.error("❌ LLM call timeout!")
# raise HTTPException(status_code=504, detail="Request timeout")
# except Exception as e:
# logger.error(f"💥 Chat error for user {user_id}: {e}", exc_info=True)
# raise
# def _extract_product_ids(messages: list) -> list[str]:
# """
# Extract product internal_ref_code from tool messages (data_retrieval_tool results).
# Returns list of unique product IDs.
# """
# product_ids = []
# for msg in messages:
# if isinstance(msg, ToolMessage):
# try:
# # Tool result is JSON string
# tool_result = json.loads(msg.content)
# # Check if tool returned products
# if tool_result.get("status") == "success" and "products" in tool_result:
# for product in tool_result["products"]:
# product_id = product.get("internal_ref_code")
# if product_id and product_id not in product_ids:
# product_ids.append(product_id)
# except (json.JSONDecodeError, KeyError, TypeError) as e:
# logger.debug(f"Could not parse tool message for product IDs: {e}")
# continue
# return product_ids
# def _prepare_execution_context(query: str, user_id: str, history: list, images: list | None):
# """Prepare initial state and execution config for the graph run."""
# initial_state: AgentState = {
# "user_query": HumanMessage(content=query),
# "messages": [HumanMessage(content=query)],
# "history": history,
# "user_id": user_id,
# "images_embedding": [],
# "ai_response": None,
# }
# run_id = str(uuid.uuid4())
# # Metadata for LangSmith
# metadata = {"user_id": user_id, "run_id": run_id}
# exec_config = RunnableConfig(
# configurable={
# "user_id": user_id,
# "transient_images": images or [],
# "run_id": run_id,
# },
# run_id=run_id,
# metadata=metadata, # Attach metadata for LangSmith
# )
# return initial_state, exec_config
# async def _handle_post_chat_async(
# memory: ConversationManager, user_id: str, human_query: str, ai_msg: AIMessage | None
# ):
# """Save chat history in background task after response is sent."""
# if ai_msg:
# try:
# await memory.save_conversation_turn(user_id, human_query, ai_msg.content)
# logger.debug(f"Saved conversation for user {user_id}")
# except Exception as e:
# logger.error(f"Failed to save conversation for user {user_id}: {e}", exc_info=True)
""" """
Fashion Q&A Agent Controller Fashion Q&A Agent Controller
Switched to LangSmith for tracing (configured via environment variables). Switched to LangSmith for tracing (configured via environment variables).
......
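For orientation, here is a minimal sketch of how the commented-out helpers above (_prepare_execution_context, _extract_product_ids) and build_graph would fit together once the LangGraph bypass is removed. It is not part of this commit; the empty history and the return shape are assumptions taken from the commented code, and ainvoke is the standard async entry point of a compiled LangGraph graph.

# Hypothetical re-wiring of the LangGraph path (assumptions flagged inline).
async def chat_controller_via_graph(query: str, user_id: str) -> dict:
    history: list = []  # assumption: loaded from ConversationManager in the real controller
    initial_state, exec_config = _prepare_execution_context(query, user_id, history, images=None)
    graph = build_graph()
    # ainvoke is the standard async entry point of a compiled LangGraph graph
    final_state = await graph.ainvoke(initial_state, config=exec_config)
    ai_msg = final_state.get("ai_response")
    return {
        "ai_response": ai_msg.content if ai_msg else "",
        "product_ids": _extract_product_ids(final_state.get("messages", [])),
    }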
@@ -5,16 +5,17 @@ All resources (LLM, Tools) are initialized in __init__.
Use ConversationManager (Postgres) to store history instead of checkpoints.
"""
+import asyncio
import logging
from typing import Any
from langchain_core.language_models import BaseChatModel
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableConfig
-from langgraph.cache.memory import InMemoryCache
+# from langgraph.cache.memory import InMemoryCache  # DISABLED FOR DEBUG
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode
-from langgraph.types import CachePolicy
+# from langgraph.types import CachePolicy  # DISABLED FOR DEBUG
from common.llm_factory import create_llm
@@ -56,24 +57,35 @@ class CANIFAGraph:
]
)
self.chain = self.prompt_template | self.llm_with_tools
-self.cache = InMemoryCache()
+# self.cache = InMemoryCache()  # DISABLED FOR DEBUG
async def _agent_node(self, state: AgentState, config: RunnableConfig) -> dict:
"""Agent node - simply pours the request-specific data into the prebuilt prompt template."""
+logger.info("🔧 [DEBUG] _agent_node CALLED!")
messages = state.get("messages", [])
history = state.get("history", [])
user_query = state.get("user_query")
+logger.info(f"🔧 [DEBUG] _agent_node processing {len(messages)} messages")
transient_images = config.get("configurable", {}).get("transient_images", [])
if transient_images and messages:
pass
# Invoke chain with user_query, history, and messages
-response = await self.chain.ainvoke({
+logger.info("🔧 [DEBUG] About to invoke chain with 60s timeout")
+try:
+response = await asyncio.wait_for(
+self.chain.ainvoke({
"user_query": [user_query] if user_query else [],
"history": history,
"messages": messages
-})
+}),
+timeout=60.0
+)
+logger.info("🔧 [DEBUG] Chain invoked successfully")
return {"messages": [response], "ai_response": response}
+except asyncio.TimeoutError:
+logger.error("❌ Chain invoke TIMEOUT after 60s!")
+raise TimeoutError("LLM chain invoke timed out after 60 seconds")
def _should_continue(self, state: AgentState) -> str:
@@ -102,7 +114,7 @@ class CANIFAGraph:
# Nodes
workflow.add_node("agent", self._agent_node)
-workflow.add_node("retrieve_tools", ToolNode(self.retrieval_tools), cache_policy=CachePolicy(ttl=3600))
+workflow.add_node("retrieve_tools", ToolNode(self.retrieval_tools))  # cache_policy DISABLED
workflow.add_node("collect_tools", ToolNode(self.collection_tools))
# Edges
@@ -115,8 +127,8 @@ class CANIFAGraph:
workflow.add_edge("retrieve_tools", "agent")
workflow.add_edge("collect_tools", "agent")
-self._compiled_graph = workflow.compile(cache=self.cache)  # No Checkpointer
+self._compiled_graph = workflow.compile()  # No Checkpointer, No Cache (DEBUG)
-logger.info("✅ Graph compiled (Langfuse callback will be per-run)")
+logger.info("✅ Graph compiled WITHOUT cache (DEBUG MODE)")
return self._compiled_graph
@@ -129,10 +141,11 @@ class CANIFAGraph:
_instance: list[CANIFAGraph | None] = [None]
def build_graph(config: AgentConfig | None = None, llm: BaseChatModel | None = None, tools: list | None = None) -> Any:
-"""Get compiled graph (singleton)."""
+"""Get compiled graph (DISABLED SINGLETON FOR DEBUG)."""
-if _instance[0] is None:
+# ALWAYS create new instance to avoid async state conflicts
-_instance[0] = CANIFAGraph(config, llm, tools)
+logger.info("🔧 [DEBUG] Building NEW graph instance (singleton disabled)")
-return _instance[0].build()
+instance = CANIFAGraph(config, llm, tools)
+return instance.build()
def get_graph_manager(
...
@@ -107,8 +107,11 @@ async def data_retrieval_tool(searches: list[SearchItem]) -> str:
{"query": "áo phông bé trai màu xanh", "keywords": "áo phông", "master_color": "Xanh", "gender_by_product": "male", "age_by_product": "others"}
]
"""
+logger.info("🔧 [DEBUG] data_retrieval_tool STARTED")
try:
+logger.info("🔧 [DEBUG] Creating StarRocksConnection instance")
db = StarRocksConnection()
+logger.info("🔧 [DEBUG] StarRocksConnection created successfully")
# 0. Log input parameters (as requested)
logger.info(f"📥 [Tool Input] data_retrieval_tool received {len(searches)} items:")
@@ -116,12 +119,15 @@ async def data_retrieval_tool(searches: list[SearchItem]) -> str:
logger.info(f" 🔹 Item [{idx}]: {item.dict(exclude_none=True)}")
# 1. Create tasks to run in parallel
+logger.info("🔧 [DEBUG] Creating parallel tasks")
tasks = []
for item in searches:
tasks.append(_execute_single_search(db, item))
logger.info(f"🚀 [Parallel Search] Executing {len(searches)} queries simultaneously...")
+logger.info("🔧 [DEBUG] About to call asyncio.gather()")
results = await asyncio.gather(*tasks)
+logger.info(f"🔧 [DEBUG] asyncio.gather() completed with {len(results)} results")
# 2. Aggregate results
combined_results = []
@@ -147,9 +153,14 @@ async def data_retrieval_tool(searches: list[SearchItem]) -> str:
async def _execute_single_search(db: StarRocksConnection, item: SearchItem) -> list[dict]:
"""Execute a single search query (async)."""
try:
+logger.info(f"🔧 [DEBUG] _execute_single_search STARTED for query: {item.query[:50] if item.query else 'None'}")
# build_starrocks_query handles embedding internally (async)
+logger.info("🔧 [DEBUG] Calling build_starrocks_query()")
sql = await build_starrocks_query(item)
+logger.info(f"🔧 [DEBUG] SQL query built, length: {len(sql)}")
+logger.info("🔧 [DEBUG] Calling db.execute_query_async()")
products = await db.execute_query_async(sql)
+logger.info(f"🔧 [DEBUG] Query executed, got {len(products)} products")
return _format_product_results(products)
except Exception as e:
logger.error(f"Single search error for item {item}: {e}")
...
@@ -90,13 +90,18 @@ async def build_starrocks_query(params, query_vector: list[float] | None = None)
2. Vector Search (HNSW Index)
3. Grouping (group colors by style)
"""
+logger.info("🔧 [DEBUG] build_starrocks_query STARTED")
# --- Process vector in query field ---
query_text = getattr(params, "query", None)
+logger.info(f"🔧 [DEBUG] query_text: {query_text[:50] if query_text else 'None'}")
if query_text and query_vector is None:
+logger.info("🔧 [DEBUG] Calling create_embedding_async()")
query_vector = await create_embedding_async(query_text)
+logger.info(f"🔧 [DEBUG] Embedding created, dimension: {len(query_vector) if query_vector else 0}")
# --- Build filter clauses ---
+logger.info("🔧 [DEBUG] Building WHERE clauses")
where_clauses = _get_where_clauses(params)
where_sql = " AND ".join(where_clauses) if where_clauses else "1=1"
...
"""
Test API Routes - all endpoints for testing (isolated)
DO NOT TOUCH the main chatbot_route.py!
"""
import asyncio
import logging
import random
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
from agent.models import QueryRequest
from common.load_test_manager import get_load_test_manager
router = APIRouter(prefix="/test", tags=["Testing & Load Test"])
logger = logging.getLogger(__name__)
# ==================== MOCK CHAT ENDPOINT ====================
@router.post("/chat-mock", summary="Mock Chat API (for Load Testing)")
async def mock_chat(req: QueryRequest):
"""
MOCK endpoint for performance testing that does NOT spend OpenAI credits.
Returns a simulated response with random latency.
⚠️ FOR LOAD TESTING ONLY!
"""
# Simulate real API latency (100-500 ms)
await asyncio.sleep(random.uniform(0.1, 0.5))
# Mock responses
mock_responses = [
"Dạ em đã tìm được một số mẫu áo sơ mi nam đẹp cho anh/chị ạ. Anh/chị có thể xem các sản phẩm sau đây.",
"Em xin gợi ý một số mẫu áo thun nam phù hợp với yêu cầu của anh/chị.",
"Dạ, em có tìm thấy một số mẫu quần jean nam trong khoảng giá anh/chị yêu cầu ạ.",
"Em xin giới thiệu các mẫu áo khoác nam đang có khuyến mãi tốt ạ.",
"Anh/chị có thể tham khảo các mẫu giày thể thao nam đang được ưa chuộng nhất.",
]
# Mock product IDs
mock_product_ids = [
f"MOCK_PROD_{random.randint(1000, 9999)}"
for _ in range(random.randint(2, 5))
]
return {
"status": "success",
"ai_response": random.choice(mock_responses),
"product_ids": mock_product_ids,
"_mock": True, # Flag để biết đây là mock response
"_latency_ms": random.randint(100, 500)
}
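A hedged usage sketch for the mock endpoint, not part of this commit: it assumes the router is still mounted under /api (as in the previous server.py), that the backend listens on the new port 5001, and that QueryRequest accepts query and user_id fields.

import asyncio
import httpx

async def smoke_test_mock_chat() -> None:
    # Hypothetical client call; the path prefix and payload fields are assumptions.
    async with httpx.AsyncClient(base_url="http://localhost:5001") as client:
        resp = await client.post("/api/test/chat-mock", json={"query": "áo sơ mi nam", "user_id": "load-test-1"})
        resp.raise_for_status()
        data = resp.json()
        # _mock distinguishes the simulated answer from a real LLM response
        print(data["_mock"], data["ai_response"], data["product_ids"])

if __name__ == "__main__":
    asyncio.run(smoke_test_mock_chat())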
@router.post("/db-search", summary="DB Search Mock (Test StarRocks Performance)")
async def mock_db_search(req: QueryRequest):
"""
Endpoint to test the PERFORMANCE of StarRocks DB queries.
Supports Multi-Search (parallel).
"""
from agent.tools.data_retrieval_tool import data_retrieval_tool
try:
# Mock Multi-Search call (Parallel)
tool_result = await data_retrieval_tool.ainvoke({
"searches": [
{
"keywords": "áo sơ mi",
"gender_by_product": "male",
"price_max": 500000
},
{
"keywords": "quần jean",
"gender_by_product": "male",
"price_max": 800000
}
]
})
# Parse result
import json
result_data = json.loads(tool_result)
# Collect all product IDs from all search results
all_product_ids = []
if result_data.get("status") == "success":
for res in result_data.get("results", []):
ids = [p.get("internal_ref_code", "") for p in res.get("products", [])]
all_product_ids.extend(ids)
return {
"status": "success",
"ai_response": "Kết quả Multi-Search Parallel từ DB",
"product_ids": list(set(all_product_ids)),
"_db_test": True,
"_queries_count": len(result_data.get("results", [])),
"_total_products": len(all_product_ids)
}
except Exception as e:
logger.error(f"DB multi-search error: {e}")
return {
"status": "error",
"ai_response": f"Lỗi: {str(e)}",
"product_ids": [],
"_error": str(e)
}
# ==================== LOAD TEST CONTROL ====================
class StartTestRequest(BaseModel):
"""Request body để start test"""
target_url: str = Field(default="http://localhost:5000", description="Base URL của target")
num_users: int = Field(default=10, ge=1, le=1000, description="Số lượng concurrent users")
spawn_rate: int = Field(default=2, ge=1, le=100, description="Tốc độ spawn users (users/second)")
duration_seconds: int = Field(default=60, ge=10, le=600, description="Thời gian chạy test (giây)")
test_type: str = Field(default="chat_mock", description="chat_mock | chat_real | history")
@router.post("/loadtest/start", summary="Bắt đầu Load Test")
async def start_load_test(req: StartTestRequest):
"""
Start a load test with the specified config.
**test_type options:**
- `chat_mock`: Test the mock chat API (costs nothing) ⭐ Recommended
- `chat_real`: Test the real chat API (SPENDS OpenAI credits!)
- `history`: Test the history API (no LLM cost)
"""
try:
manager = get_load_test_manager()
config_dict = req.model_dump()
result = manager.start_test(config_dict)
if "error" in result:
raise HTTPException(status_code=400, detail=result["error"])
return {
"status": "success",
"message": "Load test started",
"data": result
}
except Exception as e:
logger.error(f"Error starting load test: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/loadtest/stop", summary="Dừng Load Test")
async def stop_load_test():
"""Dừng load test đang chạy"""
try:
manager = get_load_test_manager()
result = manager.stop_test()
if "error" in result:
raise HTTPException(status_code=400, detail=result["error"])
return {
"status": "success",
"message": "Load test stopped",
"data": result
}
except Exception as e:
logger.error(f"Error stopping load test: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/loadtest/metrics", summary="Lấy Metrics Realtime")
async def get_load_test_metrics():
"""
Get realtime metrics for the load test.
The frontend polls this endpoint every 2 seconds.
"""
try:
manager = get_load_test_manager()
metrics = manager.get_metrics()
return {
"status": "success",
"data": metrics
}
except Exception as e:
logger.error(f"Error getting metrics: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/loadtest/status", summary="Check Test Status")
async def get_load_test_status():
"""Check xem load test có đang chạy không"""
try:
manager = get_load_test_manager()
return {
"status": "success",
"data": {
"is_running": manager.is_running(),
"current_status": manager.status
}
}
except Exception as e:
logger.error(f"Error getting status: {e}")
raise HTTPException(status_code=500, detail=str(e))
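A hedged sketch of the client flow the docstrings describe: start a run, poll /loadtest/metrics roughly every 2 seconds, then stop. It is not part of this commit; the /api prefix and port 5001 are assumptions carried over from the previous server.py wiring and the new compose config.

import asyncio
import httpx

async def run_load_test_session(duration_seconds: int = 60) -> None:
    async with httpx.AsyncClient(base_url="http://localhost:5001", timeout=10.0) as client:
        start = await client.post("/api/test/loadtest/start", json={
            "target_url": "http://localhost:5001",
            "num_users": 10,
            "spawn_rate": 2,
            "duration_seconds": duration_seconds,
            "test_type": "chat_mock",  # the recommended no-cost option
        })
        start.raise_for_status()
        # Poll metrics every 2 seconds while the test runs
        for _ in range(duration_seconds // 2):
            metrics = await client.get("/api/test/loadtest/metrics")
            print(metrics.json()["data"])
            await asyncio.sleep(2)
        stop = await client.post("/api/test/loadtest/stop")
        print(stop.json())

if __name__ == "__main__":
    asyncio.run(run_load_test_session(20))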
@@ -88,12 +88,14 @@ class LLMFactory:
"streaming": streaming,
"api_key": key,
"temperature": 0,
+"timeout": 30.0,  # 30 second timeout
+"max_retries": 2,
}
# If json_mode is enabled, inject it directly into the constructor
if json_mode:
llm_kwargs["model_kwargs"] = {"response_format": {"type": "json_object"}}
-logger.info(f"⚙️ Initializing OpenAI in JSON mode: {model_name}")
+logger.info(f"⚙️ Initializing OpenAI in JSON mode: {model_name} with timeout=30s")
llm = ChatOpenAI(**llm_kwargs)
logger.info(f"✅ Created OpenAI: {model_name}")
...
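For reference, a minimal sketch of what the factory now constructs, assuming langchain_openai and an illustrative model name (the real DEFAULT_MODEL lives in config.py); timeout, max_retries and model_kwargs are standard ChatOpenAI constructor arguments.

from langchain_openai import ChatOpenAI

# Illustrative only; the factory passes model_name, streaming and api_key through.
llm = ChatOpenAI(
    model="gpt-4o-mini",  # assumption, not necessarily the project's DEFAULT_MODEL
    temperature=0,
    streaming=False,
    timeout=30.0,      # new: fail fast instead of hanging on a slow OpenAI call
    max_retries=2,     # new: bounded retries on transient errors
    model_kwargs={"response_format": {"type": "json_object"}},  # json_mode branch
)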
""" """
Load Test Manager - Chạy Locust programmatically Load Test Manager - Chạy Locust programmatically
Singleton service để quản lý load testing cho APIs Singleton service để quản lý load testing cho APIs
DISABLED: Locust monkey-patches SSL and breaks async OpenAI client
""" """
import logging import logging
...@@ -10,8 +11,9 @@ from dataclasses import dataclass, asdict ...@@ -10,8 +11,9 @@ from dataclasses import dataclass, asdict
from enum import Enum from enum import Enum
from typing import Any from typing import Any
from locust import HttpUser, between, task # DISABLED: Locust monkey-patches ssl and breaks async OpenAI client
from locust.env import Environment # from locust import HttpUser, between, task
# from locust.env import Environment
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
......
@@ -5,11 +5,11 @@ services:
container_name: canifa_backend
env_file: .env
ports:
-  - "5000:5000"
+  - "5001:5001"
# volumes:
# - .:/app
environment:
-  - PORT=5000
+  - PORT=5001
restart: unless-stopped
logging:
driver: "json-file"
...
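With both the Dockerfile and the compose mapping moved to 5001, a quick hedged check that the container answers on the new port; /docs is FastAPI's default OpenAPI UI path and may differ if it is disabled in this app.

import httpx

def check_backend_up(base_url: str = "http://localhost:5001") -> bool:
    # Returns True when the FastAPI app responds on the newly mapped port.
    try:
        return httpx.get(f"{base_url}/docs", timeout=5.0).status_code == 200
    except httpx.HTTPError:
        return False

if __name__ == "__main__":
    print(check_backend_up())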
import asyncio
-import os  # need os to mount static files
+import os #
import platform
-# ==========================================
-# 🛑 IMPORTANT: WINDOWS FIX LIVES HERE 🛑
-# This line must run BEFORE importing any other library
-# ==========================================
if platform.system() == "Windows":
print("🔧 Windows detected: Applying SelectorEventLoopPolicy globally...")
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-# Temporarily disable LangChain tracing to avoid a recursion error (as requested)
os.environ["LANGCHAIN_TRACING_V2"] = "false"
os.environ["LANGCHAIN_API_KEY"] = ""
@@ -20,12 +15,11 @@ import logging
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
-from fastapi.staticfiles import StaticFiles  # import this to mount HTML
+from fastapi.staticfiles import StaticFiles
# Updated APIs (imported last so the DB picks up the config fix above)
from api.chatbot_route import router as chatbot_router
from api.conservation_route import router as conservation_router
-from api.test_route import router as test_router  # ← Test API (isolated)
from config import PORT
# Configure Logging
@@ -51,11 +45,7 @@ app.add_middleware(
app.include_router(conservation_router)
app.include_router(chatbot_router, prefix="/api/agent")
-app.include_router(test_router, prefix="/api")  # ← Test routes
-# ==========================================
-# 🟢 STATIC HTML MOUNT SECTION 🟢
-# ==========================================
try:
static_dir = os.path.join(os.path.dirname(__file__), "static")
if not os.path.exists(static_dir):
...
@@ -267,7 +267,6 @@
<h1>🤖 Canifa AI System</h1>
<div class="nav-links">
<a href="/static/index.html" class="active">💬 Chatbot</a>
-<a href="/static/loadtest.html">🔬 Load Test</a>
</div>
</div>
...