Commit c3cabf65 authored by anhvh

update : format some files

parent 01f2f0ab
Pipeline #3311 passed with stage in 23 seconds
......@@ -19,7 +19,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
COPY . .
# Expose port 5000 (port the server runs on)
EXPOSE 5000
EXPOSE 5001
# Command to run the server with uvicorn
CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "5000"]
CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "5001"]
# """
# Fashion Q&A Agent Controller
# Switched to LangSmith for tracing (configured via environment variables).
# """
# import asyncio
# import json
# import logging
# import uuid
# from fastapi import BackgroundTasks, HTTPException
# from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
# from langchain_core.runnables import RunnableConfig
# from common.conversation_manager import ConversationManager, get_conversation_manager
# from common.llm_factory import create_llm
# from config import DEFAULT_MODEL
# from .graph import build_graph
# from .models import AgentState, get_config
# from .tools.get_tools import get_all_tools
# logger = logging.getLogger(__name__)
# async def chat_controller(
# query: str,
# user_id: str,
# background_tasks: BackgroundTasks,
# model_name: str = DEFAULT_MODEL,
# images: list[str] | None = None,
# ) -> dict:
# """
# Main controller logic for non-streaming chat requests.
# TEMPORARILY BYPASS LANGGRAPH for debugging.
# """
# logger.info(f"▶️ Starting chat_controller with model: {model_name} for user: {user_id}")
# # 🔧 TEMPORARY: Direct LLM call bypassing LangGraph
# logger.info("🔧 [DEBUG] BYPASSING LangGraph - calling LLM directly")
# try:
# llm = create_llm(model_name=model_name, streaming=False, json_mode=True)
# # Simple direct call
# from langchain_core.messages import HumanMessage, SystemMessage
# messages = [
# SystemMessage(content="You are a helpful fashion assistant. Respond in JSON format with 'response' field."),
# HumanMessage(content=query)
# ]
# logger.info("🔧 [DEBUG] Invoking LLM directly...")
# response = await asyncio.wait_for(
# llm.ainvoke(messages),
# timeout=30.0
# )
# logger.info(f"🔧 [DEBUG] LLM response received: {response.content[:100]}")
# return {
# "ai_response": response.content,
# "product_ids": [],
# }
# except asyncio.TimeoutError:
# logger.error("❌ LLM call timeout!")
# raise HTTPException(status_code=504, detail="Request timeout")
# except Exception as e:
# logger.error(f"💥 Chat error for user {user_id}: {e}", exc_info=True)
# raise
# def _extract_product_ids(messages: list) -> list[str]:
# """
# Extract product internal_ref_code from tool messages (data_retrieval_tool results).
# Returns list of unique product IDs.
# """
# product_ids = []
# for msg in messages:
# if isinstance(msg, ToolMessage):
# try:
# # Tool result is JSON string
# tool_result = json.loads(msg.content)
# # Check if tool returned products
# if tool_result.get("status") == "success" and "products" in tool_result:
# for product in tool_result["products"]:
# product_id = product.get("internal_ref_code")
# if product_id and product_id not in product_ids:
# product_ids.append(product_id)
# except (json.JSONDecodeError, KeyError, TypeError) as e:
# logger.debug(f"Could not parse tool message for product IDs: {e}")
# continue
# return product_ids
# def _prepare_execution_context(query: str, user_id: str, history: list, images: list | None):
# """Prepare initial state and execution config for the graph run."""
# initial_state: AgentState = {
# "user_query": HumanMessage(content=query),
# "messages": [HumanMessage(content=query)],
# "history": history,
# "user_id": user_id,
# "images_embedding": [],
# "ai_response": None,
# }
# run_id = str(uuid.uuid4())
# # Metadata for LangSmith
# metadata = {"user_id": user_id, "run_id": run_id}
# exec_config = RunnableConfig(
# configurable={
# "user_id": user_id,
# "transient_images": images or [],
# "run_id": run_id,
# },
# run_id=run_id,
# metadata=metadata, # Attach metadata for LangSmith
# )
# return initial_state, exec_config
# async def _handle_post_chat_async(
# memory: ConversationManager, user_id: str, human_query: str, ai_msg: AIMessage | None
# ):
# """Save chat history in background task after response is sent."""
# if ai_msg:
# try:
# await memory.save_conversation_turn(user_id, human_query, ai_msg.content)
# logger.debug(f"Saved conversation for user {user_id}")
# except Exception as e:
# logger.error(f"Failed to save conversation for user {user_id}: {e}", exc_info=True)
"""
Fashion Q&A Agent Controller
Switched to LangSmith for tracing (configured via environment variables).
......
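The commented-out helpers above imply the intended production flow: run the controller, return the reply, then persist history after the response has gone out. A hedged sketch of that wiring, reusing the helper names from the block above; the route itself and the AIMessage conversion are assumptions, not this repo's code:

from fastapi import BackgroundTasks
from langchain_core.messages import AIMessage

async def chat_route(query: str, user_id: str, background_tasks: BackgroundTasks):
    memory = get_conversation_manager()
    result = await chat_controller(query, user_id, background_tasks)
    # add_task runs after the HTTP response is sent, so saving history
    # adds no latency to the user-visible reply
    ai_msg = AIMessage(content=result["ai_response"]) if result.get("ai_response") else None
    background_tasks.add_task(_handle_post_chat_async, memory, user_id, query, ai_msg)
    return result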
......@@ -5,16 +5,17 @@ All resources (LLM, Tools) are initialized in __init__.
Uses ConversationManager (Postgres) to store history instead of checkpoints.
"""
import asyncio
import logging
from typing import Any
from langchain_core.language_models import BaseChatModel
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableConfig
from langgraph.cache.memory import InMemoryCache
# from langgraph.cache.memory import InMemoryCache # DISABLED FOR DEBUG
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode
from langgraph.types import CachePolicy
# from langgraph.types import CachePolicy # DISABLED FOR DEBUG
from common.llm_factory import create_llm
......@@ -56,24 +57,35 @@ class CANIFAGraph:
]
)
self.chain = self.prompt_template | self.llm_with_tools
self.cache = InMemoryCache()
# self.cache = InMemoryCache() # DISABLED FOR DEBUG
async def _agent_node(self, state: AgentState, config: RunnableConfig) -> dict:
"""Agent node - Chỉ việc đổ dữ liệu riêng vào khuôn đã có sẵn."""
logger.info("🔧 [DEBUG] _agent_node CALLED!")
messages = state.get("messages", [])
history = state.get("history", [])
user_query = state.get("user_query")
logger.info(f"🔧 [DEBUG] _agent_node processing {len(messages)} messages")
transient_images = config.get("configurable", {}).get("transient_images", [])
if transient_images and messages:
pass
# Invoke chain with user_query, history, and messages
response = await self.chain.ainvoke({
logger.info("🔧 [DEBUG] About to invoke chain with 60s timeout")
try:
response = await asyncio.wait_for(
self.chain.ainvoke({
"user_query": [user_query] if user_query else [],
"history": history,
"messages": messages
})
}),
timeout=60.0
)
logger.info("🔧 [DEBUG] Chain invoked successfully")
return {"messages": [response], "ai_response": response}
except asyncio.TimeoutError:
logger.error("❌ Chain invoke TIMEOUT after 60s!")
raise TimeoutError("LLM chain invoke timed out after 60 seconds")
def _should_continue(self, state: AgentState) -> str:
......@@ -102,7 +114,7 @@ class CANIFAGraph:
# Nodes
workflow.add_node("agent", self._agent_node)
workflow.add_node("retrieve_tools", ToolNode(self.retrieval_tools), cache_policy=CachePolicy(ttl=3600))
workflow.add_node("retrieve_tools", ToolNode(self.retrieval_tools)) # cache_policy DISABLED
workflow.add_node("collect_tools", ToolNode(self.collection_tools))
# Edges
......@@ -115,8 +127,8 @@ class CANIFAGraph:
workflow.add_edge("retrieve_tools", "agent")
workflow.add_edge("collect_tools", "agent")
self._compiled_graph = workflow.compile(cache=self.cache) # No Checkpointer
logger.info("✅ Graph compiled (Langfuse callback will be per-run)")
self._compiled_graph = workflow.compile() # No Checkpointer, No Cache (DEBUG)
logger.info("✅ Graph compiled WITHOUT cache (DEBUG MODE)")
return self._compiled_graph
......@@ -129,10 +141,11 @@ class CANIFAGraph:
_instance: list[CANIFAGraph | None] = [None]
def build_graph(config: AgentConfig | None = None, llm: BaseChatModel | None = None, tools: list | None = None) -> Any:
"""Get compiled graph (singleton)."""
if _instance[0] is None:
_instance[0] = CANIFAGraph(config, llm, tools)
return _instance[0].build()
"""Get compiled graph (DISABLED SINGLETON FOR DEBUG)."""
# ALWAYS create new instance to avoid async state conflicts
logger.info("🔧 [DEBUG] Building NEW graph instance (singleton disabled)")
instance = CANIFAGraph(config, llm, tools)
return instance.build()
def get_graph_manager(
......
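The same hard-deadline pattern now appears in both chat_controller and _agent_node. A small reusable sketch of it, under the assumption that a shared helper is wanted; with_timeout is our name, not this repo's:

import asyncio
import logging

logger = logging.getLogger(__name__)

async def with_timeout(awaitable, seconds: float, label: str):
    """Await with a hard deadline; log loudly and re-raise on expiry."""
    try:
        return await asyncio.wait_for(awaitable, timeout=seconds)
    except asyncio.TimeoutError:
        logger.error(f"❌ {label} timed out after {seconds}s")
        raise

# Mirrors the _agent_node change above:
#   response = await with_timeout(self.chain.ainvoke(payload), 60.0, "chain.ainvoke")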
......@@ -107,8 +107,11 @@ async def data_retrieval_tool(searches: list[SearchItem]) -> str:
{"query": "áo phông bé trai màu xanh", "keywords": "áo phông", "master_color": "Xanh", "gender_by_product": "male", "age_by_product": "others"}
]
"""
logger.info("🔧 [DEBUG] data_retrieval_tool STARTED")
try:
logger.info("🔧 [DEBUG] Creating StarRocksConnection instance")
db = StarRocksConnection()
logger.info("🔧 [DEBUG] StarRocksConnection created successfully")
# 0. Log input parameters (as requested)
logger.info(f"📥 [Tool Input] data_retrieval_tool received {len(searches)} items:")
......@@ -116,12 +119,15 @@ async def data_retrieval_tool(searches: list[SearchItem]) -> str:
logger.info(f" 🔹 Item [{idx}]: {item.dict(exclude_none=True)}")
# 1. Create tasks to run in parallel
logger.info("🔧 [DEBUG] Creating parallel tasks")
tasks = []
for item in searches:
tasks.append(_execute_single_search(db, item))
logger.info(f"🚀 [Parallel Search] Executing {len(searches)} queries simultaneously...")
logger.info("🔧 [DEBUG] About to call asyncio.gather()")
results = await asyncio.gather(*tasks)
logger.info(f"🔧 [DEBUG] asyncio.gather() completed with {len(results)} results")
# 2. Aggregate results
combined_results = []
......@@ -147,9 +153,14 @@ async def data_retrieval_tool(searches: list[SearchItem]) -> str:
async def _execute_single_search(db: StarRocksConnection, item: SearchItem) -> list[dict]:
"""Thực thi một search query đơn lẻ (Async)."""
try:
logger.info(f"🔧 [DEBUG] _execute_single_search STARTED for query: {item.query[:50] if item.query else 'None'}")
# build_starrocks_query handles embedding internally (async)
logger.info("🔧 [DEBUG] Calling build_starrocks_query()")
sql = await build_starrocks_query(item)
logger.info(f"🔧 [DEBUG] SQL query built, length: {len(sql)}")
logger.info("🔧 [DEBUG] Calling db.execute_query_async()")
products = await db.execute_query_async(sql)
logger.info(f"🔧 [DEBUG] Query executed, got {len(products)} products")
return _format_product_results(products)
except Exception as e:
logger.error(f"Single search error for item {item}: {e}")
......
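A note on the asyncio.gather call above: because _execute_single_search catches its own exceptions, one failing query cannot abort the batch. If that inner try/except were ever removed, gather's return_exceptions flag would restore the same isolation; a sketch under that assumption:

import asyncio

async def gather_searches(db, searches):
    # return_exceptions=True keeps one failed search from cancelling the rest;
    # failures come back as exception objects instead of raising.
    results = await asyncio.gather(
        *(_execute_single_search(db, item) for item in searches),
        return_exceptions=True,
    )
    return [r for r in results if not isinstance(r, Exception)]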
......@@ -90,13 +90,18 @@ async def build_starrocks_query(params, query_vector: list[float] | None = None)
2. Vector Search (HNSW Index)
3. Grouping (group colors by style)
"""
logger.info("🔧 [DEBUG] build_starrocks_query STARTED")
# --- Process vector in query field ---
query_text = getattr(params, "query", None)
logger.info(f"🔧 [DEBUG] query_text: {query_text[:50] if query_text else 'None'}")
if query_text and query_vector is None:
logger.info("🔧 [DEBUG] Calling create_embedding_async()")
query_vector = await create_embedding_async(query_text)
logger.info(f"🔧 [DEBUG] Embedding created, dimension: {len(query_vector) if query_vector else 0}")
# --- Build filter clauses ---
logger.info("🔧 [DEBUG] Building WHERE clauses")
where_clauses = _get_where_clauses(params)
where_sql = " AND ".join(where_clauses) if where_clauses else "1=1"
......
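For orientation, the overall shape of the query the function assembles: embed first, then combine filters with a vector predicate. The sketch below is illustrative only; the table name, columns, and the vector-distance function are assumptions, since StarRocks vector syntax depends on version and schema:

async def sketch_query(params):
    # 1. Embed the free-text query (async, as build_starrocks_query does)
    vector = await create_embedding_async(params.query)
    # 2. Structured filters become WHERE clauses; "1=1" when none apply
    where_sql = " AND ".join(_get_where_clauses(params)) or "1=1"
    # 3. Vector search plus grouping; names below are placeholders
    return f"""
        SELECT internal_ref_code, style_code, master_color
        FROM products                                   -- hypothetical table
        WHERE {where_sql}
        ORDER BY cosine_distance(embedding, {vector})   -- placeholder function
        LIMIT 20
    """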
"""
Test API Routes - all endpoints for testing (isolated)
DO NOT TOUCH the main chatbot_route.py!
"""
import asyncio
import logging
import random
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
from agent.models import QueryRequest
from common.load_test_manager import get_load_test_manager
router = APIRouter(prefix="/test", tags=["Testing & Load Test"])
logger = logging.getLogger(__name__)
# ==================== MOCK CHAT ENDPOINT ====================
@router.post("/chat-mock", summary="Mock Chat API (for Load Testing)")
async def mock_chat(req: QueryRequest):
"""
MOCK endpoint for performance testing WITHOUT spending OpenAI money.
Returns a simulated response with random latency.
⚠️ FOR LOAD TESTING ONLY!
"""
# Simulate the real API's latency (100-500ms)
await asyncio.sleep(random.uniform(0.1, 0.5))
# Mock responses
mock_responses = [
"Dạ em đã tìm được một số mẫu áo sơ mi nam đẹp cho anh/chị ạ. Anh/chị có thể xem các sản phẩm sau đây.",
"Em xin gợi ý một số mẫu áo thun nam phù hợp với yêu cầu của anh/chị.",
"Dạ, em có tìm thấy một số mẫu quần jean nam trong khoảng giá anh/chị yêu cầu ạ.",
"Em xin giới thiệu các mẫu áo khoác nam đang có khuyến mãi tốt ạ.",
"Anh/chị có thể tham khảo các mẫu giày thể thao nam đang được ưa chuộng nhất.",
]
# Mock product IDs
mock_product_ids = [
f"MOCK_PROD_{random.randint(1000, 9999)}"
for _ in range(random.randint(2, 5))
]
return {
"status": "success",
"ai_response": random.choice(mock_responses),
"product_ids": mock_product_ids,
"_mock": True, # Flag để biết đây là mock response
"_latency_ms": random.randint(100, 500)
}
@router.post("/db-search", summary="DB Search Mock (Test StarRocks Performance)")
async def mock_db_search(req: QueryRequest):
"""
Endpoint for testing the PERFORMANCE of StarRocks DB queries.
Supports Multi-Search (parallel).
"""
from agent.tools.data_retrieval_tool import data_retrieval_tool
try:
# Mock Multi-Search call (Parallel)
tool_result = await data_retrieval_tool.ainvoke({
"searches": [
{
"keywords": "áo sơ mi",
"gender_by_product": "male",
"price_max": 500000
},
{
"keywords": "quần jean",
"gender_by_product": "male",
"price_max": 800000
}
]
})
# Parse result
import json
result_data = json.loads(tool_result)
# Collect all product IDs from all search results
all_product_ids = []
if result_data.get("status") == "success":
for res in result_data.get("results", []):
ids = [p.get("internal_ref_code", "") for p in res.get("products", [])]
all_product_ids.extend(ids)
return {
"status": "success",
"ai_response": "Kết quả Multi-Search Parallel từ DB",
"product_ids": list(set(all_product_ids)),
"_db_test": True,
"_queries_count": len(result_data.get("results", [])),
"_total_products": len(all_product_ids)
}
except Exception as e:
logger.error(f"DB multi-search error: {e}")
return {
"status": "error",
"ai_response": f"Lỗi: {str(e)}",
"product_ids": [],
"_error": str(e)
}
# ==================== LOAD TEST CONTROL ====================
class StartTestRequest(BaseModel):
"""Request body để start test"""
target_url: str = Field(default="http://localhost:5000", description="Base URL of the target")
num_users: int = Field(default=10, ge=1, le=1000, description="Number of concurrent users")
spawn_rate: int = Field(default=2, ge=1, le=100, description="User spawn rate (users/second)")
duration_seconds: int = Field(default=60, ge=10, le=600, description="Test duration (seconds)")
test_type: str = Field(default="chat_mock", description="chat_mock | chat_real | history")
@router.post("/loadtest/start", summary="Bắt đầu Load Test")
async def start_load_test(req: StartTestRequest):
"""
Start a load test with the specified config.
**test_type options:**
- `chat_mock`: Test the mock chat API (costs NOTHING) ⭐ Recommended
- `chat_real`: Test the real chat API (COSTS OpenAI money!)
- `history`: Test the history API (no LLM cost)
"""
try:
manager = get_load_test_manager()
config_dict = req.model_dump()
result = manager.start_test(config_dict)
if "error" in result:
raise HTTPException(status_code=400, detail=result["error"])
return {
"status": "success",
"message": "Load test started",
"data": result
}
except Exception as e:
logger.error(f"Error starting load test: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/loadtest/stop", summary="Dừng Load Test")
async def stop_load_test():
"""Dừng load test đang chạy"""
try:
manager = get_load_test_manager()
result = manager.stop_test()
if "error" in result:
raise HTTPException(status_code=400, detail=result["error"])
return {
"status": "success",
"message": "Load test stopped",
"data": result
}
except Exception as e:
logger.error(f"Error stopping load test: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/loadtest/metrics", summary="Lấy Metrics Realtime")
async def get_load_test_metrics():
"""
Get realtime metrics for the load test.
The frontend polls this endpoint every 2 seconds.
"""
try:
manager = get_load_test_manager()
metrics = manager.get_metrics()
return {
"status": "success",
"data": metrics
}
except Exception as e:
logger.error(f"Error getting metrics: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/loadtest/status", summary="Check Test Status")
async def get_load_test_status():
"""Check xem load test có đang chạy không"""
try:
manager = get_load_test_manager()
return {
"status": "success",
"data": {
"is_running": manager.is_running(),
"current_status": manager.status
}
}
except Exception as e:
logger.error(f"Error getting status: {e}")
raise HTTPException(status_code=500, detail=str(e))
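Client-side, the intended flow for the control endpoints above is start, poll, stop. A minimal sketch using httpx against those routes; the base URL and the httpx dependency are assumptions:

import asyncio
import httpx

BASE = "http://localhost:5001/api/test"  # assumed local deployment

async def run_load_test():
    async with httpx.AsyncClient() as client:
        # Start a cheap mock run (no OpenAI cost)
        r = await client.post(f"{BASE}/loadtest/start", json={
            "num_users": 10, "spawn_rate": 2,
            "duration_seconds": 30, "test_type": "chat_mock",
        })
        print(r.json())
        # Poll metrics every 2s, the same cadence the dashboard uses
        for _ in range(5):
            await asyncio.sleep(2)
            metrics = await client.get(f"{BASE}/loadtest/metrics")
            print(metrics.json()["data"])
        await client.post(f"{BASE}/loadtest/stop")

asyncio.run(run_load_test())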
......@@ -88,12 +88,14 @@ class LLMFactory:
"streaming": streaming,
"api_key": key,
"temperature": 0,
"timeout": 30.0, # 30 second timeout
"max_retries": 2,
}
# If json_mode is enabled, inject it directly into the constructor
if json_mode:
llm_kwargs["model_kwargs"] = {"response_format": {"type": "json_object"}}
logger.info(f"⚙️ Initializing OpenAI in JSON mode: {model_name}")
logger.info(f"⚙️ Initializing OpenAI in JSON mode: {model_name} with timeout=30s")
llm = ChatOpenAI(**llm_kwargs)
logger.info(f"✅ Created OpenAI: {model_name}")
......
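How the json_mode and timeout settings are meant to be consumed together, as a hedged sketch; create_llm's keyword arguments match the controller earlier in this commit, but the model name here is a placeholder:

import asyncio
import json
from langchain_core.messages import HumanMessage, SystemMessage
from common.llm_factory import create_llm

async def ask_json(query: str) -> dict:
    # json_mode=True injects response_format={"type": "json_object"};
    # the constructor-level timeout=30.0 above bounds each API call.
    llm = create_llm(model_name="gpt-4o-mini", streaming=False, json_mode=True)
    response = await llm.ainvoke([
        SystemMessage(content="Respond in JSON with a 'response' field."),
        HumanMessage(content=query),
    ])
    return json.loads(response.content)  # json_object mode yields parseable JSON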
"""
Load Test Manager - runs Locust programmatically
Singleton service that manages load testing for the APIs
DISABLED: Locust monkey-patches SSL and breaks async OpenAI client
"""
import logging
......@@ -10,8 +11,9 @@ from dataclasses import dataclass, asdict
from enum import Enum
from typing import Any
from locust import HttpUser, between, task
from locust.env import Environment
# DISABLED: Locust monkey-patches ssl and breaks async OpenAI client
# from locust import HttpUser, between, task
# from locust.env import Environment
logger = logging.getLogger(__name__)
......
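With Locust disabled, the manager needs an in-process alternative that leaves SSL alone. A minimal asyncio-plus-httpx load loop, purely a sketch; none of these names come from this repo's implementation:

import asyncio
import time
import httpx

async def simple_load(url: str, num_users: int, duration_s: float) -> list[float]:
    """Run concurrent workers against one endpoint; return latencies in ms."""
    latencies: list[float] = []
    deadline = time.monotonic() + duration_s

    async def worker(client: httpx.AsyncClient):
        while time.monotonic() < deadline:
            t0 = time.monotonic()
            await client.post(url, json={"query": "áo sơ mi nam"})
            latencies.append((time.monotonic() - t0) * 1000)

    async with httpx.AsyncClient(timeout=30) as client:
        await asyncio.gather(*(worker(client) for _ in range(num_users)))
    return latencies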
......@@ -5,11 +5,11 @@ services:
container_name: canifa_backend
env_file: .env
ports:
- "5000:5000"
- "5001:5001"
# volumes:
# - .:/app
environment:
- PORT=5000
- PORT=5001
restart: unless-stopped
logging:
driver: "json-file"
......
import asyncio
import os # os is needed to mount static files
import os #
import platform
# ==========================================
# 🛑 IMPORTANT: WINDOWS BUG FIX GOES HERE 🛑
# This must run BEFORE any other library is imported
# ==========================================
if platform.system() == "Windows":
print("🔧 Windows detected: Applying SelectorEventLoopPolicy globally...")
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Temporarily disable LangChain tracing to avoid a recursion error (as requested)
os.environ["LANGCHAIN_TRACING_V2"] = "false"
os.environ["LANGCHAIN_API_KEY"] = ""
......@@ -20,12 +15,11 @@ import logging
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles # Imported to mount HTML
from fastapi.staticfiles import StaticFiles
# Updated APIs (imported last so the DB picks up the config fix above)
from api.chatbot_route import router as chatbot_router
from api.conservation_route import router as conservation_router
from api.test_route import router as test_router # ← Test API (isolated)
from config import PORT
# Configure Logging
......@@ -51,11 +45,7 @@ app.add_middleware(
app.include_router(conservation_router)
app.include_router(chatbot_router, prefix="/api/agent")
app.include_router(test_router, prefix="/api") # ← Test routes
# ==========================================
# 🟢 STATIC HTML MOUNT SECTION 🟢
# ==========================================
try:
static_dir = os.path.join(os.path.dirname(__file__), "static")
if not os.path.exists(static_dir):
......
......@@ -267,7 +267,6 @@
<h1>🤖 Canifa AI System</h1>
<div class="nav-links">
<a href="/static/index.html" class="active">💬 Chatbot</a>
<a href="/static/loadtest.html">🔬 Load Test</a>
</div>
</div>
......
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>🔬 Load Testing Dashboard</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
}
/* Navigation Header */
.nav-header {
background: rgba(0, 0, 0, 0.2);
padding: 15px 30px;
display: flex;
justify-content: space-between;
align-items: center;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.3);
}
.nav-header h1 {
margin: 0;
color: white;
font-size: 1.5em;
}
.nav-links {
display: flex;
gap: 15px;
}
.nav-links a {
color: white;
text-decoration: none;
padding: 8px 16px;
border-radius: 6px;
background: rgba(255, 255, 255, 0.2);
transition: all 0.3s;
font-weight: 500;
}
.nav-links a:hover {
background: rgba(255, 255, 255, 0.3);
transform: translateY(-2px);
}
.nav-links a.active {
background: rgba(255, 255, 255, 0.4);
}
.main-wrapper {
padding: 20px;
}
.container {
max-width: 1200px;
margin: 0 auto;
background: white;
border-radius: 16px;
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3);
overflow: hidden;
}
.header {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 30px;
text-align: center;
}
.header h1 {
font-size: 2em;
margin-bottom: 10px;
}
.header p {
opacity: 0.9;
font-size: 1.1em;
}
.content {
padding: 30px;
}
.section {
margin-bottom: 30px;
padding: 20px;
border: 2px solid #f0f0f0;
border-radius: 12px;
}
.section h2 {
color: #667eea;
margin-bottom: 20px;
font-size: 1.5em;
display: flex;
align-items: center;
gap: 10px;
}
.form-group {
margin-bottom: 15px;
}
label {
display: block;
margin-bottom: 5px;
font-weight: 600;
color: #333;
}
input,
select {
width: 100%;
padding: 12px;
border: 2px solid #e0e0e0;
border-radius: 8px;
font-size: 14px;
transition: border-color 0.3s;
}
input:focus,
select:focus {
outline: none;
border-color: #667eea;
}
.button-group {
display: flex;
gap: 15px;
margin-top: 20px;
}
button {
flex: 1;
padding: 15px 30px;
border: none;
border-radius: 8px;
font-size: 16px;
font-weight: 600;
cursor: pointer;
transition: all 0.3s;
}
.btn-start {
background: linear-gradient(135deg, #11998e 0%, #38ef7d 100%);
color: white;
}
.btn-start:hover:not(:disabled) {
transform: translateY(-2px);
box-shadow: 0 5px 15px rgba(17, 153, 142, 0.4);
}
.btn-stop {
background: linear-gradient(135deg, #ee0979 0%, #ff6a00 100%);
color: white;
}
.btn-stop:hover:not(:disabled) {
transform: translateY(-2px);
box-shadow: 0 5px 15px rgba(238, 9, 121, 0.4);
}
button:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.metrics-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
gap: 20px;
margin-top: 20px;
}
.metric-card {
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
padding: 20px;
border-radius: 12px;
text-align: center;
}
.metric-label {
font-size: 14px;
color: #666;
margin-bottom: 10px;
}
.metric-value {
font-size: 32px;
font-weight: bold;
color: #333;
}
.metric-unit {
font-size: 16px;
color: #999;
margin-left: 5px;
}
.status {
display: inline-block;
padding: 8px 16px;
border-radius: 20px;
font-weight: 600;
font-size: 14px;
margin: 10px 0;
}
.status.idle {
background: #e0e0e0;
color: #666;
}
.status.running {
background: #4caf50;
color: white;
animation: pulse 2s infinite;
}
.status.stopped {
background: #f44336;
color: white;
}
@keyframes pulse {
0%,
100% {
opacity: 1;
}
50% {
opacity: 0.7;
}
}
.log-container {
background: #1e1e1e;
color: #00ff00;
padding: 20px;
border-radius: 8px;
font-family: 'Courier New', monospace;
font-size: 14px;
max-height: 300px;
overflow-y: auto;
}
.log-entry {
margin-bottom: 5px;
}
.alert {
padding: 15px;
border-radius: 8px;
margin-bottom: 20px;
}
.alert-warning {
background: #fff3cd;
border-left: 4px solid #ffc107;
color: #856404;
}
.alert-info {
background: #d1ecf1;
border-left: 4px solid #17a2b8;
color: #0c5460;
}
/* Chart container */
.chart-container {
position: relative;
height: 300px;
margin-top: 20px;
}
</style>
<!-- Chart.js -->
<script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.0/dist/chart.umd.min.js"></script>
</head>
<body>
<!-- Navigation Header -->
<div class="nav-header">
<h1>🤖 Canifa AI System</h1>
<div class="nav-links">
<a href="/static/index.html">💬 Chatbot</a>
<a href="/static/loadtest.html" class="active">🔬 Load Test</a>
</div>
</div>
<div class="main-wrapper">
<div class="container">
<div class="header">
<h1>🔬 Load Testing Dashboard</h1>
<p>Performance testing tool for Canifa Chat API</p>
</div>
<div class="content">
<!-- Alert -->
<div class="alert alert-warning">
⚠️ <strong>Note:</strong> Use <code>chat_mock</code> to test without spending OpenAI money!
</div>
<!-- Config Section -->
<div class="section">
<h2>⚙️ Test Configuration</h2>
<div class="form-group">
<label for="targetUrl">Target URL</label>
<input type="text" id="targetUrl" value="http://localhost:5000"
placeholder="http://localhost:5000">
</div>
<div class="form-group">
<label for="numUsers">Number of Users (1-1000)</label>
<input type="number" id="numUsers" value="10" min="1" max="1000">
</div>
<div class="form-group">
<label for="spawnRate">Spawn Rate (users/second)</label>
<input type="number" id="spawnRate" value="2" min="1" max="100">
</div>
<div class="form-group">
<label for="duration">Duration (seconds)</label>
<input type="number" id="duration" value="60" min="10" max="600">
</div>
<div class="form-group">
<label for="testType">Test Type</label>
<select id="testType">
<option value="chat_mock">💚 Mock Chat (No cost - Recommended)</option>
<option value="db_search">🔥 DB Search (Test StarRocks - No LLM cost)</option>
<option value="chat_real">💸 Real Chat (Costs money!)</option>
<option value="history">📜 History API (Postgres)</option>
</select>
</div>
<div class="button-group">
<button class="btn-start" id="startBtn" onclick="startTest()">▶ Start Test</button>
<button class="btn-stop" id="stopBtn" onclick="stopTest()" disabled>⏹ Stop Test</button>
</div>
</div>
<!-- Status Section -->
<div class="section">
<h2>📊 Live Metrics</h2>
<div>
Current Status: <span class="status idle" id="statusBadge">IDLE</span>
</div>
<div class="metrics-grid">
<div class="metric-card">
<div class="metric-label">Total Requests</div>
<div class="metric-value" id="totalReq">0</div>
</div>
<div class="metric-card">
<div class="metric-label">Requests/Second</div>
<div class="metric-value" id="rps">0<span class="metric-unit">req/s</span></div>
</div>
<div class="metric-card">
<div class="metric-label">Avg Response Time</div>
<div class="metric-value" id="avgLatency">0<span class="metric-unit">ms</span></div>
</div>
<div class="metric-card">
<div class="metric-label">P50 (Median)</div>
<div class="metric-value" id="p50">0<span class="metric-unit">ms</span></div>
</div>
<div class="metric-card">
<div class="metric-label">P90</div>
<div class="metric-value" id="p90">0<span class="metric-unit">ms</span></div>
</div>
<div class="metric-card">
<div class="metric-label">P95</div>
<div class="metric-value" id="p95">0<span class="metric-unit">ms</span></div>
</div>
<div class="metric-card">
<div class="metric-label">P99 (Worst)</div>
<div class="metric-value" id="p99">0<span class="metric-unit">ms</span></div>
</div>
<div class="metric-card">
<div class="metric-label">Success Rate</div>
<div class="metric-value" id="successRate">100<span class="metric-unit">%</span></div>
</div>
<div class="metric-card">
<div class="metric-label">Active Users</div>
<div class="metric-value" id="activeUsers">0</div>
</div>
<div class="metric-card">
<div class="metric-label">Elapsed Time</div>
<div class="metric-value" id="elapsed">0<span class="metric-unit">s</span></div>
</div>
</div>
</div>
<!-- Chart Section -->
<div class="section">
<h2>📈 Response Time Chart (Real-time)</h2>
<div class="chart-container">
<canvas id="responseTimeChart"></canvas>
</div>
</div>
<!-- Logs Section -->
<div class="section">
<h2>📝 Logs</h2>
<div class="log-container" id="logContainer">
<div class="log-entry">[INFO] Waiting for test to start...</div>
</div>
</div>
</div>
</div>
<script>
let pollingInterval = null;
let responseTimeChart = null;
const maxDataPoints = 30; // Keep 30 data points (60 seconds at a 2s poll)
// Initialize Chart
function initChart() {
const ctx = document.getElementById('responseTimeChart').getContext('2d');
responseTimeChart = new Chart(ctx, {
type: 'line',
data: {
labels: [],
datasets: [
{
label: 'P99 (Worst)',
data: [],
borderColor: 'rgb(255, 99, 132)',
backgroundColor: 'rgba(255, 99, 132, 0.1)',
tension: 0.4,
fill: true
},
{
label: 'P95',
data: [],
borderColor: 'rgb(255, 159, 64)',
backgroundColor: 'rgba(255, 159, 64, 0.1)',
tension: 0.4,
fill: true
},
{
label: 'P50 (Median)',
data: [],
borderColor: 'rgb(75, 192, 192)',
backgroundColor: 'rgba(75, 192, 192, 0.1)',
tension: 0.4,
fill: true
},
{
label: 'Avg',
data: [],
borderColor: 'rgb(54, 162, 235)',
backgroundColor: 'rgba(54, 162, 235, 0.1)',
tension: 0.4,
fill: true
}
]
},
options: {
responsive: true,
maintainAspectRatio: false,
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: 'Response Time (ms)'
}
},
x: {
title: {
display: true,
text: 'Time (seconds)'
}
}
},
plugins: {
legend: {
display: true,
position: 'top'
}
}
}
});
}
function updateChart(metrics) {
if (!responseTimeChart) return;
const elapsed = metrics.elapsed_seconds || 0;
// Add new data point
responseTimeChart.data.labels.push(elapsed + 's');
responseTimeChart.data.datasets[0].data.push(metrics.p99_response_time_ms || 0);
responseTimeChart.data.datasets[1].data.push(metrics.p95_response_time_ms || 0);
responseTimeChart.data.datasets[2].data.push(metrics.p50_response_time_ms || 0);
responseTimeChart.data.datasets[3].data.push(metrics.avg_response_time_ms || 0);
// Keep at most maxDataPoints points
if (responseTimeChart.data.labels.length > maxDataPoints) {
responseTimeChart.data.labels.shift();
responseTimeChart.data.datasets.forEach(dataset => dataset.data.shift());
}
responseTimeChart.update('none'); // Update without animation for smoother realtime
}
function addLog(message, type = 'INFO') {
const logContainer = document.getElementById('logContainer');
const timestamp = new Date().toLocaleTimeString();
const logEntry = document.createElement('div');
logEntry.className = 'log-entry';
logEntry.textContent = `[${timestamp}] [${type}] ${message}`;
logContainer.appendChild(logEntry);
logContainer.scrollTop = logContainer.scrollHeight;
}
async function startTest() {
const config = {
target_url: document.getElementById('targetUrl').value,
num_users: parseInt(document.getElementById('numUsers').value),
spawn_rate: parseInt(document.getElementById('spawnRate').value),
duration_seconds: parseInt(document.getElementById('duration').value),
test_type: document.getElementById('testType').value
};
try {
document.getElementById('startBtn').disabled = true;
addLog('Starting load test...', 'INFO');
const response = await fetch('/api/test/loadtest/start', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(config)
});
const data = await response.json();
if (data.status === 'success') {
addLog(`Test started successfully! Type: ${config.test_type}`, 'SUCCESS');
document.getElementById('stopBtn').disabled = false;
startPolling();
} else {
addLog(`Failed to start: ${data.detail || 'Unknown error'}`, 'ERROR');
document.getElementById('startBtn').disabled = false;
}
} catch (error) {
addLog(`Error: ${error.message}`, 'ERROR');
document.getElementById('startBtn').disabled = false;
}
}
async function stopTest() {
try {
addLog('Stopping test...', 'INFO');
const response = await fetch('/api/test/loadtest/stop', {
method: 'POST'
});
const data = await response.json();
addLog('Test stopped by user', 'INFO');
stopPolling();
} catch (error) {
addLog(`Error stopping test: ${error.message}`, 'ERROR');
}
}
async function fetchMetrics() {
try {
const response = await fetch('/api/test/loadtest/metrics');
const data = await response.json();
if (data.status === 'success') {
const metrics = data.data;
// Update status badge
const statusBadge = document.getElementById('statusBadge');
statusBadge.textContent = metrics.status.toUpperCase();
statusBadge.className = `status ${metrics.status}`;
// Update metrics
document.getElementById('totalReq').textContent = metrics.total_requests || 0;
document.getElementById('rps').innerHTML = `${metrics.current_rps || 0}<span class="metric-unit">req/s</span>`;
document.getElementById('avgLatency').innerHTML = `${metrics.avg_response_time_ms || 0}<span class="metric-unit">ms</span>`;
// Percentiles
document.getElementById('p50').innerHTML = `${metrics.p50_response_time_ms || 0}<span class="metric-unit">ms</span>`;
document.getElementById('p90').innerHTML = `${metrics.p90_response_time_ms || 0}<span class="metric-unit">ms</span>`;
document.getElementById('p95').innerHTML = `${metrics.p95_response_time_ms || 0}<span class="metric-unit">ms</span>`;
document.getElementById('p99').innerHTML = `${metrics.p99_response_time_ms || 0}<span class="metric-unit">ms</span>`;
const successRate = Math.round((1 - metrics.failure_rate) * 100);
document.getElementById('successRate').innerHTML = `${successRate}<span class="metric-unit">%</span>`;
document.getElementById('activeUsers').textContent = metrics.active_users || 0;
document.getElementById('elapsed').innerHTML = `${metrics.elapsed_seconds || 0}<span class="metric-unit">s</span>`;
// Update chart
updateChart(metrics);
// Stop polling if test is stopped
if (metrics.status === 'stopped' || metrics.status === 'idle') {
stopPolling();
addLog('Test completed!', 'INFO');
}
}
} catch (error) {
console.error('Error fetching metrics:', error);
}
}
function startPolling() {
if (pollingInterval) clearInterval(pollingInterval);
pollingInterval = setInterval(fetchMetrics, 2000); // Poll every 2 seconds
}
function stopPolling() {
if (pollingInterval) {
clearInterval(pollingInterval);
pollingInterval = null;
}
document.getElementById('startBtn').disabled = false;
document.getElementById('stopBtn').disabled = true;
}
// Initialize
window.addEventListener('load', () => {
initChart();
addLog('Dashboard ready', 'INFO');
});
</script>
</div> <!-- Close main-wrapper -->
</body>
</html>
\ No newline at end of file