Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
C
canifa_note
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Vũ Hoàng Anh
canifa_note
Commits
9a100d55
Unverified
Commit
9a100d55
authored
Nov 10, 2025
by
Johnny
Committed by
GitHub
Nov 10, 2025
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
chore(server): remove profiler code and endpoints (#5244)
Co-authored-by:
Claude
<
noreply@anthropic.com
>
parent
bb3d808e
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
0 additions
and
143 deletions
+0
-143
profiler.go
server/profiler/profiler.go
+0
-120
server.go
server/server.go
+0
-23
No files found.
server/profiler/profiler.go
deleted
100644 → 0
View file @
bb3d808e
package
profiler
import
(
"context"
"fmt"
"log/slog"
"net/http"
"net/http/pprof"
"runtime"
"time"
"github.com/labstack/echo/v4"
)
// Profiler provides HTTP endpoints for memory profiling.
type Profiler struct {
	// memStatsLogInterval is how often StartMemoryMonitor logs a
	// runtime.MemStats snapshot (set to one minute by NewProfiler).
	memStatsLogInterval time.Duration
}
// NewProfiler creates a new profiler.
func
NewProfiler
()
*
Profiler
{
return
&
Profiler
{
memStatsLogInterval
:
1
*
time
.
Minute
,
}
}
// RegisterRoutes adds profiling endpoints to the Echo server.
func
(
*
Profiler
)
RegisterRoutes
(
e
*
echo
.
Echo
)
{
// Register pprof handlers
g
:=
e
.
Group
(
"/debug/pprof"
)
g
.
GET
(
""
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Index
)))
g
.
GET
(
"/cmdline"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Cmdline
)))
g
.
GET
(
"/profile"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Profile
)))
g
.
POST
(
"/symbol"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Symbol
)))
g
.
GET
(
"/symbol"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Symbol
)))
g
.
GET
(
"/trace"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Trace
)))
g
.
GET
(
"/allocs"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Handler
(
"allocs"
)
.
ServeHTTP
)))
g
.
GET
(
"/block"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Handler
(
"block"
)
.
ServeHTTP
)))
g
.
GET
(
"/goroutine"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Handler
(
"goroutine"
)
.
ServeHTTP
)))
g
.
GET
(
"/heap"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Handler
(
"heap"
)
.
ServeHTTP
)))
g
.
GET
(
"/mutex"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Handler
(
"mutex"
)
.
ServeHTTP
)))
g
.
GET
(
"/threadcreate"
,
echo
.
WrapHandler
(
http
.
HandlerFunc
(
pprof
.
Handler
(
"threadcreate"
)
.
ServeHTTP
)))
// Add a custom memory stats endpoint.
g
.
GET
(
"/memstats"
,
func
(
c
echo
.
Context
)
error
{
var
m
runtime
.
MemStats
runtime
.
ReadMemStats
(
&
m
)
return
c
.
JSON
(
http
.
StatusOK
,
map
[
string
]
interface
{}{
"alloc"
:
m
.
Alloc
,
"totalAlloc"
:
m
.
TotalAlloc
,
"sys"
:
m
.
Sys
,
"numGC"
:
m
.
NumGC
,
"heapAlloc"
:
m
.
HeapAlloc
,
"heapSys"
:
m
.
HeapSys
,
"heapInuse"
:
m
.
HeapInuse
,
"heapObjects"
:
m
.
HeapObjects
,
})
})
}
// StartMemoryMonitor starts a goroutine that periodically logs memory stats.
func
(
p
*
Profiler
)
StartMemoryMonitor
(
ctx
context
.
Context
)
{
go
func
()
{
ticker
:=
time
.
NewTicker
(
p
.
memStatsLogInterval
)
defer
ticker
.
Stop
()
// Store previous heap allocation to track growth.
var
lastHeapAlloc
uint64
var
lastNumGC
uint32
for
{
select
{
case
<-
ticker
.
C
:
var
m
runtime
.
MemStats
runtime
.
ReadMemStats
(
&
m
)
// Calculate heap growth since last check.
heapGrowth
:=
int64
(
m
.
HeapAlloc
)
-
int64
(
lastHeapAlloc
)
gcCount
:=
m
.
NumGC
-
lastNumGC
slog
.
Info
(
"memory stats"
,
"heapAlloc"
,
byteCountIEC
(
m
.
HeapAlloc
),
"heapSys"
,
byteCountIEC
(
m
.
HeapSys
),
"heapObjects"
,
m
.
HeapObjects
,
"heapGrowth"
,
byteCountIEC
(
uint64
(
heapGrowth
)),
"numGoroutine"
,
runtime
.
NumGoroutine
(),
"numGC"
,
m
.
NumGC
,
"gcSince"
,
gcCount
,
"nextGC"
,
byteCountIEC
(
m
.
NextGC
),
"gcPause"
,
time
.
Duration
(
m
.
PauseNs
[(
m
.
NumGC
+
255
)
%
256
])
.
String
(),
)
// Track values for next iteration.
lastHeapAlloc
=
m
.
HeapAlloc
lastNumGC
=
m
.
NumGC
// Force GC if memory usage is high to see if objects can be reclaimed.
if
m
.
HeapAlloc
>
500
*
1024
*
1024
{
// 500 MB threshold
slog
.
Info
(
"forcing garbage collection due to high memory usage"
)
}
case
<-
ctx
.
Done
()
:
return
}
}
}()
}
// byteCountIEC converts a byte count into a human-readable IEC string
// (e.g. "512 B", "1.5 KiB", "2.0 GiB").
func byteCountIEC(b uint64) string {
	const unit = 1024

	// Counts below one KiB are printed as plain bytes.
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}

	// Repeatedly scale down by 1024, tracking which IEC prefix applies.
	// Division by a power of two is exact in float64, so this matches
	// dividing once by the accumulated divisor.
	value := float64(b)
	exp := -1
	for remaining := b; remaining >= unit; remaining /= unit {
		value /= unit
		exp++
	}
	return fmt.Sprintf("%.1f %ciB", value, "KMGTPE"[exp])
}
server/server.go
View file @
9a100d55
...
@@ -21,7 +21,6 @@ import (
...
@@ -21,7 +21,6 @@ import (
"github.com/usememos/memos/internal/profile"
"github.com/usememos/memos/internal/profile"
storepb
"github.com/usememos/memos/proto/gen/store"
storepb
"github.com/usememos/memos/proto/gen/store"
"github.com/usememos/memos/server/profiler"
apiv1
"github.com/usememos/memos/server/router/api/v1"
apiv1
"github.com/usememos/memos/server/router/api/v1"
"github.com/usememos/memos/server/router/frontend"
"github.com/usememos/memos/server/router/frontend"
"github.com/usememos/memos/server/router/rss"
"github.com/usememos/memos/server/router/rss"
...
@@ -36,7 +35,6 @@ type Server struct {
...
@@ -36,7 +35,6 @@ type Server struct {
echoServer
*
echo
.
Echo
echoServer
*
echo
.
Echo
grpcServer
*
grpc
.
Server
grpcServer
*
grpc
.
Server
profiler
*
profiler
.
Profiler
runnerCancelFuncs
[]
context
.
CancelFunc
runnerCancelFuncs
[]
context
.
CancelFunc
}
}
...
@@ -53,13 +51,6 @@ func NewServer(ctx context.Context, profile *profile.Profile, store *store.Store
...
@@ -53,13 +51,6 @@ func NewServer(ctx context.Context, profile *profile.Profile, store *store.Store
echoServer
.
Use
(
middleware
.
Recover
())
echoServer
.
Use
(
middleware
.
Recover
())
s
.
echoServer
=
echoServer
s
.
echoServer
=
echoServer
if
profile
.
Mode
!=
"prod"
{
// Initialize profiler
s
.
profiler
=
profiler
.
NewProfiler
()
s
.
profiler
.
RegisterRoutes
(
echoServer
)
s
.
profiler
.
StartMemoryMonitor
(
ctx
)
}
instanceBasicSetting
,
err
:=
s
.
getOrUpsertInstanceBasicSetting
(
ctx
)
instanceBasicSetting
,
err
:=
s
.
getOrUpsertInstanceBasicSetting
(
ctx
)
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
errors
.
Wrap
(
err
,
"failed to get instance basic setting"
)
return
nil
,
errors
.
Wrap
(
err
,
"failed to get instance basic setting"
)
...
@@ -185,20 +176,6 @@ func (s *Server) Shutdown(ctx context.Context) {
...
@@ -185,20 +176,6 @@ func (s *Server) Shutdown(ctx context.Context) {
// Shutdown gRPC server.
// Shutdown gRPC server.
s
.
grpcServer
.
GracefulStop
()
s
.
grpcServer
.
GracefulStop
()
// Stop the profiler
if
s
.
profiler
!=
nil
{
slog
.
Info
(
"stopping profiler"
)
// Log final memory stats
var
m
runtime
.
MemStats
runtime
.
ReadMemStats
(
&
m
)
slog
.
Info
(
"final memory stats before exit"
,
"heapAlloc"
,
m
.
Alloc
,
"heapSys"
,
m
.
Sys
,
"heapObjects"
,
m
.
HeapObjects
,
"numGoroutine"
,
runtime
.
NumGoroutine
(),
)
}
// Close database connection.
// Close database connection.
if
err
:=
s
.
Store
.
Close
();
err
!=
nil
{
if
err
:=
s
.
Store
.
Close
();
err
!=
nil
{
slog
.
Error
(
"failed to close database"
,
slog
.
String
(
"error"
,
err
.
Error
()))
slog
.
Error
(
"failed to close database"
,
slog
.
String
(
"error"
,
err
.
Error
()))
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment