feat: add limit flag, limit the num of files displayed
rfyiamcool committed Mar 15, 2023
1 parent 85084d7 commit f7d1cc3
Showing 3 changed files with 31 additions and 16 deletions.
9 changes: 5 additions & 4 deletions README.md
@@ -2,7 +2,7 @@

`pgcacher` is used to get page cache statistics for files. Use the **pgcacher** command to see how much page cache the file descriptors of a given process occupy, or to check whether a specified list of files is resident in the page cache and how much of it is cached.

Compared with pcstat, `pgcacher` fixes the problem of an incorrect process file list: it used to read only `/proc/{pid}/maps`, but now it gathers files from both `/proc/{pid}/maps` and `/proc/{pid}/fd`. pgcacher also supports more parameters, such as top, worker, depth, least-size, exclude-files and include-files. 😁
Compared with pcstat, `pgcacher` fixes the problem of an incorrect process file list: it used to read only `/proc/{pid}/maps`, but now it gathers files from both `/proc/{pid}/maps` and `/proc/{pid}/fd`. pgcacher also supports more parameters, such as top, worker, limit, depth, least-size, exclude-files and include-files. 😁

In addition, the pgcacher code is more robust, and it supports a concurrency parameter so that page cache occupancy can be calculated faster.

@@ -14,10 +14,11 @@ In addition, the pgcacher code is more robust, and it supports a concurrency par

```sh
pgcacher <-json <-pps>|-terse|-default> <-nohdr> <-bname> file file file
-depth set the depth of dirs to scan.
-limit limit the number of files displayed, default: 500
-depth set the depth of dirs to scan, default: 0
-worker concurrency workers, default: 2
-pid show all open maps for the given pid
-top scan the open files of all processes, show the top few files that occupy the most memory space in the page cache.
-top scan the open files of all processes, show the top few files that occupy the most memory space in the page cache, default: false
-least-size ignore files smaller than the given least-size, such as '10MB' and '15GB'
-exclude-files exclude the specified files by wildcard, such as 'a*c?d' and '*xiaorui*,rfyiamcool'
-include-files only include the specified files by wildcard, such as 'a*c?d' and '*xiaorui?cc,rfyiamcool'
@@ -114,7 +115,7 @@ chmod 777 pgcacher
│ Sum │ 10.746G │ 2817091 │ 10.746G │ 2817091 │ 100.000 │
+------------+----------------+-------------+----------------+-------------+---------+
# sudo pgcacher -top=3
# sudo pgcacher -top -limit 3
+------------------+----------------+-------------+----------------+-------------+---------+
| Name | Size │ Pages │ Cached Size │ Cached Pages│ Percent │
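For readers skimming the diff, a minimal usage sketch of the new flag; the file paths below are illustrative, not taken from this commit:

```sh
# stat a set of files, but cap the output table at 10 rows
sudo pgcacher -limit 10 /var/log/syslog /var/log/kern.log

# scan the open files of all processes and print only the 3 largest page cache consumers
sudo pgcacher -top -limit 3
```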
25 changes: 17 additions & 8 deletions main.go
@@ -12,8 +12,8 @@ import (
)

type option struct {
pid, top, worker, depth int
terse, json, unicode bool
pid, worker, depth, limit int
top, terse, json, unicode bool
plain, bname bool
leastSize, excludeFiles, includeFiles string
}
@@ -23,7 +23,8 @@ var globalOption = new(option)
func init() {
// basic params
flag.IntVar(&globalOption.pid, "pid", 0, "show all open maps for the given pid")
flag.IntVar(&globalOption.top, "top", 0, "scan the open files of all processes, show the top few files that occupy the most memory space in the page cache.")
flag.IntVar(&globalOption.limit, "limit", 500, "limit the number of files displayed")
flag.BoolVar(&globalOption.top, "top", false, "scan the open files of all processes, show the top few files that occupy the most memory space in the page cache.")
flag.IntVar(&globalOption.depth, "depth", 0, "set the depth of dirs to scan")
flag.IntVar(&globalOption.worker, "worker", 2, "concurrency workers")
flag.StringVar(&globalOption.leastSize, "least-size", "0mb", "ignore files smaller than the lastSize, such as 10MB and 15GB")
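Switching `-top` from `flag.IntVar` to `flag.BoolVar` is what forces the CLI change seen in the README hunk above (`-top=3` becomes `-top -limit 3`): Go's flag package will not accept a numeric value for a bool flag. A small standalone sketch of that behavior, not part of the commit:

```go
package main

import (
	"flag"
	"fmt"
)

func parse(args []string) error {
	fs := flag.NewFlagSet("pgcacher", flag.ContinueOnError)
	top := fs.Bool("top", false, "scan the open files of all processes")
	limit := fs.Int("limit", 500, "limit the number of files displayed")
	if err := fs.Parse(args); err != nil {
		return err
	}
	fmt.Println("top:", *top, "limit:", *limit)
	return nil
}

func main() {
	_ = parse([]string{"-top", "-limit", "3"}) // prints: top: true limit: 3
	fmt.Println(parse([]string{"-top=3"}))     // fails: invalid boolean value "3" for -top
}
```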
@@ -51,10 +52,14 @@ func main() {
files = walkDirs(files, globalOption.depth)

// init pgcacher obj
pg := pgcacher{files: files, leastSize: int64(leastSize), option: globalOption}
pg := pgcacher{
files: files,
leastSize: int64(leastSize),
option: globalOption,
}

if globalOption.top != 0 {
pg.handleTop(globalOption.top)
if globalOption.top {
pg.handleTop()
os.Exit(0)
}

@@ -63,16 +68,20 @@
}

if len(pg.files) == 0 {
fmt.Println("files is null ?")
fmt.Println("the files is null ???")
flag.Usage()
os.Exit(1)
}

pg.filterFiles()
stats := pg.getPageCacheStats()
pg.output(stats)
pg.output(stats, pg.option.limit)

// invalid function, just make a reference relationship with pcstat
invalidCall()
}

func invalidCall() {
pcstat.SwitchMountNs(os.Getegid())
pcstat.GetPcStatus(os.Args[0])
}
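Taken together, the main.go hunks change the flow to: branch on the bool `top` flag and pass the new `limit` option down to `output`. A condensed, runnable sketch of that flow, with stub methods standing in for the real ones defined in pgcacher.go further down:

```go
package main

import (
	"fmt"
	"os"
)

// Stripped-down option struct after this commit: limit joins the int
// fields and top becomes a plain on/off switch.
type option struct {
	worker, depth, limit int
	top                  bool
}

type pgcacher struct {
	files  []string
	option *option
}

// Stub: the real handleTop scans all processes and reuses pg.option.limit.
func (pg *pgcacher) handleTop() {
	fmt.Println("top mode, showing at most", pg.option.limit, "files")
}

// Stub: the real output clamps limit to len(stats) before slicing.
func (pg *pgcacher) output(stats []string, limit int) {
	fmt.Println(stats[:limit])
}

func main() {
	opt := &option{limit: 2, top: false} // as if parsed from "-limit 2"
	pg := pgcacher{files: []string{"a.log", "b.db", "c.so"}, option: opt}

	if opt.top { // was "!= 0" when top carried the count itself
		pg.handleTop()
		os.Exit(0)
	}

	stats := pg.files // stand-in for pg.getPageCacheStats()
	pg.output(stats, opt.limit) // prints [a.log b.db]
}
```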
13 changes: 9 additions & 4 deletions pgcacher.go
@@ -231,7 +231,10 @@ func (pg *pgcacher) getPageCacheStats() PcStatusList {
return stats
}

func (pg *pgcacher) output(stats PcStatusList) {
func (pg *pgcacher) output(stats PcStatusList, limit int) {
limit = min(len(stats), limit)
stats = stats[:limit]

if pg.option.json {
stats.FormatJson()
} else if pg.option.terse {
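The clamp ahead of the slice matters: with the default `-limit 500`, `stats[:500]` would panic whenever fewer than 500 files were scanned, so the limit is first capped at `len(stats)`. A tiny standalone illustration of that guard (not the project's code):

```go
package main

import "fmt"

func min(x, y int) int {
	if x < y {
		return x
	}
	return y
}

func main() {
	stats := []string{"a.log", "b.db", "c.so"} // only 3 results this run
	limit := 500                               // default -limit value

	limit = min(len(stats), limit) // without this, stats[:500] panics: slice bounds out of range
	fmt.Println(stats[:limit])     // [a.log b.db c.so]
}
```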
@@ -245,7 +248,7 @@
}
}

func (pg *pgcacher) handleTop(top int) {
func (pg *pgcacher) handleTop() {
// get all active process.
procs, err := psutils.Processes()
if err != nil || len(procs) == 0 {
@@ -293,9 +296,11 @@ func (pg *pgcacher) handleTop(top int) {
// filter files
pg.filterFiles()

// get page cache stats of files.
stats := pg.getPageCacheStats()
top = min(len(stats), top)
pg.output(stats[:top])

// print
pg.output(stats, pg.option.limit)
}

func min(x, y int) int {
