Skip to content

Commit

Permalink
BREAKING CHANGE: make it possible to pass a command directly (#9)
Browse files Browse the repository at this point in the history
* fix: make it possible to pass a command directly

* fix: lint issue

* chore(README): update
  • Loading branch information
knqyf263 authored Jan 15, 2020
1 parent ad05962 commit ede8cf4
Show file tree
Hide file tree
Showing 4 changed files with 58 additions and 184 deletions.
77 changes: 21 additions & 56 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ jobs:
run: curl -sfL https://raw.githubusercontent.com/knqyf263/cob/master/install.sh | sudo sh -s -- -b /usr/local/bin
- name: Run Benchmark
run: cob -benchmem ./...
run: cob
```

## Travis CI
Expand All @@ -76,7 +76,7 @@ before_script:
- curl -sfL https://raw.githubusercontent.com/knqyf263/cob/master/install.sh | sudo sh -s -- -b /usr/local/bin
script:
- cob -benchmem ./...
- cob
```

## CircleCI
Expand All @@ -94,7 +94,7 @@ jobs:
command: curl -sfL https://raw.githubusercontent.com/knqyf263/cob/master/install.sh | sudo sh -s -- -b /usr/local/bin
- run:
name: Run cob
command: cob -benchmem ./...
command: cob
workflows:
version: 2
build-workflow:
Expand All @@ -104,58 +104,25 @@ workflows:


# Example
## Print memory allocation statistics for benchmarks

```
$ cob -benchmem ./...
```
## Override a command to measure benchmarks

<details>
<summary>Result</summary>
To measure benchmarks with `make bench`, you can use the `-bench-cmd` and `-bench-args` options.

```
2020/01/12 17:31:16 Run Benchmark: 4363944cbed3da7a8245cbcdc8d8240b8976eb24 HEAD@{1}
2020/01/12 17:31:19 Run Benchmark: 599a5523729d4d99a331b9d3f71dde9e1e6daef0 HEAD
Result
======
+-----------------------------+----------+---------------+-------------------+
| Name | Commit | NsPerOp | AllocedBytesPerOp |
+-----------------------------+----------+---------------+-------------------+
| BenchmarkAppend_Allocate-16 | HEAD | 175.00 ns/op | 111 B/op |
+ +----------+---------------+-------------------+
| | HEAD@{1} | 108.00 ns/op | 23 B/op |
+-----------------------------+----------+---------------+-------------------+
| BenchmarkCall-16 | HEAD | 0.27 ns/op | 0 B/op |
+ +----------+---------------+ +
| | HEAD@{1} | 0.29 ns/op | |
+-----------------------------+----------+---------------+-------------------+
Comparison
==========
+-----------------------------+---------+-------------------+
| Name | NsPerOp | AllocedBytesPerOp |
+-----------------------------+---------+-------------------+
| BenchmarkAppend_Allocate-16 | 62.04% | 382.61% |
+-----------------------------+---------+-------------------+
| BenchmarkCall-16 | 7.53% | 0.00% |
+-----------------------------+---------+-------------------+
2020/01/12 17:31:21 This commit makes benchmarks worse
$ cob -bench-cmd make -bench-args bench
```

</details>


## Run only those benchmarks matching a regular expression

```
$ cob -bench-args "test -bench Append -benchmem ./..."
```

<details>
<summary>Result</summary>

```
$ cob -bench Append ./...
2020/01/12 17:32:30 Run Benchmark: 4363944cbed3da7a8245cbcdc8d8240b8976eb24 HEAD@{1}
2020/01/12 17:32:32 Run Benchmark: 599a5523729d4d99a331b9d3f71dde9e1e6daef0 HEAD
Expand Down Expand Up @@ -185,7 +152,7 @@ Comparison
## Show only benchmarks with worse score

```
$ cob -benchmem -only-degression
$ cob -only-degression
```

<details>
Expand Down Expand Up @@ -228,7 +195,6 @@ $ cob --base origin/master ./...
# Usage

```
$ cob -h
NAME:
cob - Continuous Benchmark for Go project
Expand All @@ -239,13 +205,12 @@ COMMANDS:
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
--only-degression Show only benchmarks with worse score (default: false)
--threshold value The program fails if the benchmark gets worse than the threshold (default: 0.1)
--bench value Run only those benchmarks matching a regular expression. (default: ".")
--benchmem Print memory allocation statistics for benchmarks. (default: false)
--benchtime value Run enough iterations of each benchmark to take t, specified as a time.Duration (for example, -benchtime 1h30s). (default: "1s")
--help, -h show help (default: false)
--only-degression Show only benchmarks with worse score (default: false)
--threshold value The program fails if the benchmark gets worse than the threshold (default: 0.2)
--base value Specify a base commit compared with HEAD (default: "HEAD~1")
--bench-cmd value Specify a command to measure benchmarks (default: "go")
--bench-args value Specify arguments passed to -cmd (default: "test -run '^$' -bench . -benchmem ./...")
--help, -h show help (default: false)
```

# Q&A
Expand All @@ -255,16 +220,16 @@ GLOBAL OPTIONS:
Specify a package name.

```
$ cob -benchmem ./foo
$ cob -benchmem ./bar
$ cob -bench-args "test -bench . -benchmem ./foo"
$ cob -bench-args "test -bench . -benchmem ./bar"
```

## A result of benchmarks is unstable

You can specify `--benchtime`.
You can specify `-benchtime`.

```
$ cob -benchtime 10s ./...
$ cob -bench-args "test -bench . -benchmem -benchtime 10s ./..."
```

# License
Expand Down
20 changes: 9 additions & 11 deletions config.go
Original file line number Diff line number Diff line change
@@ -1,27 +1,25 @@
package main

import "github.com/urfave/cli/v2"
import (
"strings"

"github.com/urfave/cli/v2"
)

type config struct {
args []string
onlyDegression bool
threshold float64
base string
bench string
benchmem bool
benchtime string
tags string
benchCmd string
benchArgs []string
}

func newConfig(c *cli.Context) config {
return config{
args: c.Args().Slice(),
onlyDegression: c.Bool("only-degression"),
threshold: c.Float64("threshold"),
base: c.String("base"),
bench: c.String("bench"),
benchmem: c.Bool("benchmem"),
benchtime: c.String("benchtime"),
tags: c.String("tags"),
benchCmd: c.String("bench-cmd"),
benchArgs: strings.Fields(c.String("bench-args")),
}
}
83 changes: 26 additions & 57 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,22 +48,14 @@ func main() {
Value: "HEAD~1",
},
&cli.StringFlag{
Name: "bench",
Usage: "Run only those benchmarks matching a regular expression.",
Value: ".",
},
&cli.BoolFlag{
Name: "benchmem",
Usage: "Print memory allocation statistics for benchmarks.",
},
&cli.StringFlag{
Name: "benchtime",
Usage: "Run enough iterations of each benchmark to take t, specified as a time.Duration (for example, -benchtime 1h30s).",
Value: "1s",
Name: "bench-cmd",
Usage: "Specify a command to measure benchmarks",
Value: "go",
},
&cli.StringFlag{
Name: "tags",
Usage: "Run only those benchmarks with the specified build tags.",
Name: "bench-args",
Usage: "Specify arguments passed to -cmd",
Value: "test -run '^$' -bench . -benchmem ./...",
},
},
}
Expand Down Expand Up @@ -109,10 +101,12 @@ func run(c config) error {
return xerrors.Errorf("failed to reset the worktree to a previous commit: %w", err)
}

args := prepareBenchArgs(c)
defer func() {
_ = w.Reset(&git.ResetOptions{Commit: head.Hash(), Mode: git.HardReset})
}()

log.Printf("Run Benchmark: %s %s", prev, c.base)
prevSet, err := runBenchmark(args)
prevSet, err := runBenchmark(c.benchCmd, c.benchArgs)
if err != nil {
return xerrors.Errorf("failed to run a benchmark: %w", err)
}
Expand All @@ -123,7 +117,7 @@ func run(c config) error {
}

log.Printf("Run Benchmark: %s %s", head.Hash(), "HEAD")
headSet, err := runBenchmark(args)
headSet, err := runBenchmark(c.benchCmd, c.benchArgs)
if err != nil {
return xerrors.Errorf("failed to run a benchmark: %w", err)
}
Expand Down Expand Up @@ -151,8 +145,8 @@ func run(c config) error {
ratioAllocedBytesPerOp = (float64(headBench.AllocedBytesPerOp) - float64(prevBench.AllocedBytesPerOp)) / float64(prevBench.AllocedBytesPerOp)
}

rows = append(rows, generateRow("HEAD", headBench, c.benchmem))
rows = append(rows, generateRow("HEAD@{1}", prevBench, c.benchmem))
rows = append(rows, generateRow("HEAD", headBench))
rows = append(rows, generateRow("HEAD@{1}", prevBench))

ratios = append(ratios, result{
Name: benchName,
Expand All @@ -162,33 +156,21 @@ func run(c config) error {
}

if !c.onlyDegression {
showResult(os.Stdout, rows, c.benchmem)
showResult(os.Stdout, rows)
}

degression := showRatio(os.Stdout, ratios, c.benchmem, c.threshold, c.onlyDegression)
degression := showRatio(os.Stdout, ratios, c.threshold, c.onlyDegression)
if degression {
return xerrors.New("This commit makes benchmarks worse")
}

return nil
}

func prepareBenchArgs(c config) []string {
args := []string{"test", "-run='^$'", "-benchtime", c.benchtime, "-bench", c.bench}
if c.benchmem {
args = append(args, "-benchmem")
}
if c.tags != "" {
args = append(args, "-tags", c.tags)
}
args = append(args, c.args...)
return args
}

func runBenchmark(args []string) (parse.Set, error) {
out, err := exec.Command("go", args...).Output()
func runBenchmark(cmd string, args []string) (parse.Set, error) {
out, err := exec.Command(cmd, args...).Output()
if err != nil {
return nil, xerrors.Errorf("failed to run 'go test' command: %w", err)
return nil, xerrors.Errorf("failed to run '%s %s' command: %w", cmd, strings.Join(args, " "), err)
}

b := bytes.NewBuffer(out)
Expand All @@ -199,41 +181,32 @@ func runBenchmark(args []string) (parse.Set, error) {
return s, nil
}

func generateRow(ref string, b *parse.Benchmark, benchmem bool) []string {
row := []string{b.Name, ref, fmt.Sprintf(" %.2f ns/op", b.NsPerOp)}
if benchmem {
row = append(row, fmt.Sprintf(" %d B/op", b.AllocedBytesPerOp))
}
return row
func generateRow(ref string, b *parse.Benchmark) []string {
return []string{b.Name, ref, fmt.Sprintf(" %.2f ns/op", b.NsPerOp),
fmt.Sprintf(" %d B/op", b.AllocedBytesPerOp)}
}

func showResult(w io.Writer, rows [][]string, benchmem bool) {
func showResult(w io.Writer, rows [][]string) {
fmt.Fprintln(w, "\nResult")
fmt.Fprintf(w, "%s\n\n", strings.Repeat("=", 6))

table := tablewriter.NewWriter(w)
table.SetAutoFormatHeaders(false)
table.SetAlignment(tablewriter.ALIGN_CENTER)
headers := []string{"Name", "Commit", "NsPerOp"}
if benchmem {
headers = append(headers, "AllocedBytesPerOp")
}
headers := []string{"Name", "Commit", "NsPerOp", "AllocedBytesPerOp"}
table.SetHeader(headers)
table.SetAutoMergeCells(true)
table.SetRowLine(true)
table.AppendBulk(rows)
table.Render()
}

func showRatio(w io.Writer, results []result, benchmem bool, threshold float64, onlyDegression bool) bool {
func showRatio(w io.Writer, results []result, threshold float64, onlyDegression bool) bool {
table := tablewriter.NewWriter(w)
table.SetAutoFormatHeaders(false)
table.SetAlignment(tablewriter.ALIGN_CENTER)
table.SetRowLine(true)
headers := []string{"Name", "NsPerOp"}
if benchmem {
headers = append(headers, "AllocedBytesPerOp")
}
headers := []string{"Name", "NsPerOp", "AllocedBytesPerOp"}
table.SetHeader(headers)

var degression bool
Expand All @@ -245,11 +218,7 @@ func showRatio(w io.Writer, results []result, benchmem bool, threshold float64,
continue
}
}
row := []string{result.Name, generateRatioItem(result.RatioNsPerOp)}
if benchmem {
row = append(row, generateRatioItem(result.RatioAllocedBytesPerOp))
}

row := []string{result.Name, generateRatioItem(result.RatioNsPerOp), generateRatioItem(result.RatioAllocedBytesPerOp)}
colors := []tablewriter.Colors{{}}
colors = append(colors, generateColor(result.RatioNsPerOp))
colors = append(colors, generateColor(result.RatioAllocedBytesPerOp))
Expand Down
Loading

0 comments on commit ede8cf4

Please sign in to comment.