From 4501094d393b4e7434c2c73630f38f219552f026 Mon Sep 17 00:00:00 2001 From: Bram Vogelaar Date: Fri, 2 Apr 2021 11:43:32 +0200 Subject: [PATCH] moving to go mod and bumping versions --- .gitignore | 16 + glide.yaml | 12 - go.mod | 11 + go.sum | 23 + .../github.com/Shopify/toxiproxy/.gitignore | 12 - .../github.com/Shopify/toxiproxy/CHANGELOG.md | 72 - .../Shopify/toxiproxy/CREATING_TOXICS.md | 166 -- .../github.com/Shopify/toxiproxy/Dockerfile | 8 - .../Shopify/toxiproxy/Godeps/Godeps.json | 36 - .../Shopify/toxiproxy/Godeps/Readme | 5 - vendor/github.com/Shopify/toxiproxy/LICENSE | 22 - vendor/github.com/Shopify/toxiproxy/Makefile | 76 - vendor/github.com/Shopify/toxiproxy/README.md | 573 ------ vendor/github.com/Shopify/toxiproxy/VERSION | 1 - vendor/github.com/Shopify/toxiproxy/api.go | 448 ----- .../github.com/Shopify/toxiproxy/api_test.go | 953 ---------- .../github.com/Shopify/toxiproxy/circle.yml | 8 - .../github.com/Shopify/toxiproxy/cli/cli.go | 603 ------- .../Shopify/toxiproxy/client/README.md | 161 -- .../Shopify/toxiproxy/client/client.go | 362 ---- .../Shopify/toxiproxy/cmd/toxiproxy.go | 30 - vendor/github.com/Shopify/toxiproxy/dev.yml | 16 - vendor/github.com/Shopify/toxiproxy/link.go | 176 -- .../github.com/Shopify/toxiproxy/link_test.go | 225 --- vendor/github.com/Shopify/toxiproxy/proxy.go | 225 --- .../Shopify/toxiproxy/proxy_collection.go | 166 -- .../toxiproxy/proxy_collection_test.go | 120 -- .../Shopify/toxiproxy/proxy_test.go | 316 ---- .../Shopify/toxiproxy/share/toxiproxy.conf | 13 - .../Shopify/toxiproxy/stream/io_chan.go | 108 -- .../Shopify/toxiproxy/stream/io_chan_test.go | 201 --- .../toxiproxy/testhelper/testhelper.go | 20 - .../toxiproxy/testhelper/testhelper_test.go | 20 - .../Shopify/toxiproxy/toxic_collection.go | 260 --- .../Shopify/toxiproxy/toxics/bandwidth.go | 59 - .../toxiproxy/toxics/bandwidth_test.go | 119 -- .../Shopify/toxiproxy/toxics/latency.go | 55 - .../Shopify/toxiproxy/toxics/latency_test.go | 250 --- .../Shopify/toxiproxy/toxics/limit_data.go | 60 - .../toxiproxy/toxics/limit_data_test.go | 160 -- .../Shopify/toxiproxy/toxics/noop.go | 23 - .../Shopify/toxiproxy/toxics/slicer.go | 83 - .../Shopify/toxiproxy/toxics/slicer_test.go | 58 - .../Shopify/toxiproxy/toxics/slow_close.go | 34 - .../Shopify/toxiproxy/toxics/timeout.go | 44 - .../Shopify/toxiproxy/toxics/timeout_test.go | 113 -- .../Shopify/toxiproxy/toxics/toxic.go | 151 -- .../Shopify/toxiproxy/toxics/toxic_test.go | 340 ---- .../github.com/Sirupsen/logrus/.gitignore | 1 - .../github.com/Sirupsen/logrus/.travis.yml | 9 - .../vendor/github.com/Sirupsen/logrus/LICENSE | 21 - .../github.com/Sirupsen/logrus/README.md | 342 ---- .../github.com/Sirupsen/logrus/entry.go | 244 --- .../github.com/Sirupsen/logrus/exported.go | 177 -- .../github.com/Sirupsen/logrus/formatter.go | 44 - .../github.com/Sirupsen/logrus/hooks.go | 34 - .../Sirupsen/logrus/json_formatter.go | 22 - .../github.com/Sirupsen/logrus/logger.go | 161 -- .../github.com/Sirupsen/logrus/logrus.go | 94 - .../Sirupsen/logrus/terminal_darwin.go | 12 - .../Sirupsen/logrus/terminal_freebsd.go | 20 - .../Sirupsen/logrus/terminal_linux.go | 12 - .../Sirupsen/logrus/terminal_notwindows.go | 21 - .../Sirupsen/logrus/terminal_windows.go | 27 - .../Sirupsen/logrus/text_formatter.go | 94 - .../vendor/github.com/gorilla/context/LICENSE | 27 - .../github.com/gorilla/context/README.md | 6 - .../github.com/gorilla/context/context.go | 112 -- .../vendor/github.com/gorilla/context/doc.go | 82 - .../vendor/github.com/gorilla/mux/.travis.yml 
| 7 - .../vendor/github.com/gorilla/mux/LICENSE | 27 - .../vendor/github.com/gorilla/mux/README.md | 7 - .../vendor/github.com/gorilla/mux/doc.go | 199 --- .../vendor/github.com/gorilla/mux/mux.go | 347 ---- .../vendor/github.com/gorilla/mux/regexp.go | 247 --- .../vendor/github.com/gorilla/mux/route.go | 499 ------ .../vendor/github.com/urfave/cli/.gitignore | 2 - .../vendor/github.com/urfave/cli/.travis.yml | 39 - .../vendor/github.com/urfave/cli/CHANGELOG.md | 336 ---- .../vendor/github.com/urfave/cli/LICENSE | 21 - .../vendor/github.com/urfave/cli/README.md | 1364 --------------- .../vendor/github.com/urfave/cli/app.go | 502 ------ .../vendor/github.com/urfave/cli/appveyor.yml | 24 - .../vendor/github.com/urfave/cli/category.go | 44 - .../vendor/github.com/urfave/cli/cli.go | 21 - .../vendor/github.com/urfave/cli/command.go | 284 --- .../vendor/github.com/urfave/cli/context.go | 264 --- .../vendor/github.com/urfave/cli/errors.go | 98 -- .../github.com/urfave/cli/flag-types.json | 93 - .../vendor/github.com/urfave/cli/flag.go | 636 ------- .../github.com/urfave/cli/flag_generated.go | 627 ------- .../vendor/github.com/urfave/cli/funcs.go | 28 - .../github.com/urfave/cli/generate-flag-types | 248 --- .../vendor/github.com/urfave/cli/help.go | 267 --- .../vendor/github.com/urfave/cli/runtests | 122 -- .../vendor/golang.org/x/crypto/LICENSE | 27 - .../vendor/golang.org/x/crypto/PATENTS | 22 - .../x/crypto/ssh/terminal/terminal.go | 888 ---------- .../golang.org/x/crypto/ssh/terminal/util.go | 128 -- .../x/crypto/ssh/terminal/util_bsd.go | 12 - .../x/crypto/ssh/terminal/util_linux.go | 11 - .../x/crypto/ssh/terminal/util_windows.go | 174 -- .../toxiproxy/vendor/gopkg.in/tomb.v1/LICENSE | 29 - .../vendor/gopkg.in/tomb.v1/README.md | 4 - .../toxiproxy/vendor/gopkg.in/tomb.v1/tomb.go | 176 -- .../github.com/Shopify/toxiproxy/version.go | 3 - vendor/github.com/Sirupsen/logrus/.gitignore | 1 - vendor/github.com/Sirupsen/logrus/.travis.yml | 9 - vendor/github.com/Sirupsen/logrus/LICENSE | 21 - vendor/github.com/Sirupsen/logrus/README.md | 342 ---- vendor/github.com/Sirupsen/logrus/entry.go | 244 --- .../Sirupsen/logrus/examples/basic/basic.go | 29 - .../Sirupsen/logrus/examples/hook/hook.go | 35 - vendor/github.com/Sirupsen/logrus/exported.go | 177 -- .../github.com/Sirupsen/logrus/formatter.go | 44 - .../Sirupsen/logrus/formatter_bench_test.go | 88 - .../github.com/Sirupsen/logrus/hook_test.go | 122 -- vendor/github.com/Sirupsen/logrus/hooks.go | 34 - .../logrus/hooks/airbrake/airbrake.go | 54 - .../logrus/hooks/papertrail/README.md | 28 - .../logrus/hooks/papertrail/papertrail.go | 54 - .../hooks/papertrail/papertrail_test.go | 26 - .../Sirupsen/logrus/hooks/syslog/README.md | 20 - .../Sirupsen/logrus/hooks/syslog/syslog.go | 59 - .../logrus/hooks/syslog/syslog_test.go | 26 - .../Sirupsen/logrus/json_formatter.go | 22 - vendor/github.com/Sirupsen/logrus/logger.go | 161 -- vendor/github.com/Sirupsen/logrus/logrus.go | 94 - .../github.com/Sirupsen/logrus/logrus_test.go | 247 --- .../Sirupsen/logrus/terminal_darwin.go | 12 - .../Sirupsen/logrus/terminal_freebsd.go | 20 - .../Sirupsen/logrus/terminal_linux.go | 12 - .../Sirupsen/logrus/terminal_notwindows.go | 21 - .../Sirupsen/logrus/terminal_windows.go | 27 - .../Sirupsen/logrus/text_formatter.go | 94 - vendor/github.com/davecgh/go-spew/.gitignore | 22 - vendor/github.com/davecgh/go-spew/.travis.yml | 14 - vendor/github.com/davecgh/go-spew/LICENSE | 15 - vendor/github.com/davecgh/go-spew/README.md | 194 -- 
.../github.com/davecgh/go-spew/cov_report.sh | 22 - .../github.com/davecgh/go-spew/spew/bypass.go | 152 -- .../davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 ---- .../davecgh/go-spew/spew/common_test.go | 298 ---- .../github.com/davecgh/go-spew/spew/config.go | 297 ---- vendor/github.com/davecgh/go-spew/spew/doc.go | 202 --- .../github.com/davecgh/go-spew/spew/dump.go | 509 ------ .../davecgh/go-spew/spew/dump_test.go | 1042 ----------- .../davecgh/go-spew/spew/dumpcgo_test.go | 99 -- .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 - .../davecgh/go-spew/spew/example_test.go | 226 --- .../github.com/davecgh/go-spew/spew/format.go | 419 ----- .../davecgh/go-spew/spew/format_test.go | 1558 ----------------- .../davecgh/go-spew/spew/internal_test.go | 87 - .../go-spew/spew/internalunsafe_test.go | 102 -- .../github.com/davecgh/go-spew/spew/spew.go | 148 -- .../davecgh/go-spew/spew/spew_test.go | 309 ---- .../davecgh/go-spew/spew/testdata/dumpcgo.go | 82 - .../davecgh/go-spew/test_coverage.txt | 61 - vendor/github.com/gorilla/context/LICENSE | 27 - vendor/github.com/gorilla/context/README.md | 6 - vendor/github.com/gorilla/context/context.go | 112 -- .../gorilla/context/context_test.go | 66 - vendor/github.com/gorilla/context/doc.go | 82 - vendor/github.com/gorilla/mux/.travis.yml | 7 - vendor/github.com/gorilla/mux/LICENSE | 27 - vendor/github.com/gorilla/mux/README.md | 7 - vendor/github.com/gorilla/mux/bench_test.go | 21 - vendor/github.com/gorilla/mux/doc.go | 199 --- vendor/github.com/gorilla/mux/mux.go | 347 ---- vendor/github.com/gorilla/mux/mux_test.go | 806 --------- vendor/github.com/gorilla/mux/old_test.go | 758 -------- vendor/github.com/gorilla/mux/regexp.go | 247 --- vendor/github.com/gorilla/mux/route.go | 499 ------ .../github.com/pmezard/go-difflib/.travis.yml | 5 - vendor/github.com/pmezard/go-difflib/LICENSE | 27 - .../github.com/pmezard/go-difflib/README.md | 50 - .../pmezard/go-difflib/difflib/difflib.go | 758 -------- .../go-difflib/difflib/difflib_test.go | 352 ---- vendor/github.com/stretchr/testify | 1 - vendor/gopkg.in/tomb.v1/LICENSE | 29 - vendor/gopkg.in/tomb.v1/README.md | 4 - vendor/gopkg.in/tomb.v1/tomb.go | 176 -- vendor/gopkg.in/tomb.v1/tomb_test.go | 114 -- 184 files changed, 50 insertions(+), 29465 deletions(-) delete mode 100644 glide.yaml create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 vendor/github.com/Shopify/toxiproxy/.gitignore delete mode 100644 vendor/github.com/Shopify/toxiproxy/CHANGELOG.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/CREATING_TOXICS.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/Dockerfile delete mode 100644 vendor/github.com/Shopify/toxiproxy/Godeps/Godeps.json delete mode 100644 vendor/github.com/Shopify/toxiproxy/Godeps/Readme delete mode 100644 vendor/github.com/Shopify/toxiproxy/LICENSE delete mode 100644 vendor/github.com/Shopify/toxiproxy/Makefile delete mode 100644 vendor/github.com/Shopify/toxiproxy/README.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/VERSION delete mode 100644 vendor/github.com/Shopify/toxiproxy/api.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/api_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/circle.yml delete mode 100644 vendor/github.com/Shopify/toxiproxy/cli/cli.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/client/README.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/client/client.go delete mode 100644 
vendor/github.com/Shopify/toxiproxy/cmd/toxiproxy.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/dev.yml delete mode 100644 vendor/github.com/Shopify/toxiproxy/link.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/link_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/proxy.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/proxy_collection.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/proxy_collection_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/proxy_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/share/toxiproxy.conf delete mode 100644 vendor/github.com/Shopify/toxiproxy/stream/io_chan.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/stream/io_chan_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/testhelper/testhelper.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/testhelper/testhelper_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxic_collection.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/bandwidth.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/bandwidth_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/latency.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/latency_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/limit_data.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/limit_data_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/noop.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/slicer.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/slicer_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/slow_close.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/timeout.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/timeout_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/toxic.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/toxics/toxic_test.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/LICENSE delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/README.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/entry.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/exported.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/formatter.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/hooks.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/json_formatter.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logger.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logrus.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_darwin.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_linux.go delete mode 100644 
vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_windows.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/text_formatter.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/LICENSE delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/README.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/context.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/doc.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/.travis.yml delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/LICENSE delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/README.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/doc.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/mux.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/regexp.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/route.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.gitignore delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.travis.yml delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/CHANGELOG.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/LICENSE delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/README.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/app.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/appveyor.yml delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/category.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/cli.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/command.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/context.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/errors.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag-types.json delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag_generated.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/funcs.go delete mode 100755 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/generate-flag-types delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/help.go delete mode 100755 vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/runtests delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/LICENSE delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/PATENTS delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/terminal.go delete mode 100644 
vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/LICENSE delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/README.md delete mode 100644 vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/tomb.go delete mode 100644 vendor/github.com/Shopify/toxiproxy/version.go delete mode 100644 vendor/github.com/Sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/Sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/Sirupsen/logrus/LICENSE delete mode 100644 vendor/github.com/Sirupsen/logrus/README.md delete mode 100644 vendor/github.com/Sirupsen/logrus/entry.go delete mode 100644 vendor/github.com/Sirupsen/logrus/examples/basic/basic.go delete mode 100644 vendor/github.com/Sirupsen/logrus/examples/hook/hook.go delete mode 100644 vendor/github.com/Sirupsen/logrus/exported.go delete mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go delete mode 100644 vendor/github.com/Sirupsen/logrus/formatter_bench_test.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hook_test.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks/papertrail/README.md delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go delete mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go delete mode 100644 vendor/github.com/Sirupsen/logrus/logger.go delete mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go delete mode 100644 vendor/github.com/Sirupsen/logrus/logrus_test.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_darwin.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_freebsd.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go delete mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go delete mode 100644 vendor/github.com/davecgh/go-spew/.gitignore delete mode 100644 vendor/github.com/davecgh/go-spew/.travis.yml delete mode 100644 vendor/github.com/davecgh/go-spew/LICENSE delete mode 100644 vendor/github.com/davecgh/go-spew/README.md delete mode 100644 vendor/github.com/davecgh/go-spew/cov_report.sh delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go delete 
mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/internal_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go delete mode 100644 vendor/github.com/davecgh/go-spew/test_coverage.txt delete mode 100644 vendor/github.com/gorilla/context/LICENSE delete mode 100644 vendor/github.com/gorilla/context/README.md delete mode 100644 vendor/github.com/gorilla/context/context.go delete mode 100644 vendor/github.com/gorilla/context/context_test.go delete mode 100644 vendor/github.com/gorilla/context/doc.go delete mode 100644 vendor/github.com/gorilla/mux/.travis.yml delete mode 100644 vendor/github.com/gorilla/mux/LICENSE delete mode 100644 vendor/github.com/gorilla/mux/README.md delete mode 100644 vendor/github.com/gorilla/mux/bench_test.go delete mode 100644 vendor/github.com/gorilla/mux/doc.go delete mode 100644 vendor/github.com/gorilla/mux/mux.go delete mode 100644 vendor/github.com/gorilla/mux/mux_test.go delete mode 100644 vendor/github.com/gorilla/mux/old_test.go delete mode 100644 vendor/github.com/gorilla/mux/regexp.go delete mode 100644 vendor/github.com/gorilla/mux/route.go delete mode 100644 vendor/github.com/pmezard/go-difflib/.travis.yml delete mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE delete mode 100644 vendor/github.com/pmezard/go-difflib/README.md delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go delete mode 160000 vendor/github.com/stretchr/testify delete mode 100644 vendor/gopkg.in/tomb.v1/LICENSE delete mode 100644 vendor/gopkg.in/tomb.v1/README.md delete mode 100644 vendor/gopkg.in/tomb.v1/tomb.go delete mode 100644 vendor/gopkg.in/tomb.v1/tomb_test.go diff --git a/.gitignore b/.gitignore index 3c49ecd..463f6d2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,19 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + release/ marten marten.json diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index 916041d..0000000 --- a/glide.yaml +++ /dev/null @@ -1,12 +0,0 @@ -package: github.com/gaggl/marten -import: -- package: github.com/Shopify/toxiproxy - version: ^2.1.1 - subpackages: - - stream - - toxics -testImport: -- package: github.com/stretchr/testify - version: ^1.1.4 - subpackages: - - assert diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..ec2c1dd --- /dev/null +++ b/go.mod @@ -0,0 +1,11 @@ +module marten + +go 1.16 + +require ( + github.com/Shopify/toxiproxy v2.1.4+incompatible + github.com/gorilla/mux v1.8.0 // indirect + github.com/sirupsen/logrus v1.8.1 // 
indirect + github.com/stretchr/testify v1.7.0 + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..28192cc --- /dev/null +++ b/go.sum @@ -0,0 +1,23 @@ +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Shopify/toxiproxy/.gitignore b/vendor/github.com/Shopify/toxiproxy/.gitignore deleted file mode 100644 index bb390ea..0000000 --- a/vendor/github.com/Shopify/toxiproxy/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -toxiproxy -toxiproxy-server -toxiproxy-cli -./cli/cli -toxiproxy.test -testing -testing.test -cpu.out -cover*.out -coverage.html -*.deb -tmp/ diff --git a/vendor/github.com/Shopify/toxiproxy/CHANGELOG.md b/vendor/github.com/Shopify/toxiproxy/CHANGELOG.md deleted file mode 100644 index 2d99734..0000000 --- a/vendor/github.com/Shopify/toxiproxy/CHANGELOG.md +++ /dev/null @@ -1,72 +0,0 @@ -# 2.1.1 - -* Fix timeout toxic causing hang (issue #159) - -# 2.1.0 - -* Add -config server option to populate on startup #154 -* Updated CLI for scriptability #133 -* Add `/populate` endpoint to server #111 -* Change error responses from `title` to `error` -* Allow hostname to be specified in CLI #129 -* Add support for stateful toxics #127 -* Add limit_data toxic - -# 2.0.0 - -* Add CLI (`toxiproxy-cli`) and rename server binary to `toxiproxy-server` #93 -* Fix removing a timeout toxic causing API to hang 
#89 -* API and client return toxics as array rather than a map of name to toxic #92 -* Fix multiple latency toxics not accumulating #94 -* Change default toxic name to `_` #96 -* Nest toxic attributes rather than having a flat structure #98 -* 2.0 RFC: #54 and PR #62 - * Change toxic API endpoints to an Add/Update/Remove structure - * Remove `enabled` field, and add `name` and `type` fields to toxics - * Add global toxic fields to a wrapper struct - * Chain toxics together dynamically instead of in a fixed length chain - * Register toxics in `init()` functions instead of a hard-coded list - * Clean up API error codes to make them more consistent - * Move toxics to their own package to allow 3rd party toxics -* Remove stream direction from API urls #73 -* Add `toxicity` field for toxics #75 -* Refactor Go client to make usage easier with 2.0 #76 -* Make `ChanReader` in the `stream` package interruptible #77 -* Define proxy buffer sizes per-toxic (Fixes #72) -* Fix slicer toxic testing race condition #71 - -# 1.2.1 - -* Fix proxy name conflicts leaking an open port #69 - -# 1.2.0 - -* Add a Toxic and Toxics type for the Go client -* Add `Dockerfile` -* Fix latency toxic limiting bandwidth #67 -* Add Slicer toxic - -# 1.1.0 - -* Remove /toxics endpoint in favour of /proxies -* Add bandwidth toxic - -# 1.0.3 - -* Rename Go library package to Toxiproxy from Client -* Fix latency toxic send to closed channel panic #46 -* Fix latency toxic accumulating delay #47 - -# 1.0.2 - -* Added Toxic support to Go client - -# 1.0.1 - -* Various improvements to the documentation -* Initial version of Go client -* Fix toxic disabling bug #42 - -# 1.0.0 - -Initial public release. diff --git a/vendor/github.com/Shopify/toxiproxy/CREATING_TOXICS.md b/vendor/github.com/Shopify/toxiproxy/CREATING_TOXICS.md deleted file mode 100644 index ab39bc8..0000000 --- a/vendor/github.com/Shopify/toxiproxy/CREATING_TOXICS.md +++ /dev/null @@ -1,166 +0,0 @@ -# Creating custom toxics - -Creating a toxic is done by implementing the `Toxic` interface: - -```go -type Toxic interface { - Pipe(*toxics.ToxicStub) -} -``` - -The `Pipe()` function defines how data flows through the toxic, and is passed a -`ToxicStub` to operate on. A `ToxicStub` stores the input and output channels for -the toxic, as well as an interrupt channel that is used to pause operation of the -toxic. - -The input and output channels in a `ToxicStub` send and receive `StreamChunk` structs, -which are similar to network packets. A `StreamChunk` contains a `byte[]` of stream -data, and a timestamp of when Toxiproxy received the data from the client or server. -This is used instead of just a plain `byte[]` so that toxics like latency can find out -how long a chunk of data has been waiting in the proxy. - -Toxics are registered in an `init()` function so that they can be used by the server: -```go -func init() { - toxics.Register("toxic_name", new(ExampleToxic)) -} -``` - -In order to use your own toxics, you will need to compile your own binary. This can be -done by copying [toxiproxy.go](https://github.com/Shopify/toxiproxy/blob/master/cmd/toxiproxy.go) -into a new project and registering your toxic with the server. This will allow you to add toxics -without having to make a full fork of the project. If you think your toxics will be useful -to others, contribute them back with a Pull Request. 
- -An example project for building a separate binary can be found here: -https://github.com/xthexder/toxic-example - -## A basic toxic - -The most basic implementation of a toxic is the [noop toxic](https://github.com/Shopify/toxiproxy/blob/master/toxics/noop.go), -which just passes data through without any modifications. - -```go -type NoopToxic struct{} - -func (t *NoopToxic) Pipe(stub *toxics.ToxicStub) { - for { - select { - case <-stub.Interrupt: - return - case c := <-stub.Input: - if c == nil { - stub.Close() - return - } - stub.Output <- c - } - } -} -``` - -The above code reads from `stub.Input` in a loop, and passes the `StreamChunk` along to -`stub.Output`. Since reading from `stub.Input` will block until a chunk is available, -we need to check for interrupts at the same time. - -Toxics will be interrupted whenever they are being updated, or possibly removed. This can -happen at any point within the `Pipe()` function, so all blocking operations (including sleep), -should be interruptible. When an interrupt is received, the toxic should return from the `Pipe()` -function after it has written any "in-flight" data back to `stub.Output`. It is important that -all data read from `stub.Input` is passed along to `stub.Output`, otherwise the stream will be -missing bytes and become corrupted. - -When an `end of stream` is reached, `stub.Input` will return a `nil` chunk. Whenever a -nil chunk is returned, the toxic should call `Close()` on the stub, and return from `Pipe()`. - -## Toxic configuration - -Toxic configuration information can be stored in the toxic struct. The toxic will be JSON -encoded and decoded by the API, so all public fields will be API accessible. - -An example of a toxic that uses configuration values is the [latency toxic](https://github.com/Shopify/toxiproxy/blob/master/toxics/latency.go) - -```go -type LatencyToxic struct { - Latency int64 `json:"latency"` - Jitter int64 `json:"jitter"` -} -``` - -These fields can be used inside the `Pipe()` function, but generally should not be written -to from the toxic. A separate instance of the toxic exists for each connection through the -proxy, and may be replaced when updated by the API. If state is required in your toxic, it -is better to use a local variable at the top of `Pipe()`, since struct fields are not -guaranteed to be persisted across interrupts. - -## Toxic buffering - -By default, toxics are not buffered. This means that writes to `stub.Output` will block until -either the endpoint or another toxic reads it. Since toxics are chained together, this means -not reading from `stub.Input` will block other toxics (and endpoint writes) from operating. -If this is not the behavior you want your toxic to have, you can specify a buffer size for your -toxic's input. The [latency toxic](https://github.com/Shopify/toxiproxy/blob/master/toxics/latency.go) -uses this in order to prevent added latency from limiting the proxy bandwidth. - -Specifying a buffer size is done by implementing the `BufferedToxic` interface, which adds the -`GetBufferSize()` function: - -```go -func (t *LatencyToxic) GetBufferSize() int { - return 1024 -} -``` - -The unit used by `GetBufferSize()` is `StreamChunk`s. Chunks are generally anywhere from -1 byte, up to 32KB, so keep this in mind when thinking about how much buffering you need, -and how much memory you are comfortable with using.
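Putting the pieces above together, a buffered toxic is just a regular toxic with a `GetBufferSize()` method and a registration call. The sketch below is hypothetical (the `buffered_noop` name and buffer size are arbitrary) and only assumes the `toxics` package API shown in the examples above:

```go
package customtoxics

import "github.com/Shopify/toxiproxy/toxics"

// BufferedNoopToxic behaves like the noop toxic above, but asks the proxy for
// an input buffer so that a slow reader further down the chain does not
// immediately block upstream writes.
type BufferedNoopToxic struct{}

// GetBufferSize implements the BufferedToxic interface. The unit is
// StreamChunks, not bytes.
func (t *BufferedNoopToxic) GetBufferSize() int {
	return 256
}

func (t *BufferedNoopToxic) Pipe(stub *toxics.ToxicStub) {
	for {
		select {
		case <-stub.Interrupt:
			return
		case c := <-stub.Input:
			if c == nil {
				stub.Close()
				return
			}
			stub.Output <- c
		}
	}
}

func init() {
	// Register the toxic so the server can create it via the HTTP API.
	toxics.Register("buffered_noop", new(BufferedNoopToxic))
}
```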
- -## Stateful toxics - -If a toxic needs to store extra information for a connection such as the number of bytes -transferred (See `limit_data` toxic), a state object can be created by implementing the -`StatefulToxic` interface. This interface defines the `NewState()` function that can create -a new state object with default values set. - -When a stateful toxic is created, the state object will be stored on the `ToxicStub` and -can be accessed from `toxic.Pipe()`: - -```go -state := stub.State.(*ExampleToxicState) -``` - -If necessary, some global state can be stored in the toxic struct, which will not be -instanced per-connection. These fields cannot have a custom default value set and will -not be thread-safe, so proper locking or atomic operations will need to be used. - -## Using `io.Reader` and `io.Writer` - -If your toxic involves modifying the data going through a proxy, you can use the `ChanReader` -and `ChanWriter` interfaces in the [stream package](https://github.com/Shopify/toxiproxy/tree/master/stream). -These allow reading and writing from the input and output channels as you would a normal data -stream such as a TCP socket. - -An implementation of the noop toxic above using the stream package would look something like this: - -```go -func (t *NoopToxic) Pipe(stub *toxics.ToxicStub) { - buf := make([]byte, 32*1024) - writer := stream.NewChanWriter(stub.Output) - reader := stream.NewChanReader(stub.Input) - reader.SetInterrupt(stub.Interrupt) - for { - n, err := reader.Read(buf) - if err == stream.ErrInterrupted { - writer.Write(buf[:n]) - return - } else if err == io.EOF { - stub.Close() - return - } - writer.Write(buf[:n]) - } -} -``` - -See https://github.com/xthexder/toxic-example/blob/master/http.go for a full example of using -the stream package with Go's http package. diff --git a/vendor/github.com/Shopify/toxiproxy/Dockerfile b/vendor/github.com/Shopify/toxiproxy/Dockerfile deleted file mode 100644 index 13bdddf..0000000 --- a/vendor/github.com/Shopify/toxiproxy/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM alpine - -COPY tmp/build/toxiproxy-server-linux-amd64 /go/bin/toxiproxy -COPY tmp/build/toxiproxy-cli-linux-amd64 /go/bin/toxiproxy-cli - -EXPOSE 8474 -ENTRYPOINT ["/go/bin/toxiproxy"] -CMD ["-host=0.0.0.0"] diff --git a/vendor/github.com/Shopify/toxiproxy/Godeps/Godeps.json b/vendor/github.com/Shopify/toxiproxy/Godeps/Godeps.json deleted file mode 100644 index 6766568..0000000 --- a/vendor/github.com/Shopify/toxiproxy/Godeps/Godeps.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "ImportPath": "github.com/Shopify/toxiproxy", - "GoVersion": "go1.7", - "GodepVersion": "v74", - "Packages": [ - "./..." 
- ], - "Deps": [ - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.5.1-29-g3737b32", - "Rev": "3737b3267e9df4c78170cf695eb35a984330cd6f" - }, - { - "ImportPath": "github.com/gorilla/context", - "Rev": "708054d61e5a2918b9f4e9700000ee611dcf03f5" - }, - { - "ImportPath": "github.com/gorilla/mux", - "Rev": "9ede152210fa25c1377d33e867cb828c19316445" - }, - { - "ImportPath": "github.com/urfave/cli", - "Comment": "v1.18.0-74-g6f2647a", - "Rev": "6f2647a880e25bd7178ab4bbf1f4045ac256cc57" - }, - { - "ImportPath": "golang.org/x/crypto/ssh/terminal", - "Rev": "1fbbd62cfec66bd39d91e97749579579d4d3037e" - }, - { - "ImportPath": "gopkg.in/tomb.v1", - "Rev": "c131134a1947e9afd9cecfe11f4c6dff0732ae58" - } - ] -} diff --git a/vendor/github.com/Shopify/toxiproxy/Godeps/Readme b/vendor/github.com/Shopify/toxiproxy/Godeps/Readme deleted file mode 100644 index 4cdaa53..0000000 --- a/vendor/github.com/Shopify/toxiproxy/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/Shopify/toxiproxy/LICENSE b/vendor/github.com/Shopify/toxiproxy/LICENSE deleted file mode 100644 index 04d0a14..0000000 --- a/vendor/github.com/Shopify/toxiproxy/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Shopify - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/Shopify/toxiproxy/Makefile b/vendor/github.com/Shopify/toxiproxy/Makefile deleted file mode 100644 index cae6554..0000000 --- a/vendor/github.com/Shopify/toxiproxy/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -SERVER_NAME=toxiproxy-server -CLI_NAME=toxiproxy-cli -VERSION=$(shell cat VERSION) -DEB=pkg/toxiproxy_$(VERSION)_amd64.deb -GODEP_PATH=$(shell pwd)/vendor -ORIGINAL_PATH=$(shell echo $(GOPATH)) -COMBINED_GOPATH=$(GODEP_PATH):$(ORIGINAL_PATH) - -.PHONY: packages deb test linux darwin windows - -build: - GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=git-$(shell git rev-parse --short HEAD)" -o $(SERVER_NAME) ./cmd - GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=git-$(shell git rev-parse --short HEAD)" -o $(CLI_NAME) ./cli - -all: deb linux darwin windows -deb: $(DEB) - -darwin: tmp/build/$(SERVER_NAME)-darwin-amd64 tmp/build/$(CLI_NAME)-darwin-amd64 -linux: tmp/build/$(SERVER_NAME)-linux-amd64 tmp/build/$(CLI_NAME)-linux-amd64 -windows: tmp/build/$(SERVER_NAME)-windows-amd64.exe tmp/build/$(CLI_NAME)-windows-amd64.exe - -release: all docker-release - -clean: - rm -f tmp/build/* - rm -f $(SERVER_NAME) - rm -f $(CLI_NAME) - rm -f *.deb - -test: - echo "Testing with" `go version` - GOMAXPROCS=4 GOPATH=$(COMBINED_GOPATH) go test -v -race ./... - -tmp/build/$(SERVER_NAME)-linux-amd64: - GOOS=linux GOARCH=amd64 GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=$(VERSION)" -o $(@) ./cmd - -tmp/build/$(SERVER_NAME)-darwin-amd64: - GOOS=darwin GOARCH=amd64 GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=$(VERSION)" -o $(@) ./cmd - -tmp/build/$(SERVER_NAME)-windows-amd64.exe: - GOOS=windows GOARCH=amd64 GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=$(VERSION)" -o $(@) ./cmd - -tmp/build/$(CLI_NAME)-linux-amd64: - GOOS=linux GOARCH=amd64 GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=$(VERSION)" -o $(@) ./cli - -tmp/build/$(CLI_NAME)-darwin-amd64: - GOOS=darwin GOARCH=amd64 GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=$(VERSION)" -o $(@) ./cli - -tmp/build/$(CLI_NAME)-windows-amd64.exe: - GOOS=windows GOARCH=amd64 GOPATH=$(COMBINED_GOPATH) go build -ldflags="-X github.com/Shopify/toxiproxy.Version=$(VERSION)" -o $(@) ./cli - -$(DEB): tmp/build/$(SERVER_NAME)-linux-amd64 tmp/build/$(CLI_NAME)-linux-amd64 - fpm -t deb \ - -s dir \ - -p tmp/build/ \ - --name "toxiproxy" \ - --version $(VERSION) \ - --license MIT \ - --no-depends \ - --no-auto-depends \ - --architecture amd64 \ - --maintainer "Simon Eskildsen " \ - --description "TCP proxy to simulate network and system conditions" \ - --url "https://github.com/Shopify/toxiproxy" \ - $(word 1,$^)=/usr/bin/$(SERVER_NAME) \ - $(word 2,$^)=/usr/bin/$(CLI_NAME) \ - ./share/toxiproxy.conf=/etc/init/toxiproxy.conf - -docker: - docker build --tag="shopify/toxiproxy:git" . - -docker-release: linux - docker build --rm=true --tag="shopify/toxiproxy:$(VERSION)" . 
- docker tag shopify/toxiproxy:$(VERSION) shopify/toxiproxy:latest - docker push shopify/toxiproxy:$(VERSION) - docker push shopify/toxiproxy:latest diff --git a/vendor/github.com/Shopify/toxiproxy/README.md b/vendor/github.com/Shopify/toxiproxy/README.md deleted file mode 100644 index 7e92b0f..0000000 --- a/vendor/github.com/Shopify/toxiproxy/README.md +++ /dev/null @@ -1,573 +0,0 @@ -# Toxiproxy -[![GitHub release](https://img.shields.io/github/release/Shopify/toxiproxy.svg)](https://github.com/Shopify/toxiproxy/releases/latest) -[![Build Status](https://img.shields.io/circleci/project/github/Shopify/toxiproxy/master.svg)](https://circleci.com/gh/Shopify/toxiproxy/tree/master) -[![IRC Channel](https://img.shields.io/badge/chat-on%20freenode-brightgreen.svg)](https://kiwiirc.com/client/irc.freenode.net/#toxiproxy) - -![](http://i.imgur.com/sOaNw0o.png) - -Toxiproxy is a framework for simulating network conditions. It's made -specifically to work in testing, CI and development environments, supporting -deterministic tampering with connections, but with support for randomized chaos -and customization. **Toxiproxy is the tool you need to prove with tests that -your application doesn't have single points of failure.** We've been -successfully using it in all development and test environments at Shopify since -October, 2014. See our [blog post][blog] on resiliency for more information. - -Toxiproxy usage consists of two parts. A TCP proxy written in Go (what this -repository contains) and a client communicating with the proxy over HTTP. You -configure your application to make all test connections go through Toxiproxy -and can then manipulate their health via HTTP. See [Usage](#usage) -below on how to set up your project. - -For example, to add 1000ms of latency to the response of MySQL from the [Ruby -client](https://github.com/Shopify/toxiproxy-ruby): - -```ruby -Toxiproxy[:mysql_master].downstream(:latency, latency: 1000).apply do - Shop.first # this takes at least 1s -end -``` - -To take down all Redis instances: - -```ruby -Toxiproxy[/redis/].down do - Shop.first # this will throw an exception -end -``` - -While the examples in this README are currently in Ruby, there's nothing -stopping you from creating a client in any other language (see -[Clients](#clients)). - -## Table of Contents - -1. [Why yet another chaotic TCP proxy?](#why-yet-another-chaotic-tcp-proxy) -2. [Clients](#clients) -3. [Example](#example) -4. [Usage](#usage) - 1. [Installing](#1-installing-toxiproxy) - 1. [Upgrading from 1.x](#upgrading-from-toxiproxy-1x) - 2. [Populating](#2-populating-toxiproxy) - 3. [Using](#3-using-toxiproxy) -5. [Toxics](#toxics) - 1. [Latency](#latency) - 2. [Down](#down) - 3. [Bandwidth](#bandwidth) - 4. [Slow close](#slow_close) - 5. [Timeout](#timeout) - 6. [Slicer](#slicer) -6. [HTTP API](#http-api) - 1. [Proxy fields](#proxy-fields) - 2. [Toxic fields](#toxic-fields) - 3. [Endpoints](#endpoints) - 4. [Populating Proxies](#populating-proxies) -7. [CLI example](#cli-example) -8. [FAQ](#frequently-asked-questions) -9. [Development](#development) - -## Why yet another chaotic TCP proxy? - -The existing ones we found didn't provide the kind of dynamic API we needed for -integration and unit testing. Linux tools like `nc` and so on are not -cross-platform and require root, which makes them problematic in test, -development and CI environments. 
- -## Clients - -* [toxiproxy-ruby](https://github.com/Shopify/toxiproxy-ruby) -* [toxiproxy-go](https://github.com/Shopify/toxiproxy/tree/master/client) -* [toxiproxy-python](https://github.com/douglas/toxiproxy-python) -* [toxiproxy.net](https://github.com/mdevilliers/Toxiproxy.Net) -* [toxiproxy-php-client](https://github.com/ihsw/toxiproxy-php-client) -* [toxiproxy-node-client](https://github.com/ihsw/toxiproxy-node-client) -* [toxiproxy-java](https://github.com/trekawek/toxiproxy-java) - -## Example - -Let's walk through an example with a Rails application. Note that Toxiproxy is -in no way tied to Ruby, it's just been our first use case. You can see the full example at -[Sirupsen/toxiproxy-rails-example](https://github.com/Sirupsen/toxiproxy-rails-example). -To get started right away, jump down to [Usage](#usage). - -For our popular blog, for some reason we're storing the tags for our posts in -Redis and the posts themselves in MySQL. We might have a `Post` class that -includes some methods to manipulate tags in a [Redis set](http://redis.io/commands#set): - -```ruby -class Post < ActiveRecord::Base - # Return an Array of all the tags. - def tags - TagRedis.smembers(tag_key) - end - - # Add a tag to the post. - def add_tag(tag) - TagRedis.sadd(tag_key, tag) - end - - # Remove a tag from the post. - def remove_tag(tag) - TagRedis.srem(tag_key, tag) - end - - # Return the key in Redis for the set of tags for the post. - def tag_key - "post:tags:#{self.id}" - end -end -``` - -We've decided that erroring while writing to the tag data store -(adding/removing) is OK. However, if the tag data store is down, we should be -able to see the post with no tags. We could simply rescue the -`Redis::CannotConnectError` around the `SMEMBERS` Redis call in the `tags` -method. Let's use Toxiproxy to test that. - -Since we've already installed Toxiproxy and it's running on our machine, we can -skip to step 2. This is where we need to make sure Toxiproxy has a mapping for -Redis tags. To `config/boot.rb` (before any connection is made) we add: - -```ruby -require 'toxiproxy' - -Toxiproxy.populate([ - { - name: "toxiproxy_test_redis_tags", - listen: "127.0.0.1:22222", - upstream: "127.0.0.1:6379" - } -]) -``` - -Then in `config/environments/test.rb` we set the `TagRedis` to be a Redis client -that connects to Redis through Toxiproxy by adding this line: - -```ruby -TagRedis = Redis.new(port: 22222) -``` - -All calls in the test environment now go through Toxiproxy. That means we can -add a unit test where we simulate a failure: - -```ruby -test "should return empty array when tag redis is down when listing tags" do - @post.add_tag "mammals" - - # Take down all Redises in Toxiproxy - Toxiproxy[/redis/].down do - assert_equal [], @post.tags - end -end -``` - -The test fails with `Redis::CannotConnectError`. Perfect! Toxiproxy took down -the Redis successfully for the duration of the closure. Let's fix the `tags` -method to be resilient: - -```ruby -def tags - TagRedis.smembers(tag_key) -rescue Redis::CannotConnectError - [] -end -``` - -The tests pass! We now have a unit test that proves fetching the tags when Redis -is down returns an empty array, instead of throwing an exception. For full -coverage you should also write an integration test that wraps fetching the -entire blog post page when Redis is down. - -Full example application is at -[Sirupsen/toxiproxy-rails-example](https://github.com/Sirupsen/toxiproxy-rails-example). 
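For Go projects, the equivalent test can be written against the Go client that ships in this repository under `client/`. A rough sketch, assuming the client API described in the client README (the proxy name matches the one populated above):

```go
package post_test

import (
	"testing"

	toxiproxy "github.com/Shopify/toxiproxy/client"
)

func TestTagsReturnEmptyWhenRedisIsDown(t *testing.T) {
	client := toxiproxy.NewClient("localhost:8474")

	// Look up the proxy that was populated at application boot.
	proxy, err := client.Proxy("toxiproxy_test_redis_tags")
	if err != nil {
		t.Fatal(err)
	}

	// Take the Redis proxy down for the duration of the test.
	if err := proxy.Disable(); err != nil {
		t.Fatal(err)
	}
	defer proxy.Enable()

	// Fetch the tags here and assert that an empty list is returned
	// instead of a connection error.
}
```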
- -## Usage - -Configuring a project to use Toxiproxy consists of three steps: - -1. Installing Toxiproxy -2. Populating Toxiproxy -3. Using Toxiproxy - -### 1. Installing Toxiproxy - -**Linux** - -See [`Releases`](https://github.com/Shopify/toxiproxy/releases) for the latest -binaries and system packages for your architecture. - -**Ubuntu** - -```bash -$ wget -O toxiproxy-2.1.1.deb https://github.com/Shopify/toxiproxy/releases/download/v2.1.1/toxiproxy_2.1.1_amd64.deb -$ sudo dpkg -i toxiproxy-2.1.1.deb -$ sudo service toxiproxy start -``` - -**OS X** - -```bash -$ brew tap shopify/shopify -$ brew install toxiproxy -``` - -**Windows** - -Toxiproxy for Windows is available for download at https://github.com/Shopify/toxiproxy/releases/download/v2.1.1/toxiproxy-server-windows-amd64.exe - -**Docker** - -Toxiproxy is available on [Docker Hub](https://hub.docker.com/r/shopify/toxiproxy/). - -```bash -$ docker pull shopify/toxiproxy -$ docker run -it shopify/toxiproxy -``` - -If using Toxiproxy from the host rather than other containers, enable host networking with `--net=host`. - -**Source** - -If you have Go installed, you can build Toxiproxy from source using the make file: -```bash -$ make build -$ ./toxiproxy-server -``` - -#### Upgrading from Toxiproxy 1.x - -In Toxiproxy 2.0 several changes were made to the API that make it incompatible with version 1.x. -In order to use version 2.x of the Toxiproxy server, you will need to make sure your client -library supports the same version. You can check which version of Toxiproxy you are running by -looking at the `/version` endpoint. - -See the documentation for your client library for specific library changes. Detailed changes -for the Toxiproxy server can be found in [CHANGELOG.md](https://github.com/Shopify/toxiproxy/blob/master/CHANGELOG.md). - -### 2. Populating Toxiproxy - -When your application boots, it needs to make sure that Toxiproxy knows which -endpoints to proxy where. The main parameters are: name, address for Toxiproxy -to **listen** on and the address of the upstream. - -Some client libraries have helpers for this task, which is essentially just -making sure each proxy in a list is created. Example from the Ruby client: - -```ruby -# Make sure `shopify_test_redis_master` and `shopify_test_mysql_master` are -# present in Toxiproxy -Toxiproxy.populate([ - { - name: "shopify_test_redis_master", - listen: "127.0.0.1:22220", - upstream: "127.0.0.1:6379" - }, - { - name: "shopify_test_mysql_master", - listen: "127.0.0.1:24220", - upstream: "127.0.0.1:3306" - } -]) -``` - -This code needs to run as early in boot as possible, before any code establishes -a connection through Toxiproxy. Please check your client library for -documentation on the population helpers. - -Alternatively use the CLI to create proxies, e.g.: - -```bash -toxiproxy-cli create shopify_test_redis_master -l localhost:26379 -u localhost:6379 -``` - -We recommend a naming such as the above: `<app>_<environment>_<data store>_<shard>`. -This makes sure there are no clashes between applications using the same -Toxiproxy. - -For large applications we recommend storing the Toxiproxy configurations in a -separate configuration file. We use `config/toxiproxy.json`. This file can be -passed to the server using the `-config` option, or loaded by the application -to use with the `populate` function. - -Use ports outside the ephemeral port range to avoid random port conflicts. -It's `32,768` to `61,000` on Linux by default, see -`/proc/sys/net/ipv4/ip_local_port_range`. - -### 3.
Using Toxiproxy - -To use Toxiproxy, you now need to configure your application to connect through -Toxiproxy. Continuing with our example from step two, we can configure our Redis -client to connect through Toxiproxy: - -```ruby -# old straight to redis -redis = Redis.new(port: 6380) - -# new through toxiproxy -redis = Redis.new(port: 22220) -``` - -Now you can tamper with it through the Toxiproxy API. In Ruby: - -```ruby -redis = Redis.new(port: 22220) - -Toxiproxy[:shopify_test_redis_master].downstream(:latency, latency: 1000).apply do - redis.get("test") # will take 1s -end -``` - -Or via the CLI: - -```bash -toxiproxy-cli toxic add shopify_test_redis_master -t latency -a latency=1000 -``` - -Please consult your respective client library on usage. - -### Toxics - -Toxics manipulate the pipe between the client and upstream. They can be added -and removed from proxies using the [HTTP api](#http-api). Each toxic has its own parameters -to change how it affects the proxy links. - -For documentation on implementing custom toxics, see [CREATING_TOXICS.md](https://github.com/Shopify/toxiproxy/blob/master/CREATING_TOXICS.md) - -#### latency - -Add a delay to all data going through the proxy. The delay is equal to `latency` +/- `jitter`. - -Attributes: - - - `latency`: time in milliseconds - - `jitter`: time in milliseconds - -#### down - -Bringing a service down is not technically a toxic in the implementation of -Toxiproxy. This is done by `POST`ing to `/proxies/{proxy}` and setting the -`enabled` field to `false`. - -#### bandwidth - -Limit a connection to a maximum number of kilobytes per second. - -Attributes: - - - `rate`: rate in KB/s - -#### slow_close - -Delay the TCP socket from closing until `delay` has elapsed. - -Attributes: - - - `delay`: time in milliseconds - -#### timeout - -Stops all data from getting through, and closes the connection after `timeout`. If -`timeout` is 0, the connection won't close, and data will be delayed until the -toxic is removed. - -Attributes: - - - `timeout`: time in milliseconds - -#### slicer - -Slices TCP data up into small bits, optionally adding a delay between each -sliced "packet". - -Attributes: - - - `average_size`: size in bytes of an average packet - - `size_variation`: variation in bytes of an average packet (should be smaller than average_size) - - `delay`: time in microseconds to delay each packet by - -#### limit_data - -Closes connection when transmitted data exceeded limit. - - - `bytes`: number of bytes it should transmit before connection is closed - -### HTTP API - -All communication with the Toxiproxy daemon from the client happens through the -HTTP interface, which is described here. - -Toxiproxy listens for HTTP on port **8474**. - -#### Proxy fields: - - - `name`: proxy name (string) - - `listen`: listen address (string) - - `upstream`: proxy upstream address (string) - - `enabled`: true/false (defaults to true on creation) - -To change a proxy's name, it must be deleted and recreated. - -Changing the `listen` or `upstream` fields will restart the proxy and drop any active connections. - -If `listen` is specified with a port of 0, toxiproxy will pick an ephemeral port. The `listen` field -in the response will be updated with the actual port. - -If you change `enabled` to `false`, it will take down the proxy. You can switch it -back to `true` to reenable it. 
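For illustration, a proxy object in the form accepted by `POST /proxies` (and returned by the endpoints below) could look like the following; the name and addresses simply reuse the populate example from earlier:

```json
{
  "name": "shopify_test_redis_master",
  "listen": "127.0.0.1:22220",
  "upstream": "127.0.0.1:6379",
  "enabled": true
}
```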
#### Toxic fields:

 - `name`: toxic name (string, defaults to `<type>_<stream>`)
 - `type`: toxic type (string)
 - `stream`: link direction to affect (defaults to `downstream`)
 - `toxicity`: probability of the toxic being applied to a link (defaults to 1.0, 100%)
 - `attributes`: a map of toxic-specific attributes

See [Toxics](#toxics) for toxic-specific attributes.

The `stream` direction must be either `upstream` or `downstream`. `upstream` applies
the toxic on the `client -> server` connection, while `downstream` applies the toxic
on the `server -> client` connection. This can be used to modify requests and responses
separately.

#### Endpoints

All endpoints accept and return JSON.

 - **GET /proxies** - List existing proxies and their toxics
 - **POST /proxies** - Create a new proxy
 - **POST /populate** - Create or replace a list of proxies
 - **GET /proxies/{proxy}** - Show the proxy with all its active toxics
 - **POST /proxies/{proxy}** - Update a proxy's fields
 - **DELETE /proxies/{proxy}** - Delete an existing proxy
 - **GET /proxies/{proxy}/toxics** - List active toxics
 - **POST /proxies/{proxy}/toxics** - Create a new toxic
 - **GET /proxies/{proxy}/toxics/{toxic}** - Get an active toxic's fields
 - **POST /proxies/{proxy}/toxics/{toxic}** - Update an active toxic
 - **DELETE /proxies/{proxy}/toxics/{toxic}** - Remove an active toxic
 - **POST /reset** - Enable all proxies and remove all active toxics
 - **GET /version** - Returns the server version number

#### Populating Proxies

Proxies can be added and configured in bulk using the `/populate` endpoint. This is done by
passing a JSON array of proxies to Toxiproxy. If a proxy with the same name already exists,
it will be compared to the new proxy and replaced if the `upstream` and `listen` addresses don't match.

A `/populate` call can be included, for example, at application start to ensure all required proxies
exist. It is safe to make this call several times, since proxies will be untouched as long as their
fields are consistent with the new data.

### CLI Example

```bash
$ toxiproxy-cli create redis -l localhost:26379 -u localhost:6379
Created new proxy redis
$ toxiproxy-cli list
Listen              Upstream            Name    Enabled     Toxics
======================================================================
127.0.0.1:26379     localhost:6379      redis   true        None

Hint: inspect toxics with `toxiproxy-cli inspect <proxyName>`
```

```bash
$ redis-cli -p 26379
127.0.0.1:26379> SET omg pandas
OK
127.0.0.1:26379> GET omg
"pandas"
```

```bash
$ toxiproxy-cli toxic add redis -t latency -a latency=1000
Added downstream latency toxic 'latency_downstream' on proxy 'redis'
```

```bash
$ redis-cli -p 26379
127.0.0.1:26379> GET omg
"pandas"
(1.00s)
127.0.0.1:26379> DEL omg
(integer) 1
(1.00s)
```

```bash
$ toxiproxy-cli toxic remove redis -n latency_downstream
Removed toxic 'latency_downstream' on proxy 'redis'
```

```bash
$ redis-cli -p 26379
127.0.0.1:26379> GET omg
(nil)
```

```bash
$ toxiproxy-cli delete redis
Deleted proxy redis
```

```bash
$ redis-cli -p 26379
Could not connect to Redis at 127.0.0.1:26379: Connection refused
```
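The CLI walkthrough above maps one-to-one onto the toxic endpoints. As a rough illustration (not part of the original README), the following Go sketch adds and then removes the same latency toxic over HTTP, assuming the `redis` proxy from the walkthrough exists and the server is on the default port 8474.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	base := "http://localhost:8474/proxies/redis/toxics"

	// Add a 1000ms latency toxic on the downstream link. Omitted fields fall
	// back to their defaults (name "latency_downstream", toxicity 1.0).
	body := []byte(`{"type": "latency", "stream": "downstream", "attributes": {"latency": 1000}}`)
	resp, err := http.Post(base, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("add toxic:", resp.Status)

	// Remove it again with DELETE /proxies/{proxy}/toxics/{toxic}.
	req, _ := http.NewRequest(http.MethodDelete, base+"/latency_downstream", nil)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("remove toxic:", resp.Status) // 204 No Content on success
}
```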
### Frequently Asked Questions

**How fast is Toxiproxy?** The speed of Toxiproxy depends largely on your hardware,
but you can expect a latency of *< 100µs* when no toxics are enabled. When running
with `GOMAXPROCS=4` on a MacBook Pro we achieved *~1000MB/s* throughput, and as high
as *2400MB/s* on a higher-end desktop. Basically, you can expect Toxiproxy to move
data around at least as fast as the app you're testing.

**Can Toxiproxy do randomized testing?** Many of the available toxics can be configured
to have randomness, such as `jitter` in the `latency` toxic. There is also a
global `toxicity` parameter that specifies the percentage of connections a toxic
will affect. This is most useful for things like the `timeout` toxic, which would
allow X% of connections to time out.

**I am not seeing my Toxiproxy actions reflected for MySQL**. MySQL will prefer
the local Unix domain socket for some clients, no matter which port you pass it,
if the host is set to `localhost`. Configure your MySQL server to not create a
socket, and use `127.0.0.1` as the host. Remember to remove the old socket
after you restart the server.

**Toxiproxy causes intermittent connection failures**. Use ports outside the
ephemeral port range to avoid random port conflicts. It's `32,768` to `61,000` on
Linux by default; see `/proc/sys/net/ipv4/ip_local_port_range`.

**Should I run a Toxiproxy for each application?** No, we recommend using the
same Toxiproxy for all applications. To distinguish between services we
recommend naming your proxies with the scheme: `<app>_<env>_<data store>_<shard>`.
For example, `shopify_test_redis_master` or `shopify_development_mysql_1`.

### Development

* `make`. Build a toxiproxy development binary for the current platform.
* `make all`. Build Toxiproxy binaries and packages for all platforms. Requires
  Go compiled with cross-compilation support for Linux and Darwin (amd64), as
  well as [`fpm`](https://github.com/jordansissel/fpm) in your `$PATH` to
  build the Debian package.
* `make test`. Run the Toxiproxy tests.
* `make darwin`. Build binary for Darwin.
* `make linux`. Build binary for Linux.
* `make windows`. Build binary for Windows.

### Release

1. Ensure this release has run internally for `Shopify/shopify` for at least a
   day, which is the best fuzzy test for robustness we have.
2. Update `CHANGELOG.md`
3. Bump `VERSION`
4. Change versions in `README.md`
5. Commit
6. Tag
7. `make release` to create binaries, packages and push new Docker image
8. Create a [GitHub draft release](https://github.com/Shopify/toxiproxy/releases/new) against the new tag and upload binaries and the Debian package
9.
[Bump version for Homebrew](https://github.com/Shopify/homebrew-shopify/blob/master/toxiproxy.rb#L9) - - -[blog]: https://engineering.shopify.com/17489072-building-and-testing-resilient-ruby-on-rails-applications diff --git a/vendor/github.com/Shopify/toxiproxy/VERSION b/vendor/github.com/Shopify/toxiproxy/VERSION deleted file mode 100644 index 3e3c2f1..0000000 --- a/vendor/github.com/Shopify/toxiproxy/VERSION +++ /dev/null @@ -1 +0,0 @@ -2.1.1 diff --git a/vendor/github.com/Shopify/toxiproxy/api.go b/vendor/github.com/Shopify/toxiproxy/api.go deleted file mode 100644 index 25748c3..0000000 --- a/vendor/github.com/Shopify/toxiproxy/api.go +++ /dev/null @@ -1,448 +0,0 @@ -package toxiproxy - -import ( - "encoding/json" - "fmt" - "log" - "net" - "net/http" - "os" - - "github.com/Shopify/toxiproxy/toxics" - "github.com/Sirupsen/logrus" - "github.com/gorilla/mux" -) - -type ApiServer struct { - Collection *ProxyCollection -} - -func NewServer() *ApiServer { - return &ApiServer{ - Collection: NewProxyCollection(), - } -} - -func (server *ApiServer) PopulateConfig(filename string) { - file, err := os.Open(filename) - if err != nil { - logrus.WithFields(logrus.Fields{ - "config": filename, - "error": err, - }).Error("Error reading config file") - } else { - proxies, err := server.Collection.PopulateJson(file) - if err != nil { - logrus.WithFields(logrus.Fields{ - "config": filename, - "error": err, - }).Error("Failed to populate proxies from file") - } else { - logrus.WithFields(logrus.Fields{ - "config": filename, - "proxies": len(proxies), - }).Info("Populated proxies from file") - } - } -} - -func (server *ApiServer) Listen(host string, port string) { - r := mux.NewRouter() - r.HandleFunc("/reset", server.ResetState).Methods("POST") - r.HandleFunc("/proxies", server.ProxyIndex).Methods("GET") - r.HandleFunc("/proxies", server.ProxyCreate).Methods("POST") - r.HandleFunc("/populate", server.Populate).Methods("POST") - r.HandleFunc("/proxies/{proxy}", server.ProxyShow).Methods("GET") - r.HandleFunc("/proxies/{proxy}", server.ProxyUpdate).Methods("POST") - r.HandleFunc("/proxies/{proxy}", server.ProxyDelete).Methods("DELETE") - r.HandleFunc("/proxies/{proxy}/toxics", server.ToxicIndex).Methods("GET") - r.HandleFunc("/proxies/{proxy}/toxics", server.ToxicCreate).Methods("POST") - r.HandleFunc("/proxies/{proxy}/toxics/{toxic}", server.ToxicShow).Methods("GET") - r.HandleFunc("/proxies/{proxy}/toxics/{toxic}", server.ToxicUpdate).Methods("POST") - r.HandleFunc("/proxies/{proxy}/toxics/{toxic}", server.ToxicDelete).Methods("DELETE") - - r.HandleFunc("/version", server.Version).Methods("GET") - http.Handle("/", r) - - logrus.WithFields(logrus.Fields{ - "host": host, - "port": port, - "version": Version, - }).Info("API HTTP server starting") - - err := http.ListenAndServe(net.JoinHostPort(host, port), nil) - if err != nil { - log.Fatal("ListenAndServe: ", err) - } -} - -func (server *ApiServer) ProxyIndex(response http.ResponseWriter, request *http.Request) { - proxies := server.Collection.Proxies() - marshalData := make(map[string]interface{}, len(proxies)) - - for name, proxy := range proxies { - marshalData[name] = proxyWithToxics(proxy) - } - - data, err := json.Marshal(marshalData) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", "application/json") - _, err = response.Write(data) - if err != nil { - logrus.Warn("ProxyIndex: Failed to write response to client", err) - } -} - -func (server *ApiServer) ResetState(response http.ResponseWriter, request 
*http.Request) { - proxies := server.Collection.Proxies() - - for _, proxy := range proxies { - err := proxy.Start() - if err != ErrProxyAlreadyStarted && apiError(response, err) { - return - } - - proxy.Toxics.ResetToxics() - } - - response.WriteHeader(http.StatusNoContent) - _, err := response.Write(nil) - if err != nil { - logrus.Warn("ResetState: Failed to write headers to client", err) - } -} - -func (server *ApiServer) ProxyCreate(response http.ResponseWriter, request *http.Request) { - // Default fields to enable the proxy right away - input := Proxy{Enabled: true} - err := json.NewDecoder(request.Body).Decode(&input) - if apiError(response, joinError(err, ErrBadRequestBody)) { - return - } - - if len(input.Name) < 1 { - apiError(response, joinError(fmt.Errorf("name"), ErrMissingField)) - return - } - if len(input.Upstream) < 1 { - apiError(response, joinError(fmt.Errorf("upstream"), ErrMissingField)) - return - } - - proxy := NewProxy() - proxy.Name = input.Name - proxy.Listen = input.Listen - proxy.Upstream = input.Upstream - - err = server.Collection.Add(proxy, input.Enabled) - if apiError(response, err) { - return - } - - data, err := json.Marshal(proxyWithToxics(proxy)) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", "application/json") - response.WriteHeader(http.StatusCreated) - _, err = response.Write(data) - if err != nil { - logrus.Warn("ProxyCreate: Failed to write response to client", err) - } -} - -func (server *ApiServer) Populate(response http.ResponseWriter, request *http.Request) { - proxies, err := server.Collection.PopulateJson(request.Body) - - apiErr, ok := err.(*ApiError) - if !ok && err != nil { - logrus.Warn("Error did not include status code: ", err) - apiErr = &ApiError{err.Error(), http.StatusInternalServerError} - } - - data, err := json.Marshal(struct { - *ApiError `json:",omitempty"` - Proxies []proxyToxics `json:"proxies"` - }{apiErr, proxiesWithToxics(proxies)}) - if apiError(response, err) { - return - } - - responseCode := http.StatusCreated - if apiErr != nil { - responseCode = apiErr.StatusCode - } - - response.Header().Set("Content-Type", "application/json") - response.WriteHeader(responseCode) - _, err = response.Write(data) - if err != nil { - logrus.Warn("Populate: Failed to write response to client", err) - } -} - -func (server *ApiServer) ProxyShow(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - proxy, err := server.Collection.Get(vars["proxy"]) - if apiError(response, err) { - return - } - - data, err := json.Marshal(proxyWithToxics(proxy)) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", "application/json") - _, err = response.Write(data) - if err != nil { - logrus.Warn("ProxyShow: Failed to write response to client", err) - } -} - -func (server *ApiServer) ProxyUpdate(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - proxy, err := server.Collection.Get(vars["proxy"]) - if apiError(response, err) { - return - } - - // Default fields are the same as existing proxy - input := Proxy{Listen: proxy.Listen, Upstream: proxy.Upstream, Enabled: proxy.Enabled} - err = json.NewDecoder(request.Body).Decode(&input) - if apiError(response, joinError(err, ErrBadRequestBody)) { - return - } - - err = proxy.Update(&input) - if apiError(response, err) { - return - } - - data, err := json.Marshal(proxyWithToxics(proxy)) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", 
"application/json") - _, err = response.Write(data) - if err != nil { - logrus.Warn("ProxyUpdate: Failed to write response to client", err) - } -} - -func (server *ApiServer) ProxyDelete(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - err := server.Collection.Remove(vars["proxy"]) - if apiError(response, err) { - return - } - - response.WriteHeader(http.StatusNoContent) - _, err = response.Write(nil) - if err != nil { - logrus.Warn("ProxyDelete: Failed to write headers to client", err) - } -} - -func (server *ApiServer) ToxicIndex(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - proxy, err := server.Collection.Get(vars["proxy"]) - if apiError(response, err) { - return - } - - toxics := proxy.Toxics.GetToxicArray() - data, err := json.Marshal(toxics) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", "application/json") - _, err = response.Write(data) - if err != nil { - logrus.Warn("ToxicIndex: Failed to write response to client", err) - } -} - -func (server *ApiServer) ToxicCreate(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - proxy, err := server.Collection.Get(vars["proxy"]) - if apiError(response, err) { - return - } - - toxic, err := proxy.Toxics.AddToxicJson(request.Body) - if apiError(response, err) { - return - } - - data, err := json.Marshal(toxic) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", "application/json") - _, err = response.Write(data) - if err != nil { - logrus.Warn("ToxicCreate: Failed to write response to client", err) - } -} - -func (server *ApiServer) ToxicShow(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - proxy, err := server.Collection.Get(vars["proxy"]) - if apiError(response, err) { - return - } - - toxic := proxy.Toxics.GetToxic(vars["toxic"]) - if toxic == nil { - apiError(response, ErrToxicNotFound) - return - } - - data, err := json.Marshal(toxic) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", "application/json") - _, err = response.Write(data) - if err != nil { - logrus.Warn("ToxicShow: Failed to write response to client", err) - } -} - -func (server *ApiServer) ToxicUpdate(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - proxy, err := server.Collection.Get(vars["proxy"]) - if apiError(response, err) { - return - } - - toxic, err := proxy.Toxics.UpdateToxicJson(vars["toxic"], request.Body) - if apiError(response, err) { - return - } - - data, err := json.Marshal(toxic) - if apiError(response, err) { - return - } - - response.Header().Set("Content-Type", "application/json") - _, err = response.Write(data) - if err != nil { - logrus.Warn("ToxicUpdate: Failed to write response to client", err) - } -} - -func (server *ApiServer) ToxicDelete(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - - proxy, err := server.Collection.Get(vars["proxy"]) - if apiError(response, err) { - return - } - - err = proxy.Toxics.RemoveToxic(vars["toxic"]) - if apiError(response, err) { - return - } - - response.WriteHeader(http.StatusNoContent) - _, err = response.Write(nil) - if err != nil { - logrus.Warn("ToxicDelete: Failed to write headers to client", err) - } -} - -func (server *ApiServer) Version(response http.ResponseWriter, request *http.Request) { - response.Header().Set("Content-Type", "text/plain") - _, err := 
response.Write([]byte(Version)) - if err != nil { - logrus.Warn("Version: Failed to write response to client", err) - } -} - -type ApiError struct { - Message string `json:"error"` - StatusCode int `json:"status"` -} - -func (e *ApiError) Error() string { - return e.Message -} - -func newError(msg string, status int) *ApiError { - return &ApiError{msg, status} -} - -func joinError(err error, wrapper *ApiError) *ApiError { - if err != nil { - return &ApiError{wrapper.Message + ": " + err.Error(), wrapper.StatusCode} - } - return nil -} - -var ( - ErrBadRequestBody = newError("bad request body", http.StatusBadRequest) - ErrMissingField = newError("missing required field", http.StatusBadRequest) - ErrProxyNotFound = newError("proxy not found", http.StatusNotFound) - ErrProxyAlreadyExists = newError("proxy already exists", http.StatusConflict) - ErrInvalidStream = newError("stream was invalid, can be either upstream or downstream", http.StatusBadRequest) - ErrInvalidToxicType = newError("invalid toxic type", http.StatusBadRequest) - ErrToxicAlreadyExists = newError("toxic already exists", http.StatusConflict) - ErrToxicNotFound = newError("toxic not found", http.StatusNotFound) -) - -func apiError(resp http.ResponseWriter, err error) bool { - obj, ok := err.(*ApiError) - if !ok && err != nil { - logrus.Warn("Error did not include status code: ", err) - obj = &ApiError{err.Error(), http.StatusInternalServerError} - } - - if obj == nil { - return false - } - - data, err2 := json.Marshal(obj) - if err2 != nil { - logrus.Warn("Error json encoding error (╯°□°)╯︵ ┻━┻ ", err2) - } - resp.Header().Set("Content-Type", "application/json") - http.Error(resp, string(data), obj.StatusCode) - - return true -} - -type proxyToxics struct { - *Proxy - Toxics []toxics.Toxic `json:"toxics"` -} - -func proxyWithToxics(proxy *Proxy) (result proxyToxics) { - result.Proxy = proxy - result.Toxics = proxy.Toxics.GetToxicArray() - return -} - -func proxiesWithToxics(proxies []*Proxy) (result []proxyToxics) { - for _, proxy := range proxies { - result = append(result, proxyWithToxics(proxy)) - } - return -} diff --git a/vendor/github.com/Shopify/toxiproxy/api_test.go b/vendor/github.com/Shopify/toxiproxy/api_test.go deleted file mode 100644 index c185f7d..0000000 --- a/vendor/github.com/Shopify/toxiproxy/api_test.go +++ /dev/null @@ -1,953 +0,0 @@ -package toxiproxy_test - -import ( - "bytes" - "io/ioutil" - "net/http" - "testing" - "time" - - "github.com/Shopify/toxiproxy" - tclient "github.com/Shopify/toxiproxy/client" -) - -var testServer *toxiproxy.ApiServer - -var client = tclient.NewClient("http://127.0.0.1:8475") - -func WithServer(t *testing.T, f func(string)) { - // Make sure only one server is running at a time. Apparently there's no clean - // way to shut it down between each test run. - if testServer == nil { - testServer = toxiproxy.NewServer() - go testServer.Listen("localhost", "8475") - - // Allow server to start. There's no clean way to know when it listens. 
- time.Sleep(50 * time.Millisecond) - } - - defer func() { - err := testServer.Collection.Clear() - if err != nil { - t.Error("Failed to clear collection", err) - } - }() - - f("http://localhost:8475") -} - -func TestIndexWithNoProxies(t *testing.T) { - WithServer(t, func(addr string) { - client := tclient.NewClient(addr) - proxies, err := client.Proxies() - if err != nil { - t.Fatal("Failed getting proxies:", err) - } - - if len(proxies) > 0 { - t.Fatal("Expected no proxies, got:", proxies) - } - }) -} - -func TestCreateProxyBlankName(t *testing.T) { - WithServer(t, func(addr string) { - _, err := client.CreateProxy("", "", "") - if err == nil { - t.Fatal("Expected error creating proxy, got nil") - } else if err.Error() != "Create: HTTP 400: missing required field: name" { - t.Fatal("Expected different error creating proxy:", err) - } - }) -} - -func TestCreateProxyBlankUpstream(t *testing.T) { - WithServer(t, func(addr string) { - _, err := client.CreateProxy("test", "", "") - if err == nil { - t.Fatal("Expected error creating proxy, got nil") - } else if err.Error() != "Create: HTTP 400: missing required field: upstream" { - t.Fatal("Expected different error creating proxy:", err) - } - }) -} - -func TestPopulateProxy(t *testing.T) { - WithServer(t, func(addr string) { - testProxies, err := client.Populate([]tclient.Proxy{ - { - Name: "one", - Listen: "localhost:7070", - Upstream: "localhost:7171", - Enabled: true, - }, - { - Name: "two", - Listen: "localhost:7373", - Upstream: "localhost:7474", - Enabled: true, - }, - }) - - if err != nil { - t.Fatal("Unable to populate:", err) - } else if len(testProxies) != 2 { - t.Fatalf("Wrong number of proxies returned: %d != 2", len(testProxies)) - } else if testProxies[0].Name != "one" || testProxies[1].Name != "two" { - t.Fatalf("Wrong proxy names returned: %s, %s", testProxies[0].Name, testProxies[1].Name) - } - - for _, p := range testProxies { - AssertProxyUp(t, p.Listen, true) - } - }) -} - -func TestPopulateDefaultEnabled(t *testing.T) { - WithServer(t, func(addr string) { - request := []byte(`[{"name": "test", "listen": "localhost:7070", "upstream": "localhost:7171"}]`) - - resp, err := http.Post(addr+"/populate", "application/json", bytes.NewReader(request)) - if err != nil { - t.Fatal("Failed to send POST to /populate:", err) - } - - if resp.StatusCode != http.StatusCreated { - message, _ := ioutil.ReadAll(resp.Body) - t.Fatalf("Failed to populate proxy list: HTTP %s\n%s", resp.Status, string(message)) - } - - proxies, err := client.Proxies() - if err != nil { - t.Fatal(err) - } else if len(proxies) != 1 { - t.Fatalf("Wrong number of proxies created: %d != 1", len(proxies)) - } else if _, ok := proxies["test"]; !ok { - t.Fatalf("Wrong proxy name returned") - } - - for _, p := range proxies { - AssertProxyUp(t, p.Listen, true) - } - }) -} - -func TestPopulateDisabledProxy(t *testing.T) { - WithServer(t, func(addr string) { - testProxies, err := client.Populate([]tclient.Proxy{ - { - Name: "one", - Listen: "localhost:7070", - Upstream: "localhost:7171", - Enabled: false, - }, - { - Name: "two", - Listen: "localhost:7373", - Upstream: "localhost:7474", - Enabled: true, - }, - }) - - if err != nil { - t.Fatal("Unable to populate:", err) - } else if len(testProxies) != 2 { - t.Fatalf("Wrong number of proxies returned: %d != 2", len(testProxies)) - } else if testProxies[0].Name != "one" || testProxies[1].Name != "two" { - t.Fatalf("Wrong proxy names returned: %s, %s", testProxies[0].Name, testProxies[1].Name) - } - - AssertProxyUp(t, 
"localhost:7070", false) - AssertProxyUp(t, "localhost:7373", true) - }) -} - -func TestPopulateExistingProxy(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("one", "localhost:7070", "localhost:7171") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - _, err = client.CreateProxy("two", "localhost:7373", "localhost:7474") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - // Create a toxic so we can make sure the proxy wasn't replaced - _, err = testProxy.AddToxic("", "latency", "downstream", 1, nil) - if err != nil { - t.Fatal("Unable to create toxic:", err) - } - - testProxies, err := client.Populate([]tclient.Proxy{ - { - Name: "one", - Listen: "127.0.0.1:7070", // TODO(xthexder): Will replace existing proxy if not resolved ip... - Upstream: "localhost:7171", - Enabled: true, - }, - { - Name: "two", - Listen: "127.0.0.1:7575", - Upstream: "localhost:7676", - Enabled: true, - }, - }) - - if err != nil { - t.Fatal("Unable to populate:", err) - } else if len(testProxies) != 2 { - t.Fatalf("Wrong number of proxies returned: %d != 2", len(testProxies)) - } else if testProxies[0].Name != "one" || testProxies[1].Name != "two" { - t.Fatalf("Wrong proxy names returned: %s, %s", testProxies[0].Name, testProxies[1].Name) - } else if testProxies[0].Listen != "127.0.0.1:7070" || testProxies[1].Listen != "127.0.0.1:7575" { - t.Fatalf("Wrong proxy listen addresses returned: %s, %s", testProxies[0].Listen, testProxies[1].Listen) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Unable to get toxics:", err) - } - if len(toxics) != 1 || toxics[0].Type != "latency" { - t.Fatalf("Populate did not preseve existing proxy. (%d toxics)", len(toxics)) - } - - for _, p := range testProxies { - AssertProxyUp(t, p.Listen, true) - } - }) -} - -func TestPopulateWithBadName(t *testing.T) { - WithServer(t, func(addr string) { - testProxies, err := client.Populate([]tclient.Proxy{ - { - Name: "one", - Listen: "localhost:7070", - Upstream: "localhost:7171", - Enabled: true, - }, - { - Name: "", - Listen: "", - Enabled: true, - }, - }) - - if err == nil { - t.Fatal("Expected Populate to fail.") - } else if err.Error() != "Populate: HTTP 400: missing required field: name at proxy 2" { - t.Fatal("Expected different error during populate:", err) - } else if len(testProxies) != 0 { - t.Fatalf("Wrong number of proxies returned: %d != 0", len(testProxies)) - } - - proxies, err := client.Proxies() - if err != nil { - t.Fatal(err) - } else if len(proxies) != 0 { - t.Fatalf("Expected no proxies to be created: %d != 0", len(proxies)) - } - }) -} - -func TestPopulateProxyWithBadDataShouldReturnError(t *testing.T) { - WithServer(t, func(addr string) { - testProxies, err := client.Populate([]tclient.Proxy{ - { - Name: "one", - Listen: "localhost:7070", - Upstream: "localhost:7171", - Enabled: true, - }, - { - Name: "two", - Listen: "local373", - Upstream: "localhost:7474", - Enabled: true, - }, - { - Name: "three", - Listen: "localhost:7575", - Upstream: "localhost:7676", - Enabled: true, - }, - }) - - if err == nil { - t.Fatal("Expected Populate to fail.") - } else if len(testProxies) != 1 { - t.Fatalf("Wrong number of proxies returned: %d != %d", len(testProxies), 1) - } else if testProxies[0].Name != "one" { - t.Fatalf("Wrong proxy name returned: %s != one", testProxies[0].Name) - } - - for _, p := range testProxies { - AssertProxyUp(t, p.Listen, true) - } - - proxies, err := client.Proxies() - if err != nil { - t.Fatal(err) - } - - for 
_, p := range proxies { - if p.Name == "two" || p.Name == "three" { - t.Fatalf("Proxy %s exists, populate did not fail correctly.") - } - } - }) -} - -func TestListingProxies(t *testing.T) { - WithServer(t, func(addr string) { - _, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - proxies, err := client.Proxies() - if err != nil { - t.Fatal("Error listing proxies:", err) - } - - if len(proxies) == 0 { - t.Fatal("Expected new proxy in list") - } - proxy, ok := proxies["mysql_master"] - if !ok { - t.Fatal("Expected to see mysql_master proxy in list") - } - if proxy.Name != "mysql_master" || proxy.Listen != "127.0.0.1:3310" || proxy.Upstream != "localhost:20001" { - t.Fatalf("Unexpected proxy metadata: %s, %s, %s", proxy.Name, proxy.Listen, proxy.Upstream) - } - AssertToxicExists(t, proxy.ActiveToxics, "latency", "", "", false) - }) -} - -func TestCreateAndGetProxy(t *testing.T) { - WithServer(t, func(addr string) { - _, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - proxy, err := client.Proxy("mysql_master") - if err != nil { - t.Fatal("Unable to retriecve proxy:", err) - } - - if proxy.Name != "mysql_master" || proxy.Listen != "127.0.0.1:3310" || proxy.Upstream != "localhost:20001" || !proxy.Enabled { - t.Fatalf("Unexpected proxy metadata: %s, %s, %s, %v", proxy.Name, proxy.Listen, proxy.Upstream, proxy.Enabled) - } - - AssertToxicExists(t, proxy.ActiveToxics, "latency", "", "", false) - }) -} - -func TestCreateProxyWithSave(t *testing.T) { - WithServer(t, func(addr string) { - testProxy := client.NewProxy() - testProxy.Name = "mysql_master" - testProxy.Listen = "localhost:3310" - testProxy.Upstream = "localhost:20001" - testProxy.Enabled = true - - err := testProxy.Save() - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - proxy, err := client.Proxy("mysql_master") - if err != nil { - t.Fatal("Unable to retriecve proxy:", err) - } - - if proxy.Name != "mysql_master" || proxy.Listen != "127.0.0.1:3310" || proxy.Upstream != "localhost:20001" || !proxy.Enabled { - t.Fatalf("Unexpected proxy metadata: %s, %s, %s, %v", proxy.Name, proxy.Listen, proxy.Upstream, proxy.Enabled) - } - - AssertProxyUp(t, proxy.Listen, true) - }) -} - -func TestCreateDisabledProxy(t *testing.T) { - WithServer(t, func(addr string) { - disabledProxy := client.NewProxy() - disabledProxy.Name = "mysql_master" - disabledProxy.Listen = "localhost:3310" - disabledProxy.Upstream = "localhost:20001" - - err := disabledProxy.Save() - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - proxy, err := client.Proxy("mysql_master") - if err != nil { - t.Fatal("Unable to retriecve proxy:", err) - } - - if proxy.Name != "mysql_master" || proxy.Listen != "localhost:3310" || proxy.Upstream != "localhost:20001" || proxy.Enabled { - t.Fatalf("Unexpected proxy metadata: %s, %s, %s, %v", proxy.Name, proxy.Listen, proxy.Upstream, proxy.Enabled) - } - - AssertProxyUp(t, proxy.Listen, false) - }) -} - -func TestCreateDisabledProxyAndEnable(t *testing.T) { - WithServer(t, func(addr string) { - disabledProxy := client.NewProxy() - disabledProxy.Name = "mysql_master" - disabledProxy.Listen = "localhost:3310" - disabledProxy.Upstream = "localhost:20001" - - err := disabledProxy.Save() - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - proxy, err := client.Proxy("mysql_master") - if err != nil { - 
t.Fatal("Unable to retriecve proxy:", err) - } - - if proxy.Name != "mysql_master" || proxy.Listen != "localhost:3310" || proxy.Upstream != "localhost:20001" || proxy.Enabled { - t.Fatalf("Unexpected proxy metadata: %s, %s, %s, %v", proxy.Name, proxy.Listen, proxy.Upstream, proxy.Enabled) - } - - proxy.Enabled = true - - err = proxy.Save() - if err != nil { - t.Fatal("Failed to update proxy:", err) - } - - AssertProxyUp(t, proxy.Listen, true) - - proxy.Enabled = false - - err = proxy.Save() - if err != nil { - t.Fatal("Failed to update proxy:", err) - } - - AssertProxyUp(t, proxy.Listen, false) - }) -} - -func TestDeleteProxy(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - proxies, err := client.Proxies() - if err != nil { - t.Fatal("Error listing proxies:", err) - } - - if len(proxies) == 0 { - t.Fatal("Expected new proxy in list") - } - - AssertProxyUp(t, testProxy.Listen, true) - - err = testProxy.Delete() - if err != nil { - t.Fatal("Failed deleting proxy:", err) - } - - AssertProxyUp(t, testProxy.Listen, false) - - proxies, err = client.Proxies() - if err != nil { - t.Fatal("Error listing proxies:", err) - } - - if len(proxies) > 0 { - t.Fatal("Expected proxy to be deleted from list") - } - - err = testProxy.Delete() - if err == nil { - t.Fatal("Proxy did not result in not found.") - } else if err.Error() != "Delete: HTTP 404: proxy not found" { - t.Fatal("Incorrect error removing proxy:", err) - } - }) -} - -func TestCreateProxyPortConflict(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = client.CreateProxy("test", "localhost:3310", "localhost:20001") - if err == nil { - t.Fatal("Proxy did not result in conflict.") - } else if err.Error() != "Create: HTTP 500: listen tcp 127.0.0.1:3310: bind: address already in use" { - t.Fatal("Incorrect error adding proxy:", err) - } - - err = testProxy.Delete() - if err != nil { - t.Fatal("Unable to delete proxy:", err) - } - _, err = client.CreateProxy("test", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - }) -} - -func TestCreateProxyNameConflict(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = client.CreateProxy("mysql_master", "localhost:3311", "localhost:20001") - if err == nil { - t.Fatal("Proxy did not result in conflict.") - } else if err.Error() != "Create: HTTP 409: proxy already exists" { - t.Fatal("Incorrect error adding proxy:", err) - } - - err = testProxy.Delete() - if err != nil { - t.Fatal("Unable to delete proxy:", err) - } - _, err = client.CreateProxy("mysql_master", "localhost:3311", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - }) -} - -func TestResetState(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - latency, err := testProxy.AddToxic("", "latency", "downstream", 1, tclient.Attributes{ - "latency": 100, - "jitter": 10, - }) - if err != nil { - t.Fatal("Error setting 
toxic:", err) - } - - if latency.Attributes["latency"] != 100.0 || latency.Attributes["jitter"] != 10.0 { - t.Fatal("Latency toxic did not start up with correct settings") - } - - err = client.ResetState() - if err != nil { - t.Fatal("unable to reset state:", err) - } - - proxies, err := client.Proxies() - if err != nil { - t.Fatal("Error listing proxies:", err) - } - - proxy, ok := proxies["mysql_master"] - if !ok { - t.Fatal("Expected proxy to still exist") - } - if !proxy.Enabled { - t.Fatal("Expected proxy to be enabled") - } - - toxics, err := proxy.Toxics() - if err != nil { - t.Fatal("Error requesting toxics:", err) - } - - AssertToxicExists(t, toxics, "latency", "", "", false) - - AssertProxyUp(t, proxy.Listen, true) - }) -} - -func TestListingToxics(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - - AssertToxicExists(t, toxics, "latency", "", "", false) - }) -} - -func TestAddToxic(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - latency, err := testProxy.AddToxic("foobar", "latency", "downstream", 1, tclient.Attributes{ - "latency": 100, - "jitter": 10, - }) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - if latency.Attributes["latency"] != 100.0 || latency.Attributes["jitter"] != 10.0 { - t.Fatal("Latency toxic did not start up with correct settings") - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - toxic := AssertToxicExists(t, toxics, "foobar", "latency", "downstream", true) - if toxic.Toxicity != 1.0 || toxic.Attributes["latency"] != 100.0 || toxic.Attributes["jitter"] != 10.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - }) -} - -func TestAddMultipleToxics(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = testProxy.AddToxic("latency1", "latency", "downstream", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - _, err = testProxy.AddToxic("latency2", "latency", "downstream", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - AssertToxicExists(t, toxics, "latency1", "latency", "downstream", true) - toxic := AssertToxicExists(t, toxics, "latency2", "latency", "downstream", true) - if toxic.Toxicity != 1.0 || toxic.Attributes["latency"] != 0.0 || toxic.Attributes["jitter"] != 0.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - AssertToxicExists(t, toxics, "latency1", "", "upstream", false) - AssertToxicExists(t, toxics, "latency2", "", "upstream", false) - }) -} - -func TestAddConflictingToxic(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = testProxy.AddToxic("foobar", "latency", "downstream", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - _, err = 
testProxy.AddToxic("foobar", "slow_close", "downstream", 1, nil) - if err == nil { - t.Fatal("Toxic did not result in conflict.") - } else if err.Error() != "AddToxic: HTTP 409: toxic already exists" { - t.Fatal("Incorrect error setting toxic:", err) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - toxic := AssertToxicExists(t, toxics, "foobar", "latency", "downstream", true) - if toxic.Toxicity != 1.0 || toxic.Attributes["latency"] != 0.0 || toxic.Attributes["jitter"] != 0.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - AssertToxicExists(t, toxics, "foobar", "", "upstream", false) - }) -} - -func TestAddConflictingToxicsMultistream(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = testProxy.AddToxic("foobar", "latency", "upstream", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - _, err = testProxy.AddToxic("foobar", "latency", "downstream", 1, nil) - if err == nil { - t.Fatal("Toxic did not result in conflict.") - } else if err.Error() != "AddToxic: HTTP 409: toxic already exists" { - t.Fatal("Incorrect error setting toxic:", err) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - - toxic := AssertToxicExists(t, toxics, "foobar", "latency", "upstream", true) - if toxic.Toxicity != 1.0 || toxic.Attributes["latency"] != 0.0 || toxic.Attributes["jitter"] != 0.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - AssertToxicExists(t, toxics, "foobar", "", "downstream", false) - }) -} - -func TestAddConflictingToxicsMultistreamDefaults(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = testProxy.AddToxic("", "latency", "upstream", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - _, err = testProxy.AddToxic("", "latency", "downstream", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - toxic := AssertToxicExists(t, toxics, "latency_upstream", "latency", "upstream", true) - if toxic.Toxicity != 1.0 || toxic.Attributes["latency"] != 0.0 || toxic.Attributes["jitter"] != 0.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - toxic = AssertToxicExists(t, toxics, "latency_downstream", "latency", "downstream", true) - if toxic.Toxicity != 1.0 || toxic.Attributes["latency"] != 0.0 || toxic.Attributes["jitter"] != 0.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - }) -} - -func TestAddToxicWithToxicity(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - latency, err := testProxy.AddToxic("", "latency", "downstream", 0.2, tclient.Attributes{ - "latency": 100, - "jitter": 10, - }) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - if latency.Toxicity != 0.2 || latency.Attributes["latency"] != 100.0 || latency.Attributes["jitter"] != 10.0 { - t.Fatal("Latency toxic did not start up with correct settings:", latency) - } - - toxics, err := testProxy.Toxics() - if err 
!= nil { - t.Fatal("Error returning toxics:", err) - } - toxic := AssertToxicExists(t, toxics, "latency_downstream", "latency", "downstream", true) - if toxic.Toxicity != 0.2 || toxic.Attributes["latency"] != 100.0 || toxic.Attributes["jitter"] != 10.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - }) -} - -func TestAddNoop(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - noop, err := testProxy.AddToxic("foobar", "noop", "", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - if noop.Toxicity != 1.0 || noop.Name != "foobar" || noop.Type != "noop" || noop.Stream != "downstream" { - t.Fatal("Noop toxic did not start up with correct settings:", noop) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - toxic := AssertToxicExists(t, toxics, "foobar", "noop", "downstream", true) - if toxic.Toxicity != 1.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - }) -} - -func TestUpdateToxics(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - latency, err := testProxy.AddToxic("", "latency", "downstream", -1, tclient.Attributes{ - "latency": 100, - "jitter": 10, - }) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - if latency.Toxicity != 1.0 || latency.Attributes["latency"] != 100.0 || latency.Attributes["jitter"] != 10.0 { - t.Fatal("Latency toxic did not start up with correct settings:", latency) - } - - latency, err = testProxy.UpdateToxic("latency_downstream", 0.5, tclient.Attributes{ - "latency": 1000, - }) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - if latency.Toxicity != 0.5 || latency.Attributes["latency"] != 1000.0 || latency.Attributes["jitter"] != 10.0 { - t.Fatal("Latency toxic did not get updated with the correct settings:", latency) - } - - latency, err = testProxy.UpdateToxic("latency_downstream", -1, tclient.Attributes{ - "latency": 500, - }) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - if latency.Toxicity != 0.5 || latency.Attributes["latency"] != 500.0 || latency.Attributes["jitter"] != 10.0 { - t.Fatal("Latency toxic did not get updated with the correct settings:", latency) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - - toxic := AssertToxicExists(t, toxics, "latency_downstream", "latency", "downstream", true) - if toxic.Toxicity != 0.5 || toxic.Attributes["latency"] != 500.0 || toxic.Attributes["jitter"] != 10.0 { - t.Fatal("Toxic was not read back correctly:", toxic) - } - }) -} - -func TestRemoveToxic(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = testProxy.AddToxic("", "latency", "downstream", 1, nil) - if err != nil { - t.Fatal("Error setting toxic:", err) - } - - toxics, err := testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - - toxic := AssertToxicExists(t, toxics, "latency_downstream", "latency", "downstream", true) - if toxic.Toxicity != 1.0 || toxic.Attributes["latency"] != 0.0 || toxic.Attributes["jitter"] != 0.0 { - 
t.Fatal("Toxic was not read back correctly:", toxic) - } - - err = testProxy.RemoveToxic("latency_downstream") - if err != nil { - t.Fatal("Error removing toxic:", err) - } - - toxics, err = testProxy.Toxics() - if err != nil { - t.Fatal("Error returning toxics:", err) - } - AssertToxicExists(t, toxics, "latency_downstream", "", "", false) - }) -} - -func TestVersionEndpointReturnsVersion(t *testing.T) { - WithServer(t, func(addr string) { - resp, err := http.Get(addr + "/version") - if err != nil { - t.Fatal("Failed to get index", err) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal("Unable to read body from response") - } - - if string(body) != toxiproxy.Version { - t.Fatal("Expected to return Version from /version, got:", string(body)) - } - }) -} - -func TestInvalidStream(t *testing.T) { - WithServer(t, func(addr string) { - testProxy, err := client.CreateProxy("mysql_master", "localhost:3310", "localhost:20001") - if err != nil { - t.Fatal("Unable to create proxy:", err) - } - - _, err = testProxy.AddToxic("", "latency", "walrustream", 1, nil) - if err == nil { - t.Fatal("Error setting toxic:", err) - } - }) -} - -func AssertToxicExists(t *testing.T, toxics tclient.Toxics, name, typeName, stream string, exists bool) *tclient.Toxic { - var toxic *tclient.Toxic - var actualType, actualStream string - - for _, tox := range toxics { - if name == tox.Name { - toxic = &tox - actualType = tox.Type - actualStream = tox.Stream - } - } - if exists { - if toxic == nil { - t.Fatalf("Expected to see %s toxic in list", name) - } else if actualType != typeName { - t.Fatalf("Expected %s to be of type %s, found %s", name, typeName, actualType) - } else if actualStream != stream { - t.Fatalf("Expected %s to be in stream %s, found %s", name, stream, actualStream) - } - } else if toxic != nil && actualStream == stream { - t.Fatalf("Expected %s toxic to be missing from list, found type %s", name, actualType) - } - return toxic -} diff --git a/vendor/github.com/Shopify/toxiproxy/circle.yml b/vendor/github.com/Shopify/toxiproxy/circle.yml deleted file mode 100644 index aadf39d..0000000 --- a/vendor/github.com/Shopify/toxiproxy/circle.yml +++ /dev/null @@ -1,8 +0,0 @@ -# Don't let Circle try to do anything clever with this Go project. We isolate -# with godep to avoid `go get` going nuts. 
-dependencies: - override: - -test: - override: - - make test diff --git a/vendor/github.com/Shopify/toxiproxy/cli/cli.go b/vendor/github.com/Shopify/toxiproxy/cli/cli.go deleted file mode 100644 index 38d31c6..0000000 --- a/vendor/github.com/Shopify/toxiproxy/cli/cli.go +++ /dev/null @@ -1,603 +0,0 @@ -package main - -import ( - "fmt" - "os" - "sort" - "strconv" - "strings" - - toxiproxyServer "github.com/Shopify/toxiproxy" - "github.com/Shopify/toxiproxy/client" - "github.com/urfave/cli" - "golang.org/x/crypto/ssh/terminal" -) - -const ( - RED = "\x1b[31m" - GREEN = "\x1b[32m" - YELLOW = "\x1b[33m" - BLUE = "\x1b[34m" - CYAN = "\x1b[36m" - PURPLE = "\x1b[35m" - GRAY = "\x1b[37m" - NONE = "\x1b[0m" -) - -func color(color string) string { - if isTTY { - return color - } else { - return "" - } -} - -var toxicDescription = ` - Default Toxics: - latency: delay all data +/- jitter - latency=,jitter= - - bandwidth: limit to max kb/s - rate= - - slow_close: delay from closing - delay= - - timeout: stop all data and close after timeout - timeout= - - slicer: slice data into bits with optional delay - average_size=,size_variation=,delay= - - toxic add: - usage: toxiproxy-cli add --type --toxicName \ - --attribute --upstream --downstream - - example: toxiproxy-cli toxic add myProxy -t latency -n myToxic -a latency=100 -a jitter=50 - - toxic update: - usage: toxiproxy-cli update --toxicName \ - --attribute --attribute - - example: toxiproxy-cli toxic update myProxy -n myToxic -a jitter=25 - - toxic delete: - usage: toxiproxy-cli update --toxicName - - example: toxiproxy-cli toxic delete myProxy -n myToxic -` - -var hostname string -var isTTY bool - -func main() { - app := cli.NewApp() - app.Name = "toxiproxy-cli" - app.Version = toxiproxyServer.Version - app.Usage = "Simulate network and system conditions" - app.Commands = []cli.Command{ - { - Name: "list", - Usage: "list all proxies\n\tusage: 'toxiproxy-cli list'\n", - Aliases: []string{"l", "li", "ls"}, - Action: withToxi(list), - }, - { - Name: "inspect", - Aliases: []string{"i", "ins"}, - Usage: "inspect a single proxy\n\tusage: 'toxiproxy-cli inspect '\n", - Action: withToxi(inspectProxy), - }, - { - Name: "create", - Usage: "create a new proxy\n\tusage: 'toxiproxy-cli create --listen --upstream '\n", - Aliases: []string{"c", "new"}, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "listen, l", - Usage: "proxy will listen on this address", - }, - cli.StringFlag{ - Name: "upstream, u", - Usage: "proxy will forward to this address", - }, - }, - Action: withToxi(createProxy), - }, - { - Name: "toggle", - Usage: "\ttoggle enabled status on a proxy\n\t\tusage: 'toxiproxy-cli toggle '\n", - Aliases: []string{"tog"}, - Action: withToxi(toggleProxy), - }, - { - Name: "delete", - Usage: "\tdelete a proxy\n\t\tusage: 'toxiproxy-cli delete '\n", - Aliases: []string{"d"}, - Action: withToxi(deleteProxy), - }, - { - Name: "toxic", - Aliases: []string{"t"}, - Usage: "\tadd, remove or update a toxic\n\t\tusage: see 'toxiproxy-cli toxic'\n", - Description: toxicDescription, - Subcommands: []cli.Command{ - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a new toxic", - ArgsUsage: "", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "toxicName, n", - Usage: "name of the toxic", - }, - cli.StringFlag{ - Name: "type, t", - Usage: "type of toxic", - }, - cli.StringFlag{ - Name: "toxicity, tox", - Usage: "toxicity of toxic", - }, - cli.StringSliceFlag{ - Name: "attribute, a", - Usage: "toxic attribute in key=value format", - }, - cli.BoolFlag{ - Name: 
"upstream, u", - Usage: "add toxic to upstream", - }, - cli.BoolFlag{ - Name: "downstream, d", - Usage: "add toxic to downstream", - }, - }, - Action: withToxi(addToxic), - }, - { - Name: "update", - Aliases: []string{"u"}, - Usage: "update an enabled toxic", - ArgsUsage: "", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "toxicName, n", - Usage: "name of the toxic", - }, - cli.StringFlag{ - Name: "toxicity, tox", - Usage: "toxicity of toxic", - }, - cli.StringSliceFlag{ - Name: "attribute, a", - Usage: "toxic attribute in key=value format", - }, - }, - Action: withToxi(updateToxic), - }, - { - Name: "remove", - Aliases: []string{"r", "delete", "d"}, - Usage: "remove an enabled toxic", - ArgsUsage: "", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "toxicName, n", - Usage: "name of the toxic", - }, - }, - Action: withToxi(removeToxic), - }, - }, - }, - } - cli.HelpFlag = cli.BoolFlag{ - Name: "help", - Usage: "show help", - } - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "host, h", - Value: "http://localhost:8474", - Usage: "toxiproxy host to connect to", - Destination: &hostname, - }, - } - - isTTY = terminal.IsTerminal(int(os.Stdout.Fd())) - - app.Run(os.Args) -} - -type toxiAction func(*cli.Context, *toxiproxy.Client) error - -func withToxi(f toxiAction) func(*cli.Context) error { - return func(c *cli.Context) error { - toxiproxyClient := toxiproxy.NewClient(hostname) - return f(c, toxiproxyClient) - } -} - -func list(c *cli.Context, t *toxiproxy.Client) error { - proxies, err := t.Proxies() - if err != nil { - return errorf("Failed to retrieve proxies: %s", err) - } - - var proxyNames []string - for proxyName := range proxies { - proxyNames = append(proxyNames, proxyName) - } - sort.Strings(proxyNames) - - if isTTY { - fmt.Printf("%sName\t\t\t%sListen\t\t%sUpstream\t\t%sEnabled\t\t%sToxics\n%s", color(GREEN), color(BLUE), - color(YELLOW), color(PURPLE), color(RED), color(NONE)) - fmt.Printf("%s======================================================================================\n", color(NONE)) - - if len(proxyNames) == 0 { - fmt.Printf("%sno proxies\n%s", color(RED), color(NONE)) - hint("create a proxy with `toxiproxy-cli create`") - return nil - } - } - - for _, proxyName := range proxyNames { - proxy := proxies[proxyName] - numToxics := strconv.Itoa(len(proxy.ActiveToxics)) - if numToxics == "0" && isTTY { - numToxics = "None" - } - printWidth(color(colorEnabled(proxy.Enabled)), proxy.Name, 3) - printWidth(BLUE, proxy.Listen, 2) - printWidth(YELLOW, proxy.Upstream, 3) - printWidth(PURPLE, enabledText(proxy.Enabled), 2) - fmt.Printf("%s%s%s\n", color(RED), numToxics, color(NONE)) - } - hint("inspect toxics with `toxiproxy-cli inspect `") - return nil -} - -func inspectProxy(c *cli.Context, t *toxiproxy.Client) error { - proxyName := c.Args().First() - if proxyName == "" { - cli.ShowSubcommandHelp(c) - return errorf("Proxy name is required as the first argument.\n") - } - - proxy, err := t.Proxy(proxyName) - if err != nil { - return errorf("Failed to retrieve proxy %s: %s\n", proxyName, err.Error()) - } - - if isTTY { - fmt.Printf("%sName: %s%s\t", color(PURPLE), color(NONE), proxy.Name) - fmt.Printf("%sListen: %s%s\t", color(BLUE), color(NONE), proxy.Listen) - fmt.Printf("%sUpstream: %s%s\n", color(YELLOW), color(NONE), proxy.Upstream) - fmt.Printf("%s======================================================================\n", color(NONE)) - - splitToxics := func(toxics toxiproxy.Toxics) (toxiproxy.Toxics, toxiproxy.Toxics) { - upstream := make(toxiproxy.Toxics, 0) - downstream 
:= make(toxiproxy.Toxics, 0) - for _, toxic := range toxics { - if toxic.Stream == "upstream" { - upstream = append(upstream, toxic) - } else { - downstream = append(downstream, toxic) - } - } - return upstream, downstream - } - - if len(proxy.ActiveToxics) == 0 { - fmt.Printf("%sProxy has no toxics enabled.\n%s", color(RED), color(NONE)) - } else { - up, down := splitToxics(proxy.ActiveToxics) - listToxics(up, "Upstream") - fmt.Println() - listToxics(down, "Downstream") - } - - hint("add a toxic with `toxiproxy-cli toxic add`") - } else { - listToxics(proxy.ActiveToxics, "") - } - return nil -} - -func toggleProxy(c *cli.Context, t *toxiproxy.Client) error { - proxyName := c.Args().First() - if proxyName == "" { - cli.ShowSubcommandHelp(c) - return errorf("Proxy name is required as the first argument.\n") - } - - proxy, err := t.Proxy(proxyName) - if err != nil { - return errorf("Failed to retrieve proxy %s: %s\n", proxyName, err.Error()) - } - - proxy.Enabled = !proxy.Enabled - - err = proxy.Save() - if err != nil { - return errorf("Failed to toggle proxy %s: %s\n", proxyName, err.Error()) - } - - fmt.Printf("Proxy %s%s%s is now %s%s%s\n", colorEnabled(proxy.Enabled), proxyName, color(NONE), colorEnabled(proxy.Enabled), enabledText(proxy.Enabled), color(NONE)) - return nil -} - -func createProxy(c *cli.Context, t *toxiproxy.Client) error { - proxyName := c.Args().First() - if proxyName == "" { - cli.ShowSubcommandHelp(c) - return errorf("Proxy name is required as the first argument.\n") - } - listen, err := getArgOrFail(c, "listen") - if err != nil { - return err - } - upstream, err := getArgOrFail(c, "upstream") - if err != nil { - return err - } - _, err = t.CreateProxy(proxyName, listen, upstream) - if err != nil { - return errorf("Failed to create proxy: %s\n", err.Error()) - } - fmt.Printf("Created new proxy %s\n", proxyName) - return nil -} - -func deleteProxy(c *cli.Context, t *toxiproxy.Client) error { - proxyName := c.Args().First() - if proxyName == "" { - cli.ShowSubcommandHelp(c) - return errorf("Proxy name is required as the first argument.\n") - } - p, err := t.Proxy(proxyName) - if err != nil { - return errorf("Failed to retrieve proxy %s: %s\n", proxyName, err.Error()) - } - - err = p.Delete() - if err != nil { - return errorf("Failed to delete proxy: %s\n", err.Error()) - } - fmt.Printf("Deleted proxy %s\n", proxyName) - return nil -} - -func parseToxicity(c *cli.Context, defaultToxicity float32) (float32, error) { - var toxicity = defaultToxicity - toxicityString := c.String("toxicity") - if toxicityString != "" { - tox, err := strconv.ParseFloat(toxicityString, 32) - if err != nil || tox > 1 || tox < 0 { - return 0, errorf("toxicity should be a float between 0 and 1.\n") - } - toxicity = float32(tox) - } - return toxicity, nil -} - -func addToxic(c *cli.Context, t *toxiproxy.Client) error { - proxyName := c.Args().First() - if proxyName == "" { - cli.ShowSubcommandHelp(c) - return errorf("Proxy name is required as the first argument.\n") - } - toxicName := c.String("toxicName") - toxicType, err := getArgOrFail(c, "type") - if err != nil { - return err - } - - upstream := c.Bool("upstream") - downstream := c.Bool("downstream") - - toxicity, err := parseToxicity(c, 1.0) - if err != nil { - return err - } - - attributes := parseAttributes(c, "attribute") - - p, err := t.Proxy(proxyName) - if err != nil { - return errorf("Failed to retrieve proxy %s: %s\n", proxyName, err.Error()) - } - - addToxic := func(stream string) error { - t, err := p.AddToxic(toxicName, toxicType, 
stream, toxicity, attributes) - if err != nil { - return errorf("Failed to add toxic: %s\n", err.Error()) - } - toxicName = t.Name - fmt.Printf("Added %s %s toxic '%s' on proxy '%s'\n", stream, toxicType, toxicName, proxyName) - return nil - } - - if upstream { - err := addToxic("upstream") - if err != nil { - return err - } - } - // Default to downstream. - if downstream || (!downstream && !upstream) { - return addToxic("downstream") - } - return nil -} - -func updateToxic(c *cli.Context, t *toxiproxy.Client) error { - proxyName := c.Args().First() - if proxyName == "" { - cli.ShowSubcommandHelp(c) - return errorf("Proxy name is required as the first argument.\n") - } - toxicName, err := getArgOrFail(c, "toxicName") - if err != nil { - return err - } - - attributes := parseAttributes(c, "attribute") - - p, err := t.Proxy(proxyName) - if err != nil { - return errorf("Failed to retrieve proxy %s: %s\n", proxyName, err.Error()) - } - - toxicity, err := parseToxicity(c, -1) - if err != nil { - return nil - } - - _, err = p.UpdateToxic(toxicName, toxicity, attributes) - if err != nil { - return errorf("Failed to update toxic: %s\n", err.Error()) - } - - fmt.Printf("Updated toxic '%s' on proxy '%s'\n", toxicName, proxyName) - return nil -} - -func removeToxic(c *cli.Context, t *toxiproxy.Client) error { - proxyName := c.Args().First() - if proxyName == "" { - cli.ShowSubcommandHelp(c) - return errorf("Proxy name is required as the first argument.\n") - } - toxicName, err := getArgOrFail(c, "toxicName") - if err != nil { - return err - } - - p, err := t.Proxy(proxyName) - if err != nil { - return errorf("Failed to retrieve proxy %s: %s\n", proxyName, err.Error()) - } - - err = p.RemoveToxic(toxicName) - if err != nil { - return errorf("Failed to remove toxic: %s\n", err.Error()) - } - - fmt.Printf("Removed toxic '%s' on proxy '%s'\n", toxicName, proxyName) - return nil -} - -func parseAttributes(c *cli.Context, name string) toxiproxy.Attributes { - parsed := map[string]interface{}{} - args := c.StringSlice(name) - - for _, raw := range args { - kv := strings.SplitN(raw, "=", 2) - if len(kv) < 2 { - continue - } - if float, err := strconv.ParseFloat(kv[1], 64); err == nil { - parsed[kv[0]] = float - } else { - parsed[kv[0]] = kv[1] - } - } - return parsed -} - -func colorEnabled(enabled bool) string { - if enabled { - return color(GREEN) - } - - return color(RED) -} - -func enabledText(enabled bool) string { - if enabled { - return "enabled" - } - - return "disabled" -} - -type attribute struct { - key string - value interface{} -} - -type attributeList []attribute - -func (a attributeList) Len() int { return len(a) } -func (a attributeList) Less(i, j int) bool { return a[i].key < a[j].key } -func (a attributeList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func sortedAttributes(attrs toxiproxy.Attributes) attributeList { - li := make(attributeList, 0, len(attrs)) - for k, v := range attrs { - li = append(li, attribute{k, v.(float64)}) - } - sort.Sort(li) - return li -} - -func listToxics(toxics toxiproxy.Toxics, stream string) { - if isTTY { - fmt.Printf("%s%s toxics:\n%s", color(GREEN), stream, color(NONE)) - if len(toxics) == 0 { - fmt.Printf("%sProxy has no %s toxics enabled.\n%s", color(RED), stream, color(NONE)) - return - } - } - for _, t := range toxics { - if isTTY { - fmt.Printf("%s%s:%s\t", color(BLUE), t.Name, color(NONE)) - } else { - fmt.Printf("%s\t", t.Name) - } - fmt.Printf("type=%s\t", t.Type) - fmt.Printf("stream=%s\t", t.Stream) - fmt.Printf("toxicity=%.2f\t", t.Toxicity) - 
fmt.Printf("attributes=[") - sorted := sortedAttributes(t.Attributes) - for _, a := range sorted { - fmt.Printf("\t%s=", a.key) - fmt.Print(a.value) - } - fmt.Printf("\t]\n") - } -} - -func getArgOrFail(c *cli.Context, name string) (string, error) { - arg := c.String(name) - if arg == "" { - cli.ShowSubcommandHelp(c) - return "", errorf("Required argument '%s' was empty.\n", name) - } - return arg, nil -} - -func hint(m string) { - if isTTY { - fmt.Printf("\n%sHint: %s\n", color(NONE), m) - } -} - -func errorf(m string, args ...interface{}) error { - return cli.NewExitError(fmt.Sprintf(m, args...), 1) -} - -func printWidth(col string, m string, numTabs int) { - if isTTY { - numTabs -= len(m)/8 + 1 - if numTabs < 0 { - numTabs = 0 - } - } else { - numTabs = 0 - } - fmt.Printf("%s%s%s\t%s", color(col), m, color(NONE), strings.Repeat("\t", numTabs)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/client/README.md b/vendor/github.com/Shopify/toxiproxy/client/README.md deleted file mode 100644 index a078f87..0000000 --- a/vendor/github.com/Shopify/toxiproxy/client/README.md +++ /dev/null @@ -1,161 +0,0 @@ -# toxiproxy-go - -This is the Go client library for the -[Toxiproxy](https://github.com/shopify/toxiproxy) API. Please read the [usage -section in the Toxiproxy README](https://github.com/shopify/toxiproxy#usage) -before attempting to use the client. - -This client is compatible with Toxiproxy 2.x, for the latest 1.x client see -[v1.2.1](https://github.com/Shopify/toxiproxy/tree/v1.2.1/client). - -## Changes in Toxiproxy-go Client 2.x - -In order to make use of the 2.0 api, and to make usage a little easier, the -client api has changed: - - - `client.NewProxy()` no longer accepts a proxy as an argument. - - `proxy.Create()` is removed in favour of using `proxy.Save()`. - - Proxies can be created in a single call using `client.CreateProxy()`. - - `proxy.Disable()` and `proxy.Enable()` have been added to simplify taking - down a proxy. - - `proxy.ToxicsUpstream` and `proxy.ToxicsDownstream` have been merged into a - single `ActiveToxics` list. - - `proxy.Toxics()`` no longer requires a direction to be specified, and will - return toxics for both directions. - - `proxy.SetToxic()` has been replaced by `proxy.AddToxic()`, - `proxy.UpdateToxic()`, and `proxy.RemoveToxic()`. - -## Usage - -For detailed API docs please [see the Godoc -documentation](http://godoc.org/github.com/Shopify/toxiproxy/client). 
- -First import toxiproxy and create a new client: -```go -import "github.com/Shopify/toxiproxy/client" - -client := toxiproxy.NewClient("localhost:8474") -``` - -You can then create a new proxy using the client: -```go -proxy := client.CreateProxy("redis", "localhost:26379", "localhost:6379") -``` - -For large amounts of proxies, they can also be created using a configuration file: -```go -var config []toxiproxy.Proxy -data, _ := ioutil.ReadFile("config.json") -json.Unmarshal(data, &config) -proxies, err = client.Populate(config) -``` -```json -[{ - "name": "redis", - "listen": "localhost:26379", - "upstream": "localhost:6379" -}] -``` - -Toxics can be added as follows: -```go -// Add 1s latency to 100% of downstream connections -proxy.AddToxic("latency_down", "latency", "downstream", 1.0, toxiproxy.Attributes{ - "latency": 1000, -}) - -// Change downstream latency to add 100ms of jitter -proxy.UpdateToxic("latency_down", 1.0, toxiproxy.Attributes{ - "jitter": 100, -}) - -// Remove the latency toxic -proxy.RemoveToxic("latency_down") -``` - - -The proxy can be taken down using `Disable()`: -```go -proxy.Disable() -``` - -When a proxy is no longer needed, it can be cleaned up with `Delete()`: -```go -proxy.Delete() -``` - -## Full Example - -```go -import ( - "net/http" - "testing" - "time" - - "github.com/Shopify/toxiproxy/client" - "github.com/garyburd/redigo/redis" -) - -var toxiClient *toxiproxy.Client -var proxies map[string]*toxiproxy.Proxy - -func init() { - var err error - toxiClient = toxiproxy.NewClient("localhost:8474") - proxies, err = toxiClient.Populate([]toxiproxy.Proxy{{ - Name: "redis", - Listen: "localhost:26379", - Upstream: "localhost:6379", - }}) - if err != nil { - panic(err) - } - // Alternatively, create the proxies manually with - // toxiClient.CreateProxy("redis", "localhost:26379", "localhost:6379") -} - -func TestRedisBackendDown(t *testing.T) { - proxies["redis"].Disable() - defer proxies["redis"].Enable() - - // Test that redis is down - _, err := redis.Dial("tcp", ":26379") - if err == nil { - t.Fatal("Connection to redis did not fail") - } -} - -func TestRedisBackendSlow(t *testing.T) { - proxies["redis"].AddToxic("", "latency", "", 1, toxiproxy.Attributes{ - "latency": 1000, - }) - defer proxies["redis"].RemoveToxic("latency_downstream") - - // Test that redis is slow - start := time.Now() - conn, err := redis.Dial("tcp", ":26379") - if err != nil { - t.Fatal("Connection to redis failed", err) - } - - _, err = conn.Do("GET", "test") - if err != nil { - t.Fatal("Redis command failed", err) - } else if time.Since(start) < 900*time.Millisecond { - t.Fatal("Redis command did not take long enough:", time.Since(start)) - } -} - -func TestEphemeralProxy(t *testing.T) { - proxy, _ := toxiClient.CreateProxy("test", "", "google.com:80") - defer proxy.Delete() - - // Test connection through proxy.Listen - resp, err := http.Get("http://" + proxy.Listen) - if err != nil { - t.Fatal(err) - } else if resp.StatusCode != 200 { - t.Fatal("Proxy to google failed:", resp.StatusCode) - } -} -``` diff --git a/vendor/github.com/Shopify/toxiproxy/client/client.go b/vendor/github.com/Shopify/toxiproxy/client/client.go deleted file mode 100644 index 2590024..0000000 --- a/vendor/github.com/Shopify/toxiproxy/client/client.go +++ /dev/null @@ -1,362 +0,0 @@ -// Package Toxiproxy provides a client wrapper around the Toxiproxy HTTP API for -// testing the resiliency of Go applications. 
-// -// For use with Toxiproxy 2.x -package toxiproxy - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Client holds information about where to connect to Toxiproxy. -type Client struct { - endpoint string -} - -type Attributes map[string]interface{} - -type Toxic struct { - Name string `json:"name"` - Type string `json:"type"` - Stream string `json:"stream,omitempty"` - Toxicity float32 `json:"toxicity"` - Attributes Attributes `json:"attributes"` -} - -type Toxics []Toxic - -type Proxy struct { - Name string `json:"name"` // The name of the proxy - Listen string `json:"listen"` // The address the proxy listens on - Upstream string `json:"upstream"` // The upstream address to proxy to - Enabled bool `json:"enabled"` // Whether the proxy is enabled - - ActiveToxics Toxics `json:"toxics"` // The toxics active on this proxy - - client *Client - created bool // True if this proxy exists on the server -} - -// NewClient creates a new client which provides the base of all communication -// with Toxiproxy. Endpoint is the address to the proxy (e.g. localhost:8474 if -// not overriden) -func NewClient(endpoint string) *Client { - if !strings.HasPrefix(endpoint, "http://") { - endpoint = "http://" + endpoint - } - return &Client{endpoint: endpoint} -} - -// Proxies returns a map with all the proxies and their toxics. -func (client *Client) Proxies() (map[string]*Proxy, error) { - resp, err := http.Get(client.endpoint + "/proxies") - if err != nil { - return nil, err - } - - err = checkError(resp, http.StatusOK, "Proxies") - if err != nil { - return nil, err - } - - proxies := make(map[string]*Proxy) - err = json.NewDecoder(resp.Body).Decode(&proxies) - if err != nil { - return nil, err - } - for _, proxy := range proxies { - proxy.client = client - proxy.created = true - } - - return proxies, nil -} - -// Generates a new uncommitted proxy instance. In order to use the result, the -// proxy fields will need to be set and have `Save()` called. -func (client *Client) NewProxy() *Proxy { - return &Proxy{ - client: client, - } -} - -// CreateProxy instantiates a new proxy and starts listening on the specified address. -// This is an alias for `NewProxy()` + `proxy.Save()` -func (client *Client) CreateProxy(name, listen, upstream string) (*Proxy, error) { - proxy := &Proxy{ - Name: name, - Listen: listen, - Upstream: upstream, - Enabled: true, - client: client, - } - - err := proxy.Save() - if err != nil { - return nil, err - } - - return proxy, nil -} - -// Proxy returns a proxy by name. -func (client *Client) Proxy(name string) (*Proxy, error) { - // TODO url encode - resp, err := http.Get(client.endpoint + "/proxies/" + name) - if err != nil { - return nil, err - } - - err = checkError(resp, http.StatusOK, "Proxy") - if err != nil { - return nil, err - } - - proxy := new(Proxy) - err = json.NewDecoder(resp.Body).Decode(proxy) - if err != nil { - return nil, err - } - proxy.client = client - proxy.created = true - - return proxy, nil -} - -// Create a list of proxies using a configuration list. If a proxy already exists, it will be replaced -// with the specified configuration. For large amounts of proxies, `config` can be loaded from a file. -// Returns a list of the successfully created proxies. 
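For example, `Populate` can also be driven from an in-memory slice rather than a JSON file, and re-running it with a changed listen or upstream address replaces the existing proxy of the same name, as the comment above describes. A minimal sketch; the addresses and the second "mysql" proxy are illustrative, and a Toxiproxy server is assumed on localhost:8474.

```go
package main

import (
    "fmt"

    toxiproxy "github.com/Shopify/toxiproxy/client"
)

func main() {
    client := toxiproxy.NewClient("localhost:8474")

    // Running Populate again with different addresses replaces the proxies
    // of the same name instead of erroring out.
    proxies, err := client.Populate([]toxiproxy.Proxy{
        {Name: "redis", Listen: "localhost:26379", Upstream: "localhost:6379"},
        {Name: "mysql", Listen: "localhost:23306", Upstream: "localhost:3306"},
    })
    if err != nil {
        panic(err)
    }

    // Each returned *Proxy now exists on the server.
    for _, p := range proxies {
        fmt.Println(p.Name, "listening on", p.Listen)
    }
}
```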
-func (client *Client) Populate(config []Proxy) ([]*Proxy, error) { - proxies := struct { - Proxies []*Proxy `json:"proxies"` - }{} - request, err := json.Marshal(config) - if err != nil { - return nil, err - } - - resp, err := http.Post(client.endpoint+"/populate", "application/json", bytes.NewReader(request)) - if err != nil { - return nil, err - } - - // Response body may need to be read twice, we want to return both the proxy list and any errors - var body bytes.Buffer - tee := io.TeeReader(resp.Body, &body) - err = json.NewDecoder(tee).Decode(&proxies) - if err != nil { - return nil, err - } - - resp.Body = ioutil.NopCloser(&body) - err = checkError(resp, http.StatusCreated, "Populate") - return proxies.Proxies, err -} - -// Save saves changes to a proxy such as its enabled status or upstream port. -func (proxy *Proxy) Save() error { - request, err := json.Marshal(proxy) - if err != nil { - return err - } - - var resp *http.Response - if proxy.created { - resp, err = http.Post(proxy.client.endpoint+"/proxies/"+proxy.Name, "text/plain", bytes.NewReader(request)) - } else { - resp, err = http.Post(proxy.client.endpoint+"/proxies", "application/json", bytes.NewReader(request)) - } - if err != nil { - return err - } - - if proxy.created { - err = checkError(resp, http.StatusOK, "Save") - } else { - err = checkError(resp, http.StatusCreated, "Create") - } - if err != nil { - return err - } - - err = json.NewDecoder(resp.Body).Decode(proxy) - if err != nil { - return err - } - proxy.created = true - - return nil -} - -// Enable a proxy again after it has been disabled. -func (proxy *Proxy) Enable() error { - proxy.Enabled = true - return proxy.Save() -} - -// Disable a proxy so that no connections can pass through. This will drop all active connections. -func (proxy *Proxy) Disable() error { - proxy.Enabled = false - return proxy.Save() -} - -// Delete a proxy complete and close all existing connections through it. All information about -// the proxy such as listen port and active toxics will be deleted as well. If you just wish to -// stop and later enable a proxy, use `Enable()` and `Disable()`. -func (proxy *Proxy) Delete() error { - httpClient := &http.Client{} - req, err := http.NewRequest("DELETE", proxy.client.endpoint+"/proxies/"+proxy.Name, nil) - - if err != nil { - return err - } - - resp, err := httpClient.Do(req) - if err != nil { - return err - } - - return checkError(resp, http.StatusNoContent, "Delete") -} - -// Toxics returns a map of all the active toxics and their attributes. -func (proxy *Proxy) Toxics() (Toxics, error) { - resp, err := http.Get(proxy.client.endpoint + "/proxies/" + proxy.Name + "/toxics") - if err != nil { - return nil, err - } - - err = checkError(resp, http.StatusOK, "Toxics") - if err != nil { - return nil, err - } - - toxics := make(Toxics, 0) - err = json.NewDecoder(resp.Body).Decode(&toxics) - if err != nil { - return nil, err - } - - return toxics, nil -} - -// AddToxic adds a toxic to the given stream direction. -// If a name is not specified, it will default to _. -// If a stream is not specified, it will default to downstream. -// See https://github.com/Shopify/toxiproxy#toxics for a list of all Toxic types. 
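The defaults described above are easiest to see in a call that leaves both the name and the stream empty. A fragment assuming a `proxy` fetched from the client; the generated name follows the type-and-stream pattern shown in the client README's full example earlier in this patch.

```go
// Empty name and stream let the server pick the defaults: the toxic is
// attached downstream and named after its type and stream,
// e.g. "latency_downstream".
toxic, err := proxy.AddToxic("", "latency", "", 1.0, toxiproxy.Attributes{
    "latency": 750,
})
if err != nil {
    panic(err)
}
defer proxy.RemoveToxic(toxic.Name) // clean up via the generated name
```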
-func (proxy *Proxy) AddToxic(name, typeName, stream string, toxicity float32, attrs Attributes) (*Toxic, error) { - toxic := Toxic{name, typeName, stream, toxicity, attrs} - if toxic.Toxicity == -1 { - toxic.Toxicity = 1 // Just to be consistent with a toxicity of -1 using the default - } - - request, err := json.Marshal(&toxic) - if err != nil { - return nil, err - } - - resp, err := http.Post(proxy.client.endpoint+"/proxies/"+proxy.Name+"/toxics", "application/json", bytes.NewReader(request)) - if err != nil { - return nil, err - } - - err = checkError(resp, http.StatusOK, "AddToxic") - if err != nil { - return nil, err - } - - result := &Toxic{} - err = json.NewDecoder(resp.Body).Decode(result) - if err != nil { - return nil, err - } - - return result, nil -} - -// UpdateToxic sets the parameters for an existing toxic with the given name. -// If toxicity is set to -1, the current value will be used. -func (proxy *Proxy) UpdateToxic(name string, toxicity float32, attrs Attributes) (*Toxic, error) { - toxic := map[string]interface{}{ - "attributes": attrs, - } - if toxicity != -1 { - toxic["toxicity"] = toxicity - } - request, err := json.Marshal(&toxic) - if err != nil { - return nil, err - } - - resp, err := http.Post(proxy.client.endpoint+"/proxies/"+proxy.Name+"/toxics/"+name, "application/json", bytes.NewReader(request)) - if err != nil { - return nil, err - } - - err = checkError(resp, http.StatusOK, "UpdateToxic") - if err != nil { - return nil, err - } - - result := &Toxic{} - err = json.NewDecoder(resp.Body).Decode(result) - if err != nil { - return nil, err - } - - return result, nil -} - -// RemoveToxic renives the toxic with the given name. -func (proxy *Proxy) RemoveToxic(name string) error { - httpClient := &http.Client{} - req, err := http.NewRequest("DELETE", proxy.client.endpoint+"/proxies/"+proxy.Name+"/toxics/"+name, nil) - if err != nil { - return err - } - - resp, err := httpClient.Do(req) - if err != nil { - return err - } - - return checkError(resp, http.StatusNoContent, "RemoveToxic") -} - -// ResetState resets the state of all proxies and toxics in Toxiproxy. 
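A fragment showing the `-1` convention from the comment above together with `ResetState` (defined just below). The `client`, `proxy`, and toxic name are assumed to exist from earlier calls; the jitter value is illustrative.

```go
// Toxicity -1 leaves the current toxicity untouched and only updates the
// attributes of the named toxic.
if _, err := proxy.UpdateToxic("latency_downstream", -1, toxiproxy.Attributes{
    "jitter": 250,
}); err != nil {
    panic(err)
}

// Once a test run is finished, one call resets every proxy and toxic.
if err := client.ResetState(); err != nil {
    panic(err)
}
```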
-func (client *Client) ResetState() error { - resp, err := http.Post(client.endpoint+"/reset", "text/plain", bytes.NewReader([]byte{})) - if err != nil { - return err - } - - return checkError(resp, http.StatusNoContent, "ResetState") -} - -type ApiError struct { - Message string `json:"error"` - Status int `json:"status"` -} - -func (err *ApiError) Error() string { - return fmt.Sprintf("HTTP %d: %s", err.Status, err.Message) -} - -func checkError(resp *http.Response, expectedCode int, caller string) error { - if resp.StatusCode != expectedCode { - apiError := new(ApiError) - err := json.NewDecoder(resp.Body).Decode(apiError) - if err != nil { - apiError.Message = fmt.Sprintf("Unexpected response code, expected %d", expectedCode) - apiError.Status = resp.StatusCode - } - return fmt.Errorf("%s: %v", caller, apiError) - } - return nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/cmd/toxiproxy.go b/vendor/github.com/Shopify/toxiproxy/cmd/toxiproxy.go deleted file mode 100644 index e53a8fb..0000000 --- a/vendor/github.com/Shopify/toxiproxy/cmd/toxiproxy.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "flag" - "math/rand" - "time" - - "github.com/Shopify/toxiproxy" -) - -var host string -var port string -var config string - -func init() { - flag.StringVar(&host, "host", "localhost", "Host for toxiproxy's API to listen on") - flag.StringVar(&port, "port", "8474", "Port for toxiproxy's API to listen on") - flag.StringVar(&config, "config", "", "JSON file containing proxies to create on startup") - seed := flag.Int64("seed", time.Now().UTC().UnixNano(), "Seed for randomizing toxics with") - flag.Parse() - rand.Seed(*seed) -} - -func main() { - server := toxiproxy.NewServer() - if len(config) > 0 { - server.PopulateConfig(config) - } - server.Listen(host, port) -} diff --git a/vendor/github.com/Shopify/toxiproxy/dev.yml b/vendor/github.com/Shopify/toxiproxy/dev.yml deleted file mode 100644 index 2f8e9c7..0000000 --- a/vendor/github.com/Shopify/toxiproxy/dev.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: toxiproxy - -up: - - go: 1.7.3 - -commands: - build: - run: godep go build - desc: 'build the project' - test: - run: godep go test ./... - desc: 'run unit tests' - -packages: - - git@github.com:Shopify/dev-shopify.git - diff --git a/vendor/github.com/Shopify/toxiproxy/link.go b/vendor/github.com/Shopify/toxiproxy/link.go deleted file mode 100644 index 3861b3b..0000000 --- a/vendor/github.com/Shopify/toxiproxy/link.go +++ /dev/null @@ -1,176 +0,0 @@ -package toxiproxy - -import ( - "io" - - "github.com/Shopify/toxiproxy/stream" - "github.com/Shopify/toxiproxy/toxics" - "github.com/Sirupsen/logrus" -) - -// ToxicLinks are single direction pipelines that connects an input and output via -// a chain of toxics. The chain always starts with a NoopToxic, and toxics are added -// and removed as they are enabled/disabled. New toxics are always added to the end -// of the chain. 
-// -// NoopToxic LatencyToxic -// v v -// Input > ToxicStub > ToxicStub > Output -// -type ToxicLink struct { - stubs []*toxics.ToxicStub - proxy *Proxy - toxics *ToxicCollection - input *stream.ChanWriter - output *stream.ChanReader - direction stream.Direction -} - -func NewToxicLink(proxy *Proxy, collection *ToxicCollection, direction stream.Direction) *ToxicLink { - link := &ToxicLink{ - stubs: make([]*toxics.ToxicStub, len(collection.chain[direction]), cap(collection.chain[direction])), - proxy: proxy, - toxics: collection, - direction: direction, - } - - // Initialize the link with ToxicStubs - last := make(chan *stream.StreamChunk) // The first toxic is always a noop - link.input = stream.NewChanWriter(last) - for i := 0; i < len(link.stubs); i++ { - var next chan *stream.StreamChunk - if i+1 < len(link.stubs) { - next = make(chan *stream.StreamChunk, link.toxics.chain[direction][i+1].BufferSize) - } else { - next = make(chan *stream.StreamChunk) - } - - link.stubs[i] = toxics.NewToxicStub(last, next) - last = next - } - link.output = stream.NewChanReader(last) - return link -} - -// Start the link with the specified toxics -func (link *ToxicLink) Start(name string, source io.Reader, dest io.WriteCloser) { - go func() { - bytes, err := io.Copy(link.input, source) - if err != nil { - logrus.WithFields(logrus.Fields{ - "name": link.proxy.Name, - "bytes": bytes, - "err": err, - }).Warn("Source terminated") - } - link.input.Close() - }() - for i, toxic := range link.toxics.chain[link.direction] { - if stateful, ok := toxic.Toxic.(toxics.StatefulToxic); ok { - link.stubs[i].State = stateful.NewState() - } - - go link.stubs[i].Run(toxic) - } - go func() { - bytes, err := io.Copy(dest, link.output) - if err != nil { - logrus.WithFields(logrus.Fields{ - "name": link.proxy.Name, - "bytes": bytes, - "err": err, - }).Warn("Destination terminated") - } - dest.Close() - link.toxics.RemoveLink(name) - link.proxy.RemoveConnection(name) - }() -} - -// Add a toxic to the end of the chain. -func (link *ToxicLink) AddToxic(toxic *toxics.ToxicWrapper) { - i := len(link.stubs) - - newin := make(chan *stream.StreamChunk, toxic.BufferSize) - link.stubs = append(link.stubs, toxics.NewToxicStub(newin, link.stubs[i-1].Output)) - - // Interrupt the last toxic so that we don't have a race when moving channels - if link.stubs[i-1].InterruptToxic() { - link.stubs[i-1].Output = newin - - if stateful, ok := toxic.Toxic.(toxics.StatefulToxic); ok { - link.stubs[i].State = stateful.NewState() - } - - go link.stubs[i].Run(toxic) - go link.stubs[i-1].Run(link.toxics.chain[link.direction][i-1]) - } else { - // This link is already closed, make sure the new toxic matches - link.stubs[i].Output = newin // The real output is already closed, close this instead - link.stubs[i].Close() - } -} - -// Update an existing toxic in the chain. -func (link *ToxicLink) UpdateToxic(toxic *toxics.ToxicWrapper) { - if link.stubs[toxic.Index].InterruptToxic() { - go link.stubs[toxic.Index].Run(toxic) - } -} - -// Remove an existing toxic from the chain. -func (link *ToxicLink) RemoveToxic(toxic *toxics.ToxicWrapper) { - i := toxic.Index - - if link.stubs[i].InterruptToxic() { - cleanup, ok := toxic.Toxic.(toxics.CleanupToxic) - if ok { - cleanup.Cleanup(link.stubs[i]) - // Cleanup could have closed the stub. 
- if link.stubs[i].Closed() { - return - } - } - - stop := make(chan bool) - // Interrupt the previous toxic to update its output - go func() { - stop <- link.stubs[i-1].InterruptToxic() - }() - - // Unblock the previous toxic if it is trying to flush - // If the previous toxic is closed, continue flusing until we reach the end. - interrupted := false - stopped := false - for !interrupted { - select { - case interrupted = <-stop: - stopped = true - case tmp := <-link.stubs[i].Input: - if tmp == nil { - link.stubs[i].Close() - if !stopped { - <-stop - } - return - } - link.stubs[i].Output <- tmp - } - } - - // Empty the toxic's buffer if necessary - for len(link.stubs[i].Input) > 0 { - tmp := <-link.stubs[i].Input - if tmp == nil { - link.stubs[i].Close() - return - } - link.stubs[i].Output <- tmp - } - - link.stubs[i-1].Output = link.stubs[i].Output - link.stubs = append(link.stubs[:i], link.stubs[i+1:]...) - - go link.stubs[i-1].Run(link.toxics.chain[link.direction][i-1]) - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/link_test.go b/vendor/github.com/Shopify/toxiproxy/link_test.go deleted file mode 100644 index 0210366..0000000 --- a/vendor/github.com/Shopify/toxiproxy/link_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package toxiproxy - -import ( - "encoding/binary" - "io" - "testing" - "time" - - "github.com/Shopify/toxiproxy/stream" - "github.com/Shopify/toxiproxy/testhelper" - "github.com/Shopify/toxiproxy/toxics" -) - -func TestToxicsAreLoaded(t *testing.T) { - if toxics.Count() < 1 { - t.Fatal("No toxics loaded!") - } -} - -func TestStubInitializaation(t *testing.T) { - collection := NewToxicCollection(nil) - link := NewToxicLink(nil, collection, stream.Downstream) - if len(link.stubs) != 1 { - t.Fatalf("Link created with wrong number of stubs: %d != 1", len(link.stubs)) - } else if cap(link.stubs) != toxics.Count()+1 { - t.Fatalf("Link created with wrong capacity: %d != %d", cap(link.stubs), toxics.Count()+1) - } else if cap(link.stubs[0].Input) != 0 { - t.Fatalf("Noop buffer was not initialized as 0: %d", cap(link.stubs[0].Input)) - } else if cap(link.stubs[0].Output) != 0 { - t.Fatalf("Link output buffer was not initialized as 0: %d", cap(link.stubs[0].Output)) - } -} - -func TestStubInitializaationWithToxics(t *testing.T) { - collection := NewToxicCollection(nil) - collection.chainAddToxic(&toxics.ToxicWrapper{ - Toxic: new(toxics.LatencyToxic), - Type: "latency", - Direction: stream.Downstream, - BufferSize: 1024, - Toxicity: 1, - }) - collection.chainAddToxic(&toxics.ToxicWrapper{ - Toxic: new(toxics.BandwidthToxic), - Type: "bandwidth", - Direction: stream.Downstream, - Toxicity: 1, - }) - link := NewToxicLink(nil, collection, stream.Downstream) - if len(link.stubs) != 3 { - t.Fatalf("Link created with wrong number of stubs: %d != 3", len(link.stubs)) - } else if cap(link.stubs) != toxics.Count()+1 { - t.Fatalf("Link created with wrong capacity: %d != %d", cap(link.stubs), toxics.Count()+1) - } else if cap(link.stubs[len(link.stubs)-1].Output) != 0 { - t.Fatalf("Link output buffer was not initialized as 0: %d", cap(link.stubs[0].Output)) - } - for i, toxic := range collection.chain[stream.Downstream] { - if cap(link.stubs[i].Input) != toxic.BufferSize { - t.Fatalf("%s buffer was not initialized as %d: %d", toxic.Type, toxic.BufferSize, cap(link.stubs[i].Input)) - } - } -} - -func TestAddRemoveStubs(t *testing.T) { - collection := NewToxicCollection(nil) - link := NewToxicLink(nil, collection, stream.Downstream) - go 
link.stubs[0].Run(collection.chain[stream.Downstream][0]) - collection.links["test"] = link - - // Add stubs - collection.chainAddToxic(&toxics.ToxicWrapper{ - Toxic: new(toxics.LatencyToxic), - Type: "latency", - Direction: stream.Downstream, - BufferSize: 1024, - Toxicity: 1, - }) - toxic := &toxics.ToxicWrapper{ - Toxic: new(toxics.BandwidthToxic), - Type: "bandwidth", - Direction: stream.Downstream, - BufferSize: 2048, - Toxicity: 1, - } - collection.chainAddToxic(toxic) - if cap(link.stubs[len(link.stubs)-1].Output) != 0 { - t.Fatalf("Link output buffer was not initialized as 0: %d", cap(link.stubs[0].Output)) - } - for i, toxic := range collection.chain[stream.Downstream] { - if cap(link.stubs[i].Input) != toxic.BufferSize { - t.Fatalf("%s buffer was not initialized as %d: %d", toxic.Type, toxic.BufferSize, cap(link.stubs[i].Input)) - } - } - - // Remove stubs - collection.chainRemoveToxic(toxic) - if cap(link.stubs[len(link.stubs)-1].Output) != 0 { - t.Fatalf("Link output buffer was not initialized as 0: %d", cap(link.stubs[0].Output)) - } - for i, toxic := range collection.chain[stream.Downstream] { - if cap(link.stubs[i].Input) != toxic.BufferSize { - t.Fatalf("%s buffer was not initialized as %d: %d", toxic.Type, toxic.BufferSize, cap(link.stubs[i].Input)) - } - } -} - -func TestNoDataDropped(t *testing.T) { - collection := NewToxicCollection(nil) - link := NewToxicLink(nil, collection, stream.Downstream) - go link.stubs[0].Run(collection.chain[stream.Downstream][0]) - collection.links["test"] = link - - toxic := &toxics.ToxicWrapper{ - Toxic: &toxics.LatencyToxic{ - Latency: 1000, - }, - Type: "latency", - Direction: stream.Downstream, - BufferSize: 1024, - Toxicity: 1, - } - - done := make(chan struct{}) - defer close(done) - go func() { - for i := 0; i < 64*1024; i++ { - buf := make([]byte, 2) - binary.BigEndian.PutUint16(buf, uint16(i)) - link.input.Write(buf) - } - link.input.Close() - }() - go func() { - for { - select { - case <-done: - return - default: - collection.chainAddToxic(toxic) - collection.chainRemoveToxic(toxic) - } - } - }() - - buf := make([]byte, 2) - for i := 0; i < 64*1024; i++ { - n, err := link.output.Read(buf) - if n != 2 || err != nil { - t.Fatalf("Read failed: %d %v", n, err) - } else { - val := binary.BigEndian.Uint16(buf) - if val != uint16(i) { - t.Fatalf("Read incorrect bytes: %v != %d", val, i) - } - } - } - n, err := link.output.Read(buf) - if n != 0 || err != io.EOF { - t.Fatalf("Expected EOF: %d %v", n, err) - } -} - -func TestToxicity(t *testing.T) { - collection := NewToxicCollection(nil) - link := NewToxicLink(nil, collection, stream.Downstream) - go link.stubs[0].Run(collection.chain[stream.Downstream][0]) - collection.links["test"] = link - - toxic := &toxics.ToxicWrapper{ - Toxic: new(toxics.TimeoutToxic), - Name: "timeout1", - Type: "timeout", - Direction: stream.Downstream, - Toxicity: 0, - } - collection.chainAddToxic(toxic) - - // Toxic should be a Noop because of toxicity - n, err := link.input.Write([]byte{42}) - if n != 1 || err != nil { - t.Fatalf("Write failed: %d %v", n, err) - } - buf := make([]byte, 2) - n, err = link.output.Read(buf) - if n != 1 || err != nil { - t.Fatalf("Read failed: %d %v", n, err) - } else if buf[0] != 42 { - t.Fatalf("Read wrong byte: %x", buf[0]) - } - - toxic.Toxicity = 1 - toxic.Toxic.(*toxics.TimeoutToxic).Timeout = 100 - collection.chainUpdateToxic(toxic) - - err = testhelper.TimeoutAfter(150*time.Millisecond, func() { - n, err = link.input.Write([]byte{42}) - if n != 1 || err != nil { - 
t.Fatalf("Write failed: %d %v", n, err) - } - n, err = link.output.Read(buf) - if n != 0 || err != io.EOF { - t.Fatalf("Read did not get EOF: %d %v", n, err) - } - }) - if err != nil { - t.Fatal(err) - } -} - -func TestStateCreated(t *testing.T) { - collection := NewToxicCollection(nil) - link := NewToxicLink(nil, collection, stream.Downstream) - go link.stubs[0].Run(collection.chain[stream.Downstream][0]) - collection.links["test"] = link - - collection.chainAddToxic(&toxics.ToxicWrapper{ - Toxic: new(toxics.LimitDataToxic), - Type: "limit_data", - Direction: stream.Downstream, - Toxicity: 1, - }) - if link.stubs[len(link.stubs)-1].State == nil { - t.Fatalf("New toxic did not have state object created.") - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/proxy.go b/vendor/github.com/Shopify/toxiproxy/proxy.go deleted file mode 100644 index b738881..0000000 --- a/vendor/github.com/Shopify/toxiproxy/proxy.go +++ /dev/null @@ -1,225 +0,0 @@ -package toxiproxy - -import ( - "errors" - "sync" - - "github.com/Shopify/toxiproxy/stream" - "github.com/Sirupsen/logrus" - tomb "gopkg.in/tomb.v1" - - "net" -) - -// Proxy represents the proxy in its entirity with all its links. The main -// responsibility of Proxy is to accept new client and create Links between the -// client and upstream. -// -// Client <-> toxiproxy <-> Upstream -// -type Proxy struct { - sync.Mutex - - Name string `json:"name"` - Listen string `json:"listen"` - Upstream string `json:"upstream"` - Enabled bool `json:"enabled"` - - started chan error - - tomb tomb.Tomb - connections ConnectionList - Toxics *ToxicCollection `json:"-"` -} - -type ConnectionList struct { - list map[string]net.Conn - lock sync.Mutex -} - -func (c *ConnectionList) Lock() { - c.lock.Lock() -} - -func (c *ConnectionList) Unlock() { - c.lock.Unlock() -} - -var ErrProxyAlreadyStarted = errors.New("Proxy already started") - -func NewProxy() *Proxy { - proxy := &Proxy{ - started: make(chan error), - connections: ConnectionList{list: make(map[string]net.Conn)}, - } - proxy.Toxics = NewToxicCollection(proxy) - return proxy -} - -func (proxy *Proxy) Start() error { - proxy.Lock() - defer proxy.Unlock() - - return start(proxy) -} - -func (proxy *Proxy) Update(input *Proxy) error { - proxy.Lock() - defer proxy.Unlock() - - if input.Listen != proxy.Listen || input.Upstream != proxy.Upstream { - stop(proxy) - proxy.Listen = input.Listen - proxy.Upstream = input.Upstream - } - - if input.Enabled != proxy.Enabled { - if input.Enabled { - return start(proxy) - } - stop(proxy) - } - return nil -} - -func (proxy *Proxy) Stop() { - proxy.Lock() - defer proxy.Unlock() - - stop(proxy) -} - -// server runs the Proxy server, accepting new clients and creating Links to -// connect them to upstreams. -func (proxy *Proxy) server() { - ln, err := net.Listen("tcp", proxy.Listen) - if err != nil { - proxy.started <- err - return - } - - proxy.Listen = ln.Addr().String() - proxy.started <- nil - - logrus.WithFields(logrus.Fields{ - "name": proxy.Name, - "proxy": proxy.Listen, - "upstream": proxy.Upstream, - }).Info("Started proxy") - - acceptTomb := tomb.Tomb{} - defer acceptTomb.Done() - - // This channel is to kill the blocking Accept() call below by closing the - // net.Listener. 
- go func() { - <-proxy.tomb.Dying() - - // Notify ln.Accept() that the shutdown was safe - acceptTomb.Killf("Shutting down from stop()") - // Unblock ln.Accept() - err := ln.Close() - if err != nil { - logrus.WithFields(logrus.Fields{ - "proxy": proxy.Name, - "listen": proxy.Listen, - "err": err, - }).Warn("Attempted to close an already closed proxy server") - } - - // Wait for the accept loop to finish processing - acceptTomb.Wait() - proxy.tomb.Done() - }() - - for { - client, err := ln.Accept() - if err != nil { - // This is to confirm we're being shut down in a legit way. Unfortunately, - // Go doesn't export the error when it's closed from Close() so we have to - // sync up with a channel here. - // - // See http://zhen.org/blog/graceful-shutdown-of-go-net-dot-listeners/ - select { - case <-acceptTomb.Dying(): - default: - logrus.WithFields(logrus.Fields{ - "proxy": proxy.Name, - "listen": proxy.Listen, - "err": err, - }).Warn("Error while accepting client") - } - return - } - - logrus.WithFields(logrus.Fields{ - "name": proxy.Name, - "client": client.RemoteAddr(), - "proxy": proxy.Listen, - "upstream": proxy.Upstream, - }).Info("Accepted client") - - upstream, err := net.Dial("tcp", proxy.Upstream) - if err != nil { - logrus.WithFields(logrus.Fields{ - "name": proxy.Name, - "client": client.RemoteAddr(), - "proxy": proxy.Listen, - "upstream": proxy.Upstream, - }).Error("Unable to open connection to upstream") - client.Close() - continue - } - - name := client.RemoteAddr().String() - proxy.connections.Lock() - proxy.connections.list[name+"upstream"] = upstream - proxy.connections.list[name+"downstream"] = client - proxy.connections.Unlock() - proxy.Toxics.StartLink(name+"upstream", client, upstream, stream.Upstream) - proxy.Toxics.StartLink(name+"downstream", upstream, client, stream.Downstream) - } -} - -func (proxy *Proxy) RemoveConnection(name string) { - proxy.connections.Lock() - defer proxy.connections.Unlock() - delete(proxy.connections.list, name) -} - -// Starts a proxy, assumes the lock has already been taken -func start(proxy *Proxy) error { - if proxy.Enabled { - return ErrProxyAlreadyStarted - } - - proxy.tomb = tomb.Tomb{} // Reset tomb, from previous starts/stops - go proxy.server() - err := <-proxy.started - // Only enable the proxy if it successfully started - proxy.Enabled = err == nil - return err -} - -// Stops a proxy, assumes the lock has already been taken -func stop(proxy *Proxy) { - if !proxy.Enabled { - return - } - proxy.Enabled = false - - proxy.tomb.Killf("Shutting down from stop()") - proxy.tomb.Wait() // Wait until we stop accepting new connections - - proxy.connections.Lock() - defer proxy.connections.Unlock() - for _, conn := range proxy.connections.list { - conn.Close() - } - - logrus.WithFields(logrus.Fields{ - "name": proxy.Name, - "proxy": proxy.Listen, - "upstream": proxy.Upstream, - }).Info("Terminated proxy") -} diff --git a/vendor/github.com/Shopify/toxiproxy/proxy_collection.go b/vendor/github.com/Shopify/toxiproxy/proxy_collection.go deleted file mode 100644 index 3070f5e..0000000 --- a/vendor/github.com/Shopify/toxiproxy/proxy_collection.go +++ /dev/null @@ -1,166 +0,0 @@ -package toxiproxy - -import ( - "encoding/json" - "fmt" - "io" - "sync" -) - -// ProxyCollection is a collection of proxies. It's the interface for anything -// to add and remove proxies from the toxiproxy instance. It's responsibilty is -// to maintain the integrity of the proxy set, by guarding for things such as -// duplicate names. 
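The duplicate-name guarantee described above is easiest to see with two `Add` calls, mirroring the collection tests later in this patch. A sketch using the server package directly; the proxy name and addresses are illustrative, and `start=false` keeps the example from binding a port.

```go
package main

import (
    "github.com/Shopify/toxiproxy"
)

func main() {
    collection := toxiproxy.NewProxyCollection()

    proxy := toxiproxy.NewProxy()
    proxy.Name = "redis"
    proxy.Listen = "localhost:26379"
    proxy.Upstream = "localhost:6379"

    // The first Add succeeds; start=false registers the proxy without
    // starting its listener.
    if err := collection.Add(proxy, false); err != nil {
        panic(err)
    }

    // Adding another proxy under the same name is rejected rather than
    // silently replacing the first; AddOrReplace is the call for that.
    if err := collection.Add(proxy, false); err != toxiproxy.ErrProxyAlreadyExists {
        panic("expected the duplicate name to be rejected")
    }
}
```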
-type ProxyCollection struct { - sync.RWMutex - - proxies map[string]*Proxy -} - -func NewProxyCollection() *ProxyCollection { - return &ProxyCollection{ - proxies: make(map[string]*Proxy), - } -} - -func (collection *ProxyCollection) Add(proxy *Proxy, start bool) error { - collection.Lock() - defer collection.Unlock() - - if _, exists := collection.proxies[proxy.Name]; exists { - return ErrProxyAlreadyExists - } - - if start { - err := proxy.Start() - if err != nil { - return err - } - } - - collection.proxies[proxy.Name] = proxy - - return nil -} - -func (collection *ProxyCollection) AddOrReplace(proxy *Proxy, start bool) error { - collection.Lock() - defer collection.Unlock() - - if existing, exists := collection.proxies[proxy.Name]; exists { - if existing.Listen == proxy.Listen && existing.Upstream == proxy.Upstream { - return nil - } - existing.Stop() - } - - if start { - err := proxy.Start() - if err != nil { - return err - } - } - - collection.proxies[proxy.Name] = proxy - - return nil -} - -func (collection *ProxyCollection) PopulateJson(data io.Reader) ([]*Proxy, error) { - input := []struct { - Proxy - Enabled *bool `json:"enabled"` // Overrides Proxy field to make field nullable - }{} - - err := json.NewDecoder(data).Decode(&input) - if err != nil { - return nil, joinError(err, ErrBadRequestBody) - } - - // Check for valid input before creating any proxies - t := true - for i, p := range input { - if len(p.Name) < 1 { - return nil, joinError(fmt.Errorf("name at proxy %d", i+1), ErrMissingField) - } - if len(p.Upstream) < 1 { - return nil, joinError(fmt.Errorf("upstream at proxy %d", i+1), ErrMissingField) - } - if p.Enabled == nil { - input[i].Enabled = &t - } - } - - proxies := make([]*Proxy, 0, len(input)) - - for _, p := range input { - proxy := NewProxy() - proxy.Name = p.Name - proxy.Listen = p.Listen - proxy.Upstream = p.Upstream - - err = collection.AddOrReplace(proxy, *p.Enabled) - if err != nil { - break - } - - proxies = append(proxies, proxy) - } - return proxies, err -} - -func (collection *ProxyCollection) Proxies() map[string]*Proxy { - collection.RLock() - defer collection.RUnlock() - - // Copy the map since using the existing one isn't thread-safe - proxies := make(map[string]*Proxy, len(collection.proxies)) - for k, v := range collection.proxies { - proxies[k] = v - } - return proxies -} - -func (collection *ProxyCollection) Get(name string) (*Proxy, error) { - collection.RLock() - defer collection.RUnlock() - - return collection.getByName(name) -} - -func (collection *ProxyCollection) Remove(name string) error { - collection.Lock() - defer collection.Unlock() - - proxy, err := collection.getByName(name) - if err != nil { - return err - } - proxy.Stop() - - delete(collection.proxies, proxy.Name) - return nil -} - -func (collection *ProxyCollection) Clear() error { - collection.Lock() - defer collection.Unlock() - - for _, proxy := range collection.proxies { - proxy.Stop() - - delete(collection.proxies, proxy.Name) - } - - return nil -} - -// getByName returns a proxy by its name. Its used from #remove and #get. -// It assumes the lock has already been acquired. 
-func (collection *ProxyCollection) getByName(name string) (*Proxy, error) { - proxy, exists := collection.proxies[name] - if !exists { - return nil, ErrProxyNotFound - } - return proxy, nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/proxy_collection_test.go b/vendor/github.com/Shopify/toxiproxy/proxy_collection_test.go deleted file mode 100644 index 5bffdeb..0000000 --- a/vendor/github.com/Shopify/toxiproxy/proxy_collection_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package toxiproxy_test - -import ( - "bytes" - "net" - "testing" - - "github.com/Shopify/toxiproxy" -) - -func TestAddProxyToCollection(t *testing.T) { - collection := toxiproxy.NewProxyCollection() - proxy := NewTestProxy("test", "localhost:20000") - - if _, err := collection.Get(proxy.Name); err == nil { - t.Error("Expected proxies to be empty") - } - - err := collection.Add(proxy, false) - if err != nil { - t.Error("Expected to be able to add first proxy to collection") - } - - if _, err := collection.Get(proxy.Name); err != nil { - t.Error("Expected proxy to be added to map") - } -} - -func TestAddTwoProxiesToCollection(t *testing.T) { - collection := toxiproxy.NewProxyCollection() - proxy := NewTestProxy("test", "localhost:20000") - - err := collection.Add(proxy, false) - if err != nil { - t.Error("Expected to be able to add first proxy to collection") - } - - err = collection.Add(proxy, false) - if err == nil { - t.Error("Expected to not be able to add proxy with same name") - } -} - -func TestListProxies(t *testing.T) { - collection := toxiproxy.NewProxyCollection() - proxy := NewTestProxy("test", "localhost:20000") - - err := collection.Add(proxy, false) - if err != nil { - t.Error("Expected to be able to add first proxy to collection") - } - - proxies := collection.Proxies() - proxy, ok := proxies[proxy.Name] - if !ok { - t.Error("Expected to be able to see existing proxy") - } else if proxy.Enabled { - t.Error("Expected proxy not to be running") - } -} - -func TestAddProxyAndStart(t *testing.T) { - collection := toxiproxy.NewProxyCollection() - proxy := NewTestProxy("test", "localhost:20000") - - err := collection.Add(proxy, true) - if err != nil { - t.Error("Expected to be able to add proxy to collection:", err) - } - - proxies := collection.Proxies() - proxy, ok := proxies[proxy.Name] - if !ok { - t.Error("Expected to be able to see existing proxy") - } else if !proxy.Enabled { - t.Error("Expected proxy to be running") - } -} - -func TestAddAndRemoveProxyFromCollection(t *testing.T) { - WithTCPProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - collection := toxiproxy.NewProxyCollection() - - if _, err := collection.Get(proxy.Name); err == nil { - t.Error("Expected proxies to be empty") - } - - err := collection.Add(proxy, false) - if err != nil { - t.Error("Expected to be able to add first proxy to collection") - } - - if _, err := collection.Get(proxy.Name); err != nil { - t.Error("Expected proxy to be added to map") - } - - msg := []byte("go away") - - _, err = conn.Write(msg) - if err != nil { - t.Error("Failed writing to socket to shut down server") - } - - conn.Close() - - resp := <-response - if !bytes.Equal(resp, msg) { - t.Error("Server didn't read bytes from client") - } - - err = collection.Remove(proxy.Name) - if err != nil { - t.Error("Expected to remove proxy from collection") - } - - if _, err := collection.Get(proxy.Name); err == nil { - t.Error("Expected proxies to be empty") - } - }) -} diff --git a/vendor/github.com/Shopify/toxiproxy/proxy_test.go 
b/vendor/github.com/Shopify/toxiproxy/proxy_test.go deleted file mode 100644 index 5d094c7..0000000 --- a/vendor/github.com/Shopify/toxiproxy/proxy_test.go +++ /dev/null @@ -1,316 +0,0 @@ -package toxiproxy_test - -import ( - "bytes" - "encoding/hex" - "io" - "io/ioutil" - "net" - "testing" - "time" - - "github.com/Shopify/toxiproxy" - "github.com/Sirupsen/logrus" - tomb "gopkg.in/tomb.v1" -) - -func init() { - logrus.SetLevel(logrus.FatalLevel) -} - -func NewTestProxy(name, upstream string) *toxiproxy.Proxy { - proxy := toxiproxy.NewProxy() - - proxy.Name = name - proxy.Listen = "localhost:0" - proxy.Upstream = upstream - - return proxy -} - -func WithTCPServer(t *testing.T, f func(string, chan []byte)) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - response := make(chan []byte, 1) - tomb := tomb.Tomb{} - - go func() { - defer tomb.Done() - src, err := ln.Accept() - if err != nil { - select { - case <-tomb.Dying(): - default: - t.Fatal("Failed to accept client") - } - return - } - - ln.Close() - - val, err := ioutil.ReadAll(src) - if err != nil { - t.Fatal("Failed to read from client") - } - - response <- val - }() - - f(ln.Addr().String(), response) - - tomb.Killf("Function body finished") - ln.Close() - tomb.Wait() - - close(response) -} - -func TestSimpleServer(t *testing.T) { - WithTCPServer(t, func(addr string, response chan []byte) { - conn, err := net.Dial("tcp", addr) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - - msg := []byte("hello world") - - _, err = conn.Write(msg) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } - - resp := <-response - if !bytes.Equal(resp, msg) { - t.Error("Server didn't read bytes from client") - } - }) -} - -func WithTCPProxy(t *testing.T, f func(proxy net.Conn, response chan []byte, proxyServer *toxiproxy.Proxy)) { - WithTCPServer(t, func(upstream string, response chan []byte) { - proxy := NewTestProxy("test", upstream) - proxy.Start() - - conn := AssertProxyUp(t, proxy.Listen, true) - - f(conn, response, proxy) - - proxy.Stop() - }) -} - -func TestProxySimpleMessage(t *testing.T) { - WithTCPProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - msg := []byte("hello world") - - _, err := conn.Write(msg) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } - - resp := <-response - if !bytes.Equal(resp, msg) { - t.Error("Server didn't read correct bytes from client", resp) - } - }) -} - -func TestProxyToDownUpstream(t *testing.T) { - proxy := NewTestProxy("test", "localhost:20009") - proxy.Start() - - conn := AssertProxyUp(t, proxy.Listen, true) - // Check to make sure the connection is closed - conn.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) - _, err := conn.Read(make([]byte, 1)) - if err != io.EOF { - t.Error("Proxy did not close connection when upstream down", err) - } - - proxy.Stop() -} - -func TestProxyBigMessage(t *testing.T) { - WithTCPProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - buf := make([]byte, 32*1024) - msg := make([]byte, len(buf)*2) - hex.Encode(msg, buf) - - _, err := conn.Write(msg) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP 
connection", err) - } - - resp := <-response - if !bytes.Equal(resp, msg) { - t.Error("Server didn't read correct bytes from client", resp) - } - }) -} - -func TestProxyTwoPartMessage(t *testing.T) { - WithTCPProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - msg1 := []byte("hello world") - msg2 := []byte("hello world") - - _, err := conn.Write(msg1) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - _, err = conn.Write(msg2) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } - - msg1 = append(msg1, msg2...) - - resp := <-response - if !bytes.Equal(resp, msg1) { - t.Error("Server didn't read correct bytes from client", resp) - } - }) -} - -func TestClosingProxyMultipleTimes(t *testing.T) { - WithTCPProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - proxy.Stop() - proxy.Stop() - proxy.Stop() - }) -} - -func TestStartTwoProxiesOnSameAddress(t *testing.T) { - WithTCPProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - proxy2 := NewTestProxy("proxy_2", "localhost:3306") - proxy2.Listen = proxy.Listen - if err := proxy2.Start(); err == nil { - t.Fatal("Expected an err back from start") - } - }) -} - -func TestStopProxyBeforeStarting(t *testing.T) { - WithTCPServer(t, func(upstream string, response chan []byte) { - proxy := NewTestProxy("test", upstream) - AssertProxyUp(t, proxy.Listen, false) - - proxy.Stop() - err := proxy.Start() - if err != nil { - t.Error("Proxy failed to start", err) - } - - err = proxy.Start() - if err != toxiproxy.ErrProxyAlreadyStarted { - t.Error("Proxy did not fail to start when already started", err) - } - AssertProxyUp(t, proxy.Listen, true) - - proxy.Stop() - AssertProxyUp(t, proxy.Listen, false) - }) -} - -func TestProxyUpdate(t *testing.T) { - WithTCPServer(t, func(upstream string, response chan []byte) { - proxy := NewTestProxy("test", upstream) - err := proxy.Start() - if err != nil { - t.Error("Proxy failed to start", err) - } - AssertProxyUp(t, proxy.Listen, true) - - before := proxy.Listen - - input := &toxiproxy.Proxy{Listen: "localhost:0", Upstream: proxy.Upstream, Enabled: true} - err = proxy.Update(input) - if err != nil { - t.Error("Failed to update proxy", err) - } - if proxy.Listen == before || proxy.Listen == input.Listen { - t.Errorf("Proxy update didn't change listen address: %s to %s", before, proxy.Listen) - } - AssertProxyUp(t, proxy.Listen, true) - - input.Listen = proxy.Listen - err = proxy.Update(input) - if err != nil { - t.Error("Failed to update proxy", err) - } - AssertProxyUp(t, proxy.Listen, true) - - input.Enabled = false - err = proxy.Update(input) - if err != nil { - t.Error("Failed to update proxy", err) - } - AssertProxyUp(t, proxy.Listen, false) - }) -} - -func TestRestartFailedToStartProxy(t *testing.T) { - WithTCPServer(t, func(upstream string, response chan []byte) { - proxy := NewTestProxy("test", upstream) - conflict := NewTestProxy("test2", upstream) - - err := conflict.Start() - if err != nil { - t.Error("Proxy failed to start", err) - } - AssertProxyUp(t, conflict.Listen, true) - - proxy.Listen = conflict.Listen - err = proxy.Start() - if err == nil || err == toxiproxy.ErrProxyAlreadyStarted { - t.Error("Proxy started when it should have conflicted") - } - - conflict.Stop() - AssertProxyUp(t, conflict.Listen, false) - - err = proxy.Start() - if err != nil { - t.Error("Proxy failed to start after conflict went 
away", err) - } - AssertProxyUp(t, proxy.Listen, true) - - proxy.Stop() - AssertProxyUp(t, proxy.Listen, false) - }) -} - -func AssertProxyUp(t *testing.T, addr string, up bool) net.Conn { - conn, err := net.Dial("tcp", addr) - if err != nil && up { - t.Error("Expected proxy to be up:", err) - } else if err == nil && !up { - t.Error("Expected proxy to be down") - } - return conn -} diff --git a/vendor/github.com/Shopify/toxiproxy/share/toxiproxy.conf b/vendor/github.com/Shopify/toxiproxy/share/toxiproxy.conf deleted file mode 100644 index 6fa7df7..0000000 --- a/vendor/github.com/Shopify/toxiproxy/share/toxiproxy.conf +++ /dev/null @@ -1,13 +0,0 @@ -description "TCP proxy to simulate network and system conditions" -author "Simon Eskildsen & Jacob Wirth" - -start on startup -stop on shutdown - -env HOST="localhost" -env PORT="8474" -env BINARY="/usr/bin/toxiproxy-server" - -script - exec $BINARY -port $PORT -host $HOST -end script diff --git a/vendor/github.com/Shopify/toxiproxy/stream/io_chan.go b/vendor/github.com/Shopify/toxiproxy/stream/io_chan.go deleted file mode 100644 index 4038c32..0000000 --- a/vendor/github.com/Shopify/toxiproxy/stream/io_chan.go +++ /dev/null @@ -1,108 +0,0 @@ -package stream - -import ( - "fmt" - "io" - "time" -) - -type Direction uint8 - -const ( - Upstream Direction = iota - Downstream - NumDirections -) - -// Stores a slice of bytes with its receive timestmap -type StreamChunk struct { - Data []byte - Timestamp time.Time -} - -// Implements the io.WriteCloser interface for a chan []byte -type ChanWriter struct { - output chan<- *StreamChunk -} - -func NewChanWriter(output chan<- *StreamChunk) *ChanWriter { - return &ChanWriter{output} -} - -// Write `buf` as a StreamChunk to the channel. The full buffer is always written, and error -// will always be nil. Calling `Write()` after closing the channel will panic. -func (c *ChanWriter) Write(buf []byte) (int, error) { - packet := &StreamChunk{make([]byte, len(buf)), time.Now()} - copy(packet.Data, buf) // Make a copy before sending it to the channel - c.output <- packet - return len(buf), nil -} - -// Close the output channel -func (c *ChanWriter) Close() error { - close(c.output) - return nil -} - -// Implements the io.Reader interface for a chan []byte -type ChanReader struct { - input <-chan *StreamChunk - interrupt <-chan struct{} - buffer []byte -} - -var ErrInterrupted = fmt.Errorf("read interrupted by channel") - -func NewChanReader(input <-chan *StreamChunk) *ChanReader { - return &ChanReader{input, make(chan struct{}), []byte{}} -} - -// Specify a channel that can interrupt a read if it is blocking. -func (c *ChanReader) SetInterrupt(interrupt <-chan struct{}) { - c.interrupt = interrupt -} - -// Read from the channel into `out`. This will block until data is available, -// and can be interrupted with a channel using `SetInterrupt()`. If the read -// was interrupted, `ErrInterrupted` will be returned. 
-func (c *ChanReader) Read(out []byte) (int, error) { - if c.buffer == nil { - return 0, io.EOF - } - n := copy(out, c.buffer) - c.buffer = c.buffer[n:] - if len(out) <= len(c.buffer) { - return n, nil - } else if n > 0 { - // We have some data to return, so make the channel read optional - select { - case p := <-c.input: - if p == nil { // Stream was closed - c.buffer = nil - if n > 0 { - return n, nil - } - return 0, io.EOF - } - n2 := copy(out[n:], p.Data) - c.buffer = p.Data[n2:] - return n + n2, nil - default: - return n, nil - } - } - var p *StreamChunk - select { - case p = <-c.input: - case <-c.interrupt: - c.buffer = c.buffer[:0] - return n, ErrInterrupted - } - if p == nil { // Stream was closed - c.buffer = nil - return 0, io.EOF - } - n2 := copy(out[n:], p.Data) - c.buffer = p.Data[n2:] - return n + n2, nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/stream/io_chan_test.go b/vendor/github.com/Shopify/toxiproxy/stream/io_chan_test.go deleted file mode 100644 index 9e140ef..0000000 --- a/vendor/github.com/Shopify/toxiproxy/stream/io_chan_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package stream - -import ( - "bytes" - "io" - "testing" - "time" -) - -func TestBasicReadWrite(t *testing.T) { - send := []byte("hello world") - c := make(chan *StreamChunk) - writer := NewChanWriter(c) - reader := NewChanReader(c) - go writer.Write(send) - buf := make([]byte, len(send)) - n, err := reader.Read(buf) - if n != len(send) { - t.Fatalf("Read wrong number of bytes: %d expected %d", n, len(send)) - } - if err != nil { - t.Fatal("Couldn't read from stream", err) - } - if !bytes.Equal(buf, send) { - t.Fatal("Got wrong message from stream", string(buf)) - } - writer.Close() - n, err = reader.Read(buf) - if err != io.EOF { - t.Fatal("Read returned wrong error after close:", err) - } - if n != 0 { - t.Fatalf("Read still returned data after close: %d bytes", n) - } -} - -func TestReadMoreThanWrite(t *testing.T) { - send := []byte("hello world") - c := make(chan *StreamChunk) - writer := NewChanWriter(c) - reader := NewChanReader(c) - go writer.Write(send) - buf := make([]byte, len(send)+10) - n, err := reader.Read(buf) - if n != len(send) { - t.Fatalf("Read wrong number of bytes: %d expected %d", n, len(send)) - } - if err != nil { - t.Fatal("Couldn't read from stream", err) - } - if !bytes.Equal(buf[:n], send) { - t.Fatal("Got wrong message from stream", string(buf[:n])) - } - writer.Close() - n, err = reader.Read(buf) - if err != io.EOF { - t.Fatal("Read returned wrong error after close:", err) - } - if n != 0 { - t.Fatalf("Read still returned data after close: %d bytes", n) - } -} - -func TestReadLessThanWrite(t *testing.T) { - send := []byte("hello world") - c := make(chan *StreamChunk) - writer := NewChanWriter(c) - reader := NewChanReader(c) - go writer.Write(send) - buf := make([]byte, 6) - n, err := reader.Read(buf) - if n != len(buf) { - t.Fatalf("Read wrong number of bytes: %d expected %d", n, len(buf)) - } - if err != nil { - t.Fatal("Couldn't read from stream", err) - } - if !bytes.Equal(buf, send[:len(buf)]) { - t.Fatal("Got wrong message from stream", string(buf)) - } - writer.Close() - n, err = reader.Read(buf) - if n != len(send)-len(buf) { - t.Fatalf("Read wrong number of bytes: %d expected %d", n, len(send)-len(buf)) - } - if err != nil { - t.Fatal("Couldn't read from stream", err) - } - if !bytes.Equal(buf[:n], send[len(buf):]) { - t.Fatal("Got wrong message from stream", string(buf[:n])) - } - n, err = reader.Read(buf) - if err != io.EOF { - t.Fatal("Read returned wrong 
error after close:", err) - } - if n != 0 { - t.Fatalf("Read still returned data after close: %d bytes", n) - } -} - -func TestMultiReadWrite(t *testing.T) { - send := []byte("hello world, this message is longer") - c := make(chan *StreamChunk) - writer := NewChanWriter(c) - reader := NewChanReader(c) - go func() { - writer.Write(send[:9]) - writer.Write(send[9:19]) - writer.Write(send[19:]) - writer.Close() - }() - buf := make([]byte, 10) - for read := 0; read < len(send); { - n, err := reader.Read(buf) - if err != nil { - t.Fatal("Couldn't read from stream", err, n) - } - if !bytes.Equal(buf[:n], send[read:read+n]) { - t.Fatal("Got wrong message from stream", string(buf)) - } - read += n - } - n, err := reader.Read(buf) - if err != io.EOF { - t.Fatal("Read returned wrong error after close:", err, string(buf[:n])) - } - if !bytes.Equal(buf[:n], send[len(send)-n:]) { - t.Fatal("Got wrong message from stream", string(buf[:n])) - } -} - -func TestMultiWriteWithCopy(t *testing.T) { - send := []byte("hello world, this message is longer") - c := make(chan *StreamChunk) - writer := NewChanWriter(c) - reader := NewChanReader(c) - go func() { - writer.Write(send[:9]) - writer.Write(send[9:19]) - writer.Write(send[19:]) - writer.Close() - }() - buf := new(bytes.Buffer) - n, err := io.Copy(buf, reader) - if int(n) != len(send) { - t.Fatalf("Read wrong number of bytes: %d expected %d", n, len(send)) - } - if err != nil { - t.Fatal("Couldn't read from stream", err) - } - if !bytes.Equal(buf.Bytes(), send) { - t.Fatal("Got wrong message from stream", buf.String()) - } -} - -func TestReadInterrupt(t *testing.T) { - send := []byte("hello world") - c := make(chan *StreamChunk) - interrupt := make(chan struct{}) - writer := NewChanWriter(c) - reader := NewChanReader(c) - reader.SetInterrupt(interrupt) - go writer.Write(send) - buf := make([]byte, len(send)) - n, err := reader.Read(buf) - if n != len(send) { - t.Fatalf("Read wrong number of bytes: %d expected %d", n, len(send)) - } - if err != nil { - t.Fatal("Couldn't read from stream", err) - } - if !bytes.Equal(buf, send) { - t.Fatal("Got wrong message from stream", string(buf)) - } - - // Try interrupting the stream mid-read - go func() { - time.Sleep(50 * time.Millisecond) - interrupt <- struct{}{} - }() - n, err = reader.Read(buf) - if err != ErrInterrupted { - t.Fatal("Read returned wrong error after interrupt:", err) - } - if n != 0 { - t.Fatalf("Read still returned data after interrput: %d bytes", n) - } - - // Try writing again after the channel was interrupted - go writer.Write(send) - n, err = reader.Read(buf) - if n != len(send) { - t.Fatalf("Read wrong number of bytes: %d expected %d", n, len(send)) - } - if err != nil { - t.Fatal("Couldn't read from stream", err) - } - if !bytes.Equal(buf, send) { - t.Fatal("Got wrong message from stream", string(buf)) - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/testhelper/testhelper.go b/vendor/github.com/Shopify/toxiproxy/testhelper/testhelper.go deleted file mode 100644 index 0801bf9..0000000 --- a/vendor/github.com/Shopify/toxiproxy/testhelper/testhelper.go +++ /dev/null @@ -1,20 +0,0 @@ -package testhelper - -import ( - "time" - "fmt" -) - -func TimeoutAfter(after time.Duration, f func()) error { - success := make(chan struct{}) - go func() { - f() - close(success) - }() - select { - case <-success: - return nil - case <-time.After(after): - return fmt.Errorf("timed out after %s", after) - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/testhelper/testhelper_test.go 
b/vendor/github.com/Shopify/toxiproxy/testhelper/testhelper_test.go deleted file mode 100644 index b6f1762..0000000 --- a/vendor/github.com/Shopify/toxiproxy/testhelper/testhelper_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package testhelper - -import ( - "testing" - "time" -) - -func TestTimeoutAfter(t *testing.T) { - err := TimeoutAfter(5*time.Millisecond, func() {}) - if err != nil { - t.Fatal("Non blocking function should not timeout.") - } - - err = TimeoutAfter(5*time.Millisecond, func() { - time.Sleep(time.Second) - }) - if err == nil { - t.Fatal("Blocking function should timeout.") - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxic_collection.go b/vendor/github.com/Shopify/toxiproxy/toxic_collection.go deleted file mode 100644 index 3aded16..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxic_collection.go +++ /dev/null @@ -1,260 +0,0 @@ -package toxiproxy - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "strings" - "sync" - - "github.com/Shopify/toxiproxy/stream" - "github.com/Shopify/toxiproxy/toxics" -) - -// ToxicCollection contains a list of toxics that are chained together. Each proxy -// has its own collection. A hidden noop toxic is always maintained at the beginning -// of each chain so toxics have a method of pausing incoming data (by interrupting -// the preceding toxic). -type ToxicCollection struct { - sync.Mutex - - noop *toxics.ToxicWrapper - proxy *Proxy - chain [][]*toxics.ToxicWrapper - links map[string]*ToxicLink -} - -func NewToxicCollection(proxy *Proxy) *ToxicCollection { - collection := &ToxicCollection{ - noop: &toxics.ToxicWrapper{ - Toxic: new(toxics.NoopToxic), - Type: "noop", - }, - proxy: proxy, - chain: make([][]*toxics.ToxicWrapper, stream.NumDirections), - links: make(map[string]*ToxicLink), - } - for dir := range collection.chain { - collection.chain[dir] = make([]*toxics.ToxicWrapper, 1, toxics.Count()+1) - collection.chain[dir][0] = collection.noop - } - return collection -} - -func (c *ToxicCollection) ResetToxics() { - c.Lock() - defer c.Unlock() - - // Remove all but the first noop toxic - for dir := range c.chain { - for len(c.chain[dir]) > 1 { - c.chainRemoveToxic(c.chain[dir][1]) - } - } -} - -func (c *ToxicCollection) GetToxic(name string) *toxics.ToxicWrapper { - c.Lock() - defer c.Unlock() - - return c.findToxicByName(name) -} - -func (c *ToxicCollection) GetToxicArray() []toxics.Toxic { - c.Lock() - defer c.Unlock() - - result := make([]toxics.Toxic, 0) - for dir := range c.chain { - for i, toxic := range c.chain[dir] { - if i == 0 { - // Skip the first noop toxic, it should not be visible - continue - } - result = append(result, toxic) - } - } - return result -} - -func (c *ToxicCollection) AddToxicJson(data io.Reader) (*toxics.ToxicWrapper, error) { - c.Lock() - defer c.Unlock() - - var buffer bytes.Buffer - - // Default to a downstream toxic with a toxicity of 1. 
- wrapper := &toxics.ToxicWrapper{ - Stream: "downstream", - Toxicity: 1.0, - Toxic: new(toxics.NoopToxic), - } - - err := json.NewDecoder(io.TeeReader(data, &buffer)).Decode(wrapper) - if err != nil { - return nil, joinError(err, ErrBadRequestBody) - } - - switch strings.ToLower(wrapper.Stream) { - case "downstream": - wrapper.Direction = stream.Downstream - case "upstream": - wrapper.Direction = stream.Upstream - default: - return nil, ErrInvalidStream - } - if wrapper.Name == "" { - wrapper.Name = fmt.Sprintf("%s_%s", wrapper.Type, wrapper.Stream) - } - - if toxics.New(wrapper) == nil { - return nil, ErrInvalidToxicType - } - - found := c.findToxicByName(wrapper.Name) - if found != nil { - return nil, ErrToxicAlreadyExists - } - - // Parse attributes because we now know the toxics type. - attrs := &struct { - Attributes interface{} `json:"attributes"` - }{ - wrapper.Toxic, - } - err = json.NewDecoder(&buffer).Decode(attrs) - if err != nil { - return nil, joinError(err, ErrBadRequestBody) - } - - c.chainAddToxic(wrapper) - return wrapper, nil -} - -func (c *ToxicCollection) UpdateToxicJson(name string, data io.Reader) (*toxics.ToxicWrapper, error) { - c.Lock() - defer c.Unlock() - - toxic := c.findToxicByName(name) - if toxic != nil { - attrs := &struct { - Attributes interface{} `json:"attributes"` - Toxicity float32 `json:"toxicity"` - }{ - toxic.Toxic, - toxic.Toxicity, - } - err := json.NewDecoder(data).Decode(attrs) - if err != nil { - return nil, joinError(err, ErrBadRequestBody) - } - toxic.Toxicity = attrs.Toxicity - - c.chainUpdateToxic(toxic) - return toxic, nil - } - return nil, ErrToxicNotFound -} - -func (c *ToxicCollection) RemoveToxic(name string) error { - c.Lock() - defer c.Unlock() - - toxic := c.findToxicByName(name) - if toxic != nil { - c.chainRemoveToxic(toxic) - return nil - } - return ErrToxicNotFound -} - -func (c *ToxicCollection) StartLink(name string, input io.Reader, output io.WriteCloser, direction stream.Direction) { - c.Lock() - defer c.Unlock() - - link := NewToxicLink(c.proxy, c, direction) - link.Start(name, input, output) - c.links[name] = link -} - -func (c *ToxicCollection) RemoveLink(name string) { - c.Lock() - defer c.Unlock() - delete(c.links, name) -} - -// All following functions assume the lock is already grabbed -func (c *ToxicCollection) findToxicByName(name string) *toxics.ToxicWrapper { - for dir := range c.chain { - for i, toxic := range c.chain[dir] { - if i == 0 { - // Skip the first noop toxic, it has no name - continue - } - if toxic.Name == name { - return toxic - } - } - } - return nil -} - -func (c *ToxicCollection) chainAddToxic(toxic *toxics.ToxicWrapper) { - dir := toxic.Direction - toxic.Index = len(c.chain[dir]) - c.chain[dir] = append(c.chain[dir], toxic) - - // Asynchronously add the toxic to each link - group := sync.WaitGroup{} - for _, link := range c.links { - if link.direction == dir { - group.Add(1) - go func(link *ToxicLink) { - defer group.Done() - link.AddToxic(toxic) - }(link) - } - } - group.Wait() -} - -func (c *ToxicCollection) chainUpdateToxic(toxic *toxics.ToxicWrapper) { - c.chain[toxic.Direction][toxic.Index] = toxic - - // Asynchronously update the toxic in each link - group := sync.WaitGroup{} - for _, link := range c.links { - if link.direction == toxic.Direction { - group.Add(1) - go func(link *ToxicLink) { - defer group.Done() - link.UpdateToxic(toxic) - }(link) - } - } - group.Wait() -} - -func (c *ToxicCollection) chainRemoveToxic(toxic *toxics.ToxicWrapper) { - dir := toxic.Direction - c.chain[dir] = 
append(c.chain[dir][:toxic.Index], c.chain[dir][toxic.Index+1:]...) - for i := toxic.Index; i < len(c.chain[dir]); i++ { - c.chain[dir][i].Index = i - } - - // Asynchronously remove the toxic from each link - group := sync.WaitGroup{} - for _, link := range c.links { - if link.direction == dir { - group.Add(1) - go func(link *ToxicLink) { - defer group.Done() - link.RemoveToxic(toxic) - }(link) - } - } - group.Wait() - - toxic.Index = -1 -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/bandwidth.go b/vendor/github.com/Shopify/toxiproxy/toxics/bandwidth.go deleted file mode 100644 index 6c4624a..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/bandwidth.go +++ /dev/null @@ -1,59 +0,0 @@ -package toxics - -import ( - "time" - - "github.com/Shopify/toxiproxy/stream" -) - -// The BandwidthToxic passes data through at a limited rate -type BandwidthToxic struct { - // Rate in KB/s - Rate int64 `json:"rate"` -} - -func (t *BandwidthToxic) Pipe(stub *ToxicStub) { - var sleep time.Duration = 0 - for { - select { - case <-stub.Interrupt: - return - case p := <-stub.Input: - if p == nil { - stub.Close() - return - } - if t.Rate <= 0 { - sleep = 0 - } else { - sleep += time.Duration(len(p.Data)) * time.Millisecond / time.Duration(t.Rate) - } - // If the rate is low enough, split the packet up and send in 100 millisecond intervals - for int64(len(p.Data)) > t.Rate*100 { - select { - case <-time.After(100 * time.Millisecond): - stub.Output <- &stream.StreamChunk{p.Data[:t.Rate*100], p.Timestamp} - p.Data = p.Data[t.Rate*100:] - sleep -= 100 * time.Millisecond - case <-stub.Interrupt: - stub.Output <- p // Don't drop any data on the floor - return - } - } - start := time.Now() - select { - case <-time.After(sleep): - // time.After only seems to have ~1ms prevision, so offset the next sleep by the error - sleep -= time.Since(start) - stub.Output <- p - case <-stub.Interrupt: - stub.Output <- p // Don't drop any data on the floor - return - } - } - } -} - -func init() { - Register("bandwidth", new(BandwidthToxic)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/bandwidth_test.go b/vendor/github.com/Shopify/toxiproxy/toxics/bandwidth_test.go deleted file mode 100644 index 6a3cf8d..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/bandwidth_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package toxics_test - -import ( - "bytes" - "io" - "net" - "strings" - "testing" - "time" - - "github.com/Shopify/toxiproxy/toxics" -) - -func TestBandwidthToxic(t *testing.T) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - serverConnRecv := make(chan net.Conn) - - go func() { - conn, err := ln.Accept() - if err != nil { - t.Error("Unable to accept TCP connection", err) - } - serverConnRecv <- conn - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - - serverConn := <-serverConnRecv - - rate := 1000 // 1MB/s - proxy.Toxics.AddToxicJson(ToxicToJson(t, "", "bandwidth", "upstream", &toxics.BandwidthToxic{Rate: int64(rate)})) - - buf := []byte(strings.Repeat("hello world ", 40000)) // 480KB - go func() { - n, err := conn.Write(buf) - conn.Close() - if n != len(buf) || err != nil { - t.Errorf("Failed to write buffer: (%d == %d) %v", n, len(buf), err) - } - }() - - buf2 := make([]byte, len(buf)) - start := time.Now() - _, err = io.ReadAtLeast(serverConn, 
buf2, len(buf2)) - if err != nil { - t.Errorf("Proxy read failed: %v", err) - } else if bytes.Compare(buf, buf2) != 0 { - t.Errorf("Server did not read correct buffer from client!") - } - - AssertDeltaTime(t, - "Bandwidth", - time.Since(start), - time.Duration(len(buf))*time.Second/time.Duration(rate*1000), - 10*time.Millisecond, - ) -} - -func BenchmarkBandwidthToxic100MB(b *testing.B) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - b.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - buf := []byte(strings.Repeat("hello world ", 1000)) - - go func() { - conn, err := ln.Accept() - if err != nil { - b.Error("Unable to accept TCP connection", err) - } - buf2 := make([]byte, len(buf)) - for err == nil { - _, err = conn.Read(buf2) - } - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - b.Error("Unable to dial TCP server", err) - } - - proxy.Toxics.AddToxicJson(ToxicToJson(nil, "", "bandwidth", "upstream", &toxics.BandwidthToxic{Rate: 100 * 1000})) - - b.SetBytes(int64(len(buf))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - n, err := conn.Write(buf) - if err != nil || n != len(buf) { - b.Errorf("%v, %d == %d", err, n, len(buf)) - break - } - } - - err = conn.Close() - if err != nil { - b.Error("Failed to close TCP connection", err) - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/latency.go b/vendor/github.com/Shopify/toxiproxy/toxics/latency.go deleted file mode 100644 index 2baf549..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/latency.go +++ /dev/null @@ -1,55 +0,0 @@ -package toxics - -import ( - "math/rand" - "time" -) - -// The LatencyToxic passes data through with the a delay of latency +/- jitter added. -type LatencyToxic struct { - // Times in milliseconds - Latency int64 `json:"latency"` - Jitter int64 `json:"jitter"` -} - -func (t *LatencyToxic) GetBufferSize() int { - return 1024 -} - -func (t *LatencyToxic) delay() time.Duration { - // Delay = t.Latency +/- t.Jitter - delay := t.Latency - jitter := int64(t.Jitter) - if jitter > 0 { - delay += rand.Int63n(jitter*2) - jitter - } - return time.Duration(delay) * time.Millisecond -} - -func (t *LatencyToxic) Pipe(stub *ToxicStub) { - for { - select { - case <-stub.Interrupt: - return - case c := <-stub.Input: - if c == nil { - stub.Close() - return - } - sleep := t.delay() - time.Since(c.Timestamp) - select { - case <-time.After(sleep): - c.Timestamp = c.Timestamp.Add(sleep) - stub.Output <- c - case <-stub.Interrupt: - // Exit fast without applying latency. 
- stub.Output <- c // Don't drop any data on the floor - return - } - } - } -} - -func init() { - Register("latency", new(LatencyToxic)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/latency_test.go b/vendor/github.com/Shopify/toxiproxy/toxics/latency_test.go deleted file mode 100644 index b5c41b8..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/latency_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package toxics_test - -import ( - "bufio" - "bytes" - "io" - "net" - "strconv" - "strings" - "testing" - "time" - - "github.com/Shopify/toxiproxy" - "github.com/Shopify/toxiproxy/toxics" -) - -func AssertDeltaTime(t *testing.T, message string, actual, expected, delta time.Duration) { - diff := actual - expected - if diff < 0 { - diff *= -1 - } - if diff > delta { - t.Errorf("[%s] Time was more than %v off: got %v expected %v", message, delta, actual, expected) - } else { - t.Logf("[%s] Time was correct: %v (expected %v)", message, actual, expected) - } -} - -func DoLatencyTest(t *testing.T, upLatency, downLatency *toxics.LatencyToxic) { - WithEchoProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - if upLatency == nil { - upLatency = &toxics.LatencyToxic{} - } else { - _, err := proxy.Toxics.AddToxicJson(ToxicToJson(t, "latency_up", "latency", "upstream", upLatency)) - if err != nil { - t.Error("AddToxicJson returned error:", err) - } - } - if downLatency == nil { - downLatency = &toxics.LatencyToxic{} - } else { - _, err := proxy.Toxics.AddToxicJson(ToxicToJson(t, "latency_down", "latency", "downstream", downLatency)) - if err != nil { - t.Error("AddToxicJson returned error:", err) - } - } - t.Logf("Using latency: Up: %dms +/- %dms, Down: %dms +/- %dms", upLatency.Latency, upLatency.Jitter, downLatency.Latency, downLatency.Jitter) - - msg := []byte("hello world " + strings.Repeat("a", 32*1024) + "\n") - - timer := time.Now() - _, err := conn.Write(msg) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - resp := <-response - if !bytes.Equal(resp, msg) { - t.Error("Server didn't read correct bytes from client:", string(resp)) - } - AssertDeltaTime(t, - "Server read", - time.Since(timer), - time.Duration(upLatency.Latency)*time.Millisecond, - time.Duration(upLatency.Jitter+10)*time.Millisecond, - ) - timer2 := time.Now() - - scan := bufio.NewScanner(conn) - if scan.Scan() { - resp = append(scan.Bytes(), '\n') - if !bytes.Equal(resp, msg) { - t.Error("Client didn't read correct bytes from server:", string(resp)) - } - } - AssertDeltaTime(t, - "Client read", - time.Since(timer2), - time.Duration(downLatency.Latency)*time.Millisecond, - time.Duration(downLatency.Jitter+10)*time.Millisecond, - ) - AssertDeltaTime(t, - "Round trip", - time.Since(timer), - time.Duration(upLatency.Latency+downLatency.Latency)*time.Millisecond, - time.Duration(upLatency.Jitter+downLatency.Jitter+20)*time.Millisecond, - ) - - proxy.Toxics.RemoveToxic("latency_up") - proxy.Toxics.RemoveToxic("latency_down") - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } - }) -} - -func TestUpstreamLatency(t *testing.T) { - DoLatencyTest(t, &toxics.LatencyToxic{Latency: 100}, nil) -} - -func TestDownstreamLatency(t *testing.T) { - DoLatencyTest(t, nil, &toxics.LatencyToxic{Latency: 100}) -} - -func TestFullstreamLatencyEven(t *testing.T) { - DoLatencyTest(t, &toxics.LatencyToxic{Latency: 100}, &toxics.LatencyToxic{Latency: 100}) -} - -func TestFullstreamLatencyBiasUp(t *testing.T) { - DoLatencyTest(t, &toxics.LatencyToxic{Latency: 
1000}, &toxics.LatencyToxic{Latency: 100}) -} - -func TestFullstreamLatencyBiasDown(t *testing.T) { - DoLatencyTest(t, &toxics.LatencyToxic{Latency: 100}, &toxics.LatencyToxic{Latency: 1000}) -} - -func TestZeroLatency(t *testing.T) { - DoLatencyTest(t, &toxics.LatencyToxic{Latency: 0}, &toxics.LatencyToxic{Latency: 0}) -} - -func TestLatencyToxicCloseRace(t *testing.T) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - go func() { - for { - _, err := ln.Accept() - if err != nil { - return - } - } - }() - - // Check for potential race conditions when interrupting toxics - for i := 0; i < 1000; i++ { - proxy.Toxics.AddToxicJson(ToxicToJson(t, "", "latency", "upstream", &toxics.LatencyToxic{Latency: 10})) - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - conn.Write([]byte("hello")) - conn.Close() - proxy.Toxics.RemoveToxic("latency") - } -} - -func TestTwoLatencyToxics(t *testing.T) { - WithEchoProxy(t, func(conn net.Conn, response chan []byte, proxy *toxiproxy.Proxy) { - toxics := []*toxics.LatencyToxic{&toxics.LatencyToxic{Latency: 500}, &toxics.LatencyToxic{Latency: 500}} - for i, toxic := range toxics { - _, err := proxy.Toxics.AddToxicJson(ToxicToJson(t, "latency_"+strconv.Itoa(i), "latency", "upstream", toxic)) - if err != nil { - t.Error("AddToxicJson returned error:", err) - } - } - - msg := []byte("hello world " + strings.Repeat("a", 32*1024) + "\n") - - timer := time.Now() - _, err := conn.Write(msg) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - resp := <-response - if !bytes.Equal(resp, msg) { - t.Error("Server didn't read correct bytes from client:", string(resp)) - } - - AssertDeltaTime(t, - "Upstream two latency toxics", - time.Since(timer), - time.Duration(1000)*time.Millisecond, - time.Duration(10)*time.Millisecond, - ) - - for i := range toxics { - proxy.Toxics.RemoveToxic("latency_" + strconv.Itoa(i)) - } - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } - }) -} - -func TestLatencyToxicBandwidth(t *testing.T) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - buf := []byte(strings.Repeat("hello world ", 1000)) - - go func() { - conn, err := ln.Accept() - if err != nil { - t.Error("Unable to accept TCP connection", err) - } - for err == nil { - _, err = conn.Write(buf) - } - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - - proxy.Toxics.AddToxicJson(ToxicToJson(t, "", "latency", "", &toxics.LatencyToxic{Latency: 100})) - - time.Sleep(150 * time.Millisecond) // Wait for latency toxic - buf2 := make([]byte, len(buf)) - - start := time.Now() - count := 0 - for i := 0; i < 100; i++ { - n, err := io.ReadFull(conn, buf2) - count += n - if err != nil { - t.Error(err) - break - } - } - - // Assert the transfer was at least 100MB/s - AssertDeltaTime(t, "Latency toxic bandwidth", time.Since(start), 0, time.Duration(count/100000)*time.Millisecond) - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/limit_data.go 
b/vendor/github.com/Shopify/toxiproxy/toxics/limit_data.go deleted file mode 100644 index 5384fac..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/limit_data.go +++ /dev/null @@ -1,60 +0,0 @@ -package toxics - -import "github.com/Shopify/toxiproxy/stream" - -// LimitDataToxic has limit in bytes -type LimitDataToxic struct { - Bytes int64 `json:"bytes"` -} - -type LimitDataToxicState struct { - bytesTransmitted int64 -} - -func (t *LimitDataToxic) Pipe(stub *ToxicStub) { - state := stub.State.(*LimitDataToxicState) - var bytesRemaining = t.Bytes - state.bytesTransmitted - - for { - select { - case <-stub.Interrupt: - return - case c := <-stub.Input: - if c == nil { - stub.Close() - return - } - - if bytesRemaining < 0 { - bytesRemaining = 0 - } - - if bytesRemaining < int64(len(c.Data)) { - c = &stream.StreamChunk{ - Timestamp: c.Timestamp, - Data: c.Data[0:bytesRemaining], - } - } - - if len(c.Data) > 0 { - stub.Output <- c - state.bytesTransmitted += int64(len(c.Data)) - } - - bytesRemaining = t.Bytes - state.bytesTransmitted - - if bytesRemaining <= 0 { - stub.Close() - return - } - } - } -} - -func (t *LimitDataToxic) NewState() interface{} { - return new(LimitDataToxicState) -} - -func init() { - Register("limit_data", new(LimitDataToxic)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/limit_data_test.go b/vendor/github.com/Shopify/toxiproxy/toxics/limit_data_test.go deleted file mode 100644 index 50b3572..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/limit_data_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package toxics_test - -import ( - "bytes" - "crypto/rand" - "fmt" - "testing" - - "github.com/Shopify/toxiproxy/stream" - "github.com/Shopify/toxiproxy/toxics" -) - -func buffer(size int) []byte { - buf := make([]byte, size) - rand.Read(buf) - - return buf -} - -func checkOutgoingChunk(t *testing.T, output chan *stream.StreamChunk, expected []byte) { - chunk := <-output - if !bytes.Equal(chunk.Data, expected) { - t.Error("Data in outgoing chunk doesn't match expected values") - } -} - -func checkRemainingChunks(t *testing.T, output chan *stream.StreamChunk) { - if len(output) != 0 { - t.Error(fmt.Sprintf("There is %d chunks in output channel. 
0 is expected.", len(output))) - } -} - -func check(t *testing.T, toxic *toxics.LimitDataToxic, chunks [][]byte, expectedChunks [][]byte) { - input := make(chan *stream.StreamChunk) - output := make(chan *stream.StreamChunk, 100) - stub := toxics.NewToxicStub(input, output) - stub.State = toxic.NewState() - - go toxic.Pipe(stub) - - for _, buf := range chunks { - input <- &stream.StreamChunk{Data: buf} - } - - for _, expected := range expectedChunks { - checkOutgoingChunk(t, output, expected) - } - - checkRemainingChunks(t, output) -} - -func TestLimitDataToxicMayBeRestarted(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - input := make(chan *stream.StreamChunk) - output := make(chan *stream.StreamChunk, 100) - stub := toxics.NewToxicStub(input, output) - stub.State = toxic.NewState() - - buf := buffer(90) - buf2 := buffer(20) - - // Send chunk with data not exceeding limit and interrupt - go func() { - input <- &stream.StreamChunk{Data: buf} - stub.Interrupt <- struct{}{} - }() - - toxic.Pipe(stub) - checkOutgoingChunk(t, output, buf) - - // Send 2nd chunk to exceed limit - go func() { - input <- &stream.StreamChunk{Data: buf2} - }() - - toxic.Pipe(stub) - checkOutgoingChunk(t, output, buf2[0:10]) - - checkRemainingChunks(t, output) -} - -func TestLimitDataToxicMayBeInterrupted(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - input := make(chan *stream.StreamChunk) - output := make(chan *stream.StreamChunk) - stub := toxics.NewToxicStub(input, output) - stub.State = toxic.NewState() - - go func() { - stub.Interrupt <- struct{}{} - }() - - toxic.Pipe(stub) -} - -func TestLimitDataToxicNilShouldClosePipe(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - input := make(chan *stream.StreamChunk) - output := make(chan *stream.StreamChunk) - stub := toxics.NewToxicStub(input, output) - stub.State = toxic.NewState() - - go func() { - input <- nil - }() - - toxic.Pipe(stub) -} - -func TestLimitDataToxicChunkSmallerThanLimit(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - buf := buffer(50) - check(t, toxic, [][]byte{buf}, [][]byte{buf}) -} - -func TestLimitDataToxicChunkLengthMatchesLimit(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - buf := buffer(100) - check(t, toxic, [][]byte{buf}, [][]byte{buf}) -} - -func TestLimitDataToxicChunkBiggerThanLimit(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - buf := buffer(150) - expected := buf[0:100] - - check(t, toxic, [][]byte{buf}, [][]byte{expected}) -} - -func TestLimitDataToxicMultipleChunksMatchThanLimit(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - buf := buffer(25) - - check(t, toxic, [][]byte{buf, buf, buf, buf}, [][]byte{buf, buf, buf, buf}) -} - -func TestLimitDataToxicSecondChunkWouldOverflowLimit(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 100} - - buf := buffer(90) - buf2 := buffer(20) - expected := buf2[0:10] - - check(t, toxic, [][]byte{buf, buf2}, [][]byte{buf, expected}) -} - -func TestLimitDataToxicLimitIsSetToZero(t *testing.T) { - toxic := &toxics.LimitDataToxic{Bytes: 0} - - buf := buffer(100) - - check(t, toxic, [][]byte{buf}, [][]byte{}) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/noop.go b/vendor/github.com/Shopify/toxiproxy/toxics/noop.go deleted file mode 100644 index 08cdcde..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/noop.go +++ /dev/null @@ -1,23 +0,0 @@ -package toxics - -// The NoopToxic passes all data through without any toxic effects. 
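Every toxic above follows the same ToxicStub contract: receive chunks from Input, honour Interrupt, and close the stub when a nil chunk arrives. A condensed sketch of driving a toxic's Pipe loop directly through a stub, the same pattern the tests above use (a stateful toxic such as LimitDataToxic would additionally need `stub.State = toxic.NewState()`):

```go
package main

import (
	"fmt"

	"github.com/Shopify/toxiproxy/stream"
	"github.com/Shopify/toxiproxy/toxics"
)

func main() {
	input := make(chan *stream.StreamChunk)
	output := make(chan *stream.StreamChunk, 1)
	stub := toxics.NewToxicStub(input, output)

	// Drive the toxic's Pipe loop directly, as the tests above do.
	go new(toxics.NoopToxic).Pipe(stub)

	input <- &stream.StreamChunk{Data: []byte("hello")}
	fmt.Printf("%q\n", (<-output).Data) // "hello", forwarded unchanged

	input <- nil // a nil chunk signals end of stream; the toxic closes the stub and returns
}
```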
-type NoopToxic struct{} - -func (t *NoopToxic) Pipe(stub *ToxicStub) { - for { - select { - case <-stub.Interrupt: - return - case c := <-stub.Input: - if c == nil { - stub.Close() - return - } - stub.Output <- c - } - } -} - -func init() { - Register("noop", new(NoopToxic)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/slicer.go b/vendor/github.com/Shopify/toxiproxy/toxics/slicer.go deleted file mode 100644 index 7bb83a6..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/slicer.go +++ /dev/null @@ -1,83 +0,0 @@ -package toxics - -import ( - "math/rand" - "time" - - "github.com/Shopify/toxiproxy/stream" -) - -// The SlicerToxic slices data into multiple smaller packets -// to simulate real-world TCP behaviour. -type SlicerToxic struct { - // Average number of bytes to slice at - AverageSize int `json:"average_size"` - // +/- bytes to vary sliced amounts. Must be less than - // the average size - SizeVariation int `json:"size_variation"` - // Microseconds to delay each packet. May be useful since there's - // usually some kind of buffering of network data - Delay int `json:"delay"` -} - -// Returns a list of chunk offsets to slice up a packet of the -// given total size. For example, for a size of 100, output might be: -// -// []int{0, 18, 18, 43, 43, 67, 67, 77, 77, 100} -// ^---^ ^----^ ^----^ ^----^ ^-----^ -// -// This tries to get fairly evenly-varying chunks (no tendency -// to have a small/large chunk at the start/end). -func (t *SlicerToxic) chunk(start int, end int) []int { - // Base case: - // If the size is within the random varation, _or already - // less than the average size_, just return it. - // Otherwise split the chunk in about two, and recurse. - if (end-start)-t.AverageSize <= t.SizeVariation { - return []int{start, end} - } - - // +1 in the size variation to offset favoring of smaller - // numbers by integer division - mid := start + (end-start)/2 + (rand.Intn(t.SizeVariation*2) - t.SizeVariation) + rand.Intn(1) - left := t.chunk(start, mid) - right := t.chunk(mid, end) - - return append(left, right...) 
-} - -func (t *SlicerToxic) Pipe(stub *ToxicStub) { - for { - select { - case <-stub.Interrupt: - return - case c := <-stub.Input: - if c == nil { - stub.Close() - return - } - - chunks := t.chunk(0, len(c.Data)) - for i := 1; i < len(chunks); i += 2 { - stub.Output <- &stream.StreamChunk{ - Data: c.Data[chunks[i-1]:chunks[i]], - Timestamp: c.Timestamp, - } - - select { - case <-stub.Interrupt: - stub.Output <- &stream.StreamChunk{ - Data: c.Data[chunks[i]:], - Timestamp: c.Timestamp, - } - return - case <-time.After(time.Duration(t.Delay) * time.Microsecond): - } - } - } - } -} - -func init() { - Register("slicer", new(SlicerToxic)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/slicer_test.go b/vendor/github.com/Shopify/toxiproxy/toxics/slicer_test.go deleted file mode 100644 index 6be1da1..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/slicer_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package toxics_test - -import ( - "bytes" - "strings" - "testing" - "time" - - "github.com/Shopify/toxiproxy/stream" - "github.com/Shopify/toxiproxy/toxics" -) - -func TestSlicerToxic(t *testing.T) { - data := []byte(strings.Repeat("hello world ", 40000)) // 480 kb - slicer := &toxics.SlicerToxic{AverageSize: 1024, SizeVariation: 512, Delay: 10} - - input := make(chan *stream.StreamChunk) - output := make(chan *stream.StreamChunk) - stub := toxics.NewToxicStub(input, output) - - done := make(chan bool) - go func() { - slicer.Pipe(stub) - done <- true - }() - defer func() { - close(input) - for { - select { - case <-done: - return - case <-output: - } - } - }() - - input <- &stream.StreamChunk{Data: data} - - buf := make([]byte, 0, len(data)) - reads := 0 -L: - for { - select { - case c := <-output: - reads++ - buf = append(buf, c.Data...) - case <-time.After(10 * time.Millisecond): - break L - } - } - - if reads < 480/2 || reads > 480/2+480 { - t.Errorf("Expected to read about 480 times, but read %d times.", reads) - } - if bytes.Compare(buf, data) != 0 { - t.Errorf("Server did not read correct buffer from client!") - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/slow_close.go b/vendor/github.com/Shopify/toxiproxy/toxics/slow_close.go deleted file mode 100644 index fc35ed7..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/slow_close.go +++ /dev/null @@ -1,34 +0,0 @@ -package toxics - -import "time" - -// The SlowCloseToxic stops the TCP connection from closing until after a delay. -type SlowCloseToxic struct { - // Times in milliseconds - Delay int64 `json:"delay"` -} - -func (t *SlowCloseToxic) Pipe(stub *ToxicStub) { - for { - select { - case <-stub.Interrupt: - return - case c := <-stub.Input: - if c == nil { - delay := time.Duration(t.Delay) * time.Millisecond - select { - case <-time.After(delay): - stub.Close() - return - case <-stub.Interrupt: - return - } - } - stub.Output <- c - } - } -} - -func init() { - Register("slow_close", new(SlowCloseToxic)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/timeout.go b/vendor/github.com/Shopify/toxiproxy/toxics/timeout.go deleted file mode 100644 index f42d9d2..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/timeout.go +++ /dev/null @@ -1,44 +0,0 @@ -package toxics - -import "time" - -// The TimeoutToxic stops any data from flowing through, and will close the connection after a timeout. -// If the timeout is set to 0, then the connection will not be closed. 
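Toxics such as the TimeoutToxic above are normally attached through ToxicCollection.AddToxicJson, with the toxic's own fields nested under `attributes`. A rough sketch of that call, assuming NewProxy initializes the proxy's ToxicCollection as the tests above imply; the upstream address is made up for the example:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/Shopify/toxiproxy"
)

func main() {
	proxy := toxiproxy.NewProxy()
	proxy.Name = "example"
	proxy.Listen = "localhost:0"
	proxy.Upstream = "localhost:8080" // hypothetical upstream address

	// Stream defaults to "downstream", toxicity to 1.0, and the name to
	// "<type>_<stream>" when omitted (see AddToxicJson above).
	body := `{"type": "timeout", "stream": "upstream", "attributes": {"timeout": 5000}}`

	toxic, err := proxy.Toxics.AddToxicJson(strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	fmt.Println(toxic.Name) // timeout_upstream
}
```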
-type TimeoutToxic struct { - // Times in milliseconds - Timeout int64 `json:"timeout"` -} - -func (t *TimeoutToxic) Pipe(stub *ToxicStub) { - timeout := time.Duration(t.Timeout) * time.Millisecond - if timeout > 0 { - for { - select { - case <-time.After(timeout): - stub.Close() - return - case <-stub.Interrupt: - return - case <-stub.Input: - // Drop the data on the ground. - } - } - } else { - for { - select { - case <-stub.Interrupt: - return - case <-stub.Input: - // Drop the data on the ground. - } - } - } -} - -func (t *TimeoutToxic) Cleanup(stub *ToxicStub) { - stub.Close() -} - -func init() { - Register("timeout", new(TimeoutToxic)) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/timeout_test.go b/vendor/github.com/Shopify/toxiproxy/toxics/timeout_test.go deleted file mode 100644 index 43646cb..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/timeout_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package toxics_test - -import ( - "bytes" - "io" - "net" - "testing" - "time" - - "github.com/Shopify/toxiproxy" - "github.com/Shopify/toxiproxy/testhelper" - "github.com/Shopify/toxiproxy/toxics" -) - -func WithEstablishedProxy(t *testing.T, f func(net.Conn, net.Conn, *toxiproxy.Proxy)) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - serverConnRecv := make(chan net.Conn) - go func() { - conn, err := ln.Accept() - if err != nil { - t.Error("Unable to accept TCP connection", err) - } - serverConnRecv <- conn - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Fatal("Unable to dial TCP server", err) - } - defer conn.Close() - - serverConn := <-serverConnRecv - defer serverConn.Close() - - writeAndReceive := func(from, to net.Conn) { - data := []byte("foobar") - _, err := from.Write(data) - if err != nil { - t.Fatal(err) - } - - err = testhelper.TimeoutAfter(time.Second, func() { - resp := make([]byte, len(data)) - to.Read(resp) - if !bytes.Equal(resp, data) { - t.Fatalf("expected '%s' but got '%s'", string(data), string(resp)) - } - }) - if err != nil { - t.Fatal(err) - } - } - - // Make sure we can send and receive data before continuing. - writeAndReceive(conn, serverConn) - writeAndReceive(serverConn, conn) - - f(conn, serverConn, proxy) -} - -func TestTimeoutToxicDoesNotCauseHang(t *testing.T) { - WithEstablishedProxy(t, func(conn, _ net.Conn, proxy *toxiproxy.Proxy) { - proxy.Toxics.AddToxicJson(ToxicToJson(t, "might_block", "latency", "upstream", &toxics.LatencyToxic{Latency: 10})) - proxy.Toxics.AddToxicJson(ToxicToJson(t, "timeout", "timeout", "upstream", &toxics.TimeoutToxic{Timeout: 0})) - - for i := 0; i < 5; i++ { - _, err := conn.Write([]byte("hello")) - if err != nil { - t.Fatal("Unable to write to proxy", err) - } - time.Sleep(200 * time.Millisecond) // Shitty sync waiting for latency toxi to get data. 
- } - - err := testhelper.TimeoutAfter(time.Second, func() { - proxy.Toxics.RemoveToxic("might_block") - }) - if err != nil { - t.Fatal(err) - } - }) -} - -func TestTimeoutToxicClosesConnectionOnRemove(t *testing.T) { - WithEstablishedProxy(t, func(conn, serverConn net.Conn, proxy *toxiproxy.Proxy) { - proxy.Toxics.AddToxicJson(ToxicToJson(t, "to_delete", "timeout", "upstream", &toxics.TimeoutToxic{Timeout: 0})) - - proxy.Toxics.RemoveToxic("to_delete") - - err := testhelper.TimeoutAfter(time.Second, func() { - buf := make([]byte, 1) - _, err := conn.Read(buf) - if err != io.EOF { - t.Fatal("expected EOF from closed connetion") - } - _, err = serverConn.Read(buf) - if err != io.EOF { - t.Fatal("expected EOF from closed server connetion") - } - }) - if err != nil { - t.Fatal(err) - } - }) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/toxic.go b/vendor/github.com/Shopify/toxiproxy/toxics/toxic.go deleted file mode 100644 index bccd5f8..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/toxic.go +++ /dev/null @@ -1,151 +0,0 @@ -package toxics - -import ( - "math/rand" - "reflect" - "sync" - - "github.com/Shopify/toxiproxy/stream" -) - -// A Toxic is something that can be attatched to a link to modify the way -// data can be passed through (for example, by adding latency) -// -// Toxic -// v -// Client <-> ToxicStub <-> Upstream -// -// Toxic's work in a pipeline fashion, and can be chained together -// with channels. The toxic itself only defines the settings and -// Pipe() function definition, and uses the ToxicStub struct to store -// per-connection information. This allows the same toxic to be used -// for multiple connections. - -type Toxic interface { - // Defines how packets flow through a ToxicStub. Pipe() blocks until the link is closed or interrupted. - Pipe(*ToxicStub) -} - -type CleanupToxic interface { - // Cleanup is called before a toxic is removed. - Cleanup(*ToxicStub) -} - -type BufferedToxic interface { - // Defines the size of buffer this toxic should use - GetBufferSize() int -} - -// Stateful toxics store a per-connection state object on the ToxicStub. -// The state is created once when the toxic is added and persists until the -// toxic is removed or the connection is closed. -type StatefulToxic interface { - // Creates a new object to store toxic state in - NewState() interface{} -} - -type ToxicWrapper struct { - Toxic `json:"attributes"` - Name string `json:"name"` - Type string `json:"type"` - Stream string `json:"stream"` - Toxicity float32 `json:"toxicity"` - Direction stream.Direction `json:"-"` - Index int `json:"-"` - BufferSize int `json:"-"` -} - -type ToxicStub struct { - Input <-chan *stream.StreamChunk - Output chan<- *stream.StreamChunk - State interface{} - Interrupt chan struct{} - running chan struct{} - closed chan struct{} -} - -func NewToxicStub(input <-chan *stream.StreamChunk, output chan<- *stream.StreamChunk) *ToxicStub { - return &ToxicStub{ - Interrupt: make(chan struct{}), - closed: make(chan struct{}), - Input: input, - Output: output, - } -} - -// Begin running a toxic on this stub, can be interrupted. -// Runs a noop toxic randomly depending on toxicity -func (s *ToxicStub) Run(toxic *ToxicWrapper) { - s.running = make(chan struct{}) - defer close(s.running) - if rand.Float32() < toxic.Toxicity { - toxic.Pipe(s) - } else { - new(NoopToxic).Pipe(s) - } -} - -// Interrupt the flow of data so that the toxic controlling the stub can be replaced. 
-// Returns true if the stream was successfully interrupted, or false if the stream is closed. -func (s *ToxicStub) InterruptToxic() bool { - select { - case <-s.closed: - return false - case s.Interrupt <- struct{}{}: - <-s.running // Wait for the running toxic to exit - return true - } -} - -func (s *ToxicStub) Closed() bool { - select { - case <-s.closed: - return true - default: - return false - } -} - -func (s *ToxicStub) Close() { - if !s.Closed() { - close(s.closed) - close(s.Output) - } -} - -var ToxicRegistry map[string]Toxic -var registryMutex sync.RWMutex - -func Register(typeName string, toxic Toxic) { - registryMutex.Lock() - defer registryMutex.Unlock() - - if ToxicRegistry == nil { - ToxicRegistry = make(map[string]Toxic) - } - ToxicRegistry[typeName] = toxic -} - -func New(wrapper *ToxicWrapper) Toxic { - registryMutex.RLock() - defer registryMutex.RUnlock() - - orig, ok := ToxicRegistry[wrapper.Type] - if !ok { - return nil - } - wrapper.Toxic = reflect.New(reflect.TypeOf(orig).Elem()).Interface().(Toxic) - if buffered, ok := wrapper.Toxic.(BufferedToxic); ok { - wrapper.BufferSize = buffered.GetBufferSize() - } else { - wrapper.BufferSize = 0 - } - return wrapper.Toxic -} - -func Count() int { - registryMutex.RLock() - defer registryMutex.RUnlock() - - return len(ToxicRegistry) -} diff --git a/vendor/github.com/Shopify/toxiproxy/toxics/toxic_test.go b/vendor/github.com/Shopify/toxiproxy/toxics/toxic_test.go deleted file mode 100644 index 585d53b..0000000 --- a/vendor/github.com/Shopify/toxiproxy/toxics/toxic_test.go +++ /dev/null @@ -1,340 +0,0 @@ -package toxics_test - -import ( - "bufio" - "bytes" - "encoding/json" - "io" - "net" - "strings" - "testing" - "time" - - "github.com/Shopify/toxiproxy" - "github.com/Shopify/toxiproxy/toxics" - "github.com/Sirupsen/logrus" - tomb "gopkg.in/tomb.v1" -) - -func init() { - logrus.SetLevel(logrus.FatalLevel) -} - -func NewTestProxy(name, upstream string) *toxiproxy.Proxy { - proxy := toxiproxy.NewProxy() - - proxy.Name = name - proxy.Listen = "localhost:0" - proxy.Upstream = upstream - - return proxy -} - -func WithEchoServer(t *testing.T, f func(string, chan []byte)) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - response := make(chan []byte, 1) - tomb := tomb.Tomb{} - - go func() { - defer tomb.Done() - src, err := ln.Accept() - if err != nil { - select { - case <-tomb.Dying(): - default: - t.Fatal("Failed to accept client") - } - return - } - - ln.Close() - - scan := bufio.NewScanner(src) - if scan.Scan() { - received := append(scan.Bytes(), '\n') - response <- received - - src.Write(received) - } - }() - - f(ln.Addr().String(), response) - - tomb.Killf("Function body finished") - ln.Close() - tomb.Wait() - - close(response) -} - -func WithEchoProxy(t *testing.T, f func(proxy net.Conn, response chan []byte, proxyServer *toxiproxy.Proxy)) { - WithEchoServer(t, func(upstream string, response chan []byte) { - proxy := NewTestProxy("test", upstream) - proxy.Start() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - - f(conn, response, proxy) - - proxy.Stop() - }) -} - -func ToxicToJson(t *testing.T, name, typeName, stream string, toxic toxics.Toxic) io.Reader { - data := map[string]interface{}{ - "name": name, - "type": typeName, - "stream": stream, - "attributes": toxic, - } - request, err := json.Marshal(data) - if err != nil { - t.Errorf("Failed to marshal toxic for api 
(1): %v", toxic) - } - - return bytes.NewReader(request) -} - -func AssertEchoResponse(t *testing.T, client, server net.Conn) { - msg := []byte("hello world\n") - - _, err := client.Write(msg) - if err != nil { - t.Error("Failed writing to TCP server", err) - } - - scan := bufio.NewScanner(server) - if !scan.Scan() { - t.Error("Client unexpectedly closed connection") - } - - resp := append(scan.Bytes(), '\n') - if !bytes.Equal(resp, msg) { - t.Error("Server didn't read correct bytes from client:", string(resp)) - } - - _, err = server.Write(resp) - if err != nil { - t.Error("Failed writing to TCP client", err) - } - - scan = bufio.NewScanner(client) - if !scan.Scan() { - t.Error("Server unexpectedly closed connection") - } - - resp = append(scan.Bytes(), '\n') - if !bytes.Equal(resp, msg) { - t.Error("Client didn't read correct bytes from server:", string(resp)) - } -} - -func TestPersistentConnections(t *testing.T) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - serverConnRecv := make(chan net.Conn) - - go func() { - conn, err := ln.Accept() - if err != nil { - t.Error("Unable to accept TCP connection", err) - } - serverConnRecv <- conn - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - - serverConn := <-serverConnRecv - - proxy.Toxics.AddToxicJson(ToxicToJson(t, "noop_up", "noop", "upstream", &toxics.NoopToxic{})) - proxy.Toxics.AddToxicJson(ToxicToJson(t, "noop_down", "noop", "downstream", &toxics.NoopToxic{})) - - AssertEchoResponse(t, conn, serverConn) - - proxy.Toxics.ResetToxics() - - AssertEchoResponse(t, conn, serverConn) - - proxy.Toxics.ResetToxics() - - AssertEchoResponse(t, conn, serverConn) - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } -} - -func TestToxicAddRemove(t *testing.T) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - serverConnRecv := make(chan net.Conn) - - go func() { - conn, err := ln.Accept() - if err != nil { - t.Error("Unable to accept TCP connection", err) - } - serverConnRecv <- conn - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - - serverConn := <-serverConnRecv - - running := make(chan struct{}) - go func() { - enabled := false - for { - select { - case <-running: - return - default: - if enabled { - proxy.Toxics.AddToxicJson(ToxicToJson(t, "noop_up", "noop", "upstream", &toxics.NoopToxic{})) - proxy.Toxics.RemoveToxic("noop_down") - } else { - proxy.Toxics.RemoveToxic("noop_up") - proxy.Toxics.AddToxicJson(ToxicToJson(t, "noop_down", "noop", "downstream", &toxics.NoopToxic{})) - } - enabled = !enabled - } - } - }() - - for i := 0; i < 100; i++ { - AssertEchoResponse(t, conn, serverConn) - } - close(running) - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } -} - -func TestProxyLatency(t *testing.T) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - serverConnRecv := make(chan net.Conn) - 
- go func() { - conn, err := ln.Accept() - if err != nil { - t.Error("Unable to accept TCP connection", err) - } - serverConnRecv <- conn - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - t.Error("Unable to dial TCP server", err) - } - - serverConn := <-serverConnRecv - - start := time.Now() - for i := 0; i < 100; i++ { - AssertEchoResponse(t, conn, serverConn) - } - latency := time.Since(start) / 200 - if latency > 300*time.Microsecond { - t.Errorf("Average proxy latency > 300µs (%v)", latency) - } else { - t.Logf("Average proxy latency: %v", latency) - } - - err = conn.Close() - if err != nil { - t.Error("Failed to close TCP connection", err) - } -} - -func BenchmarkProxyBandwidth(b *testing.B) { - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - b.Fatal("Failed to create TCP server", err) - } - - defer ln.Close() - - proxy := NewTestProxy("test", ln.Addr().String()) - proxy.Start() - defer proxy.Stop() - - buf := []byte(strings.Repeat("hello world ", 1000)) - - go func() { - conn, err := ln.Accept() - if err != nil { - b.Error("Unable to accept TCP connection", err) - } - buf2 := make([]byte, len(buf)) - for err == nil { - _, err = conn.Read(buf2) - } - }() - - conn, err := net.Dial("tcp", proxy.Listen) - if err != nil { - b.Error("Unable to dial TCP server", err) - } - - b.SetBytes(int64(len(buf))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - n, err := conn.Write(buf) - if err != nil || n != len(buf) { - b.Errorf("%v, %d == %d", err, n, len(buf)) - break - } - } - - err = conn.Close() - if err != nil { - b.Error("Failed to close TCP connection", err) - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index d5a559f..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.2 - - 1.3 - - tip -install: - - go get github.com/stretchr/testify - - go get github.com/stvp/go-udp-testing - - go get github.com/tobi/airbrake-go diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb4..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 46dd102..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,342 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0), the core API is unlikely change much but please version -control your Logrus to make sure you aren't fetching latest `master` on every -build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not -attached, the output is compatible with the -[l2met](http://r.32k.io/l2met-introduction) format: - -```text -time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10 -time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122 -time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10 -time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9 -time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. 
You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(&logrus_airbrake.AirbrakeHook{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -```go -// Not the real implementation of the Airbrake hook. Just a simple sample. -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - log.AddHook(new(AirbrakeHook)) -} - -type AirbrakeHook struct{} - -// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains -// the fields for the entry. See the Fields section of the README. -func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { - err := airbrake.Notify(entry.Data["error"].(error)) - if err != nil { - log.WithFields(log.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Info("Failed to send error to Airbrake") - } - - return nil -} - -// `Levels()` returns a slice of `Levels` the hook is fired for. 
-func (hook *AirbrakeHook) Levels() []log.Level { - return []log.Level{ - log.ErrorLevel, - log.FatalLevel, - log.PanicLevel, - } -} -``` - -Logrus comes with built-in hooks. Add those, or your custom hook, in `init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/Sirupsen/logrus/hooks/syslog" -) - -func init() { - log.AddHook(new(logrus_airbrake.AirbrakeHook)) - log.AddHook(logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")) -} -``` - -* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) - Send errors to an exception tracking service compatible with the Airbrake API. - Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. - -* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) - Send errors to the Papertrail hosted logging service via UDP. - -* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) - Send errors to remote syslog server. - Uses standard library `log/syslog` behind the scenes. - -* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) - Send errors to a channel in hipchat. - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(logrus.JSONFormatter) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(logrus.TextFormatter) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. 
- * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotated(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index dc2b0a7..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,244 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It containts all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. 
-func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook\n", err) - } - - reader, err := entry.Reader() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(reader.String()) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) 
-} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) - return msg[:len(msg)-1] -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index 383ce93..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,177 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. 
-// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debugf on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Pancf on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) 
-} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index cccf1c2..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,44 +0,0 @@ -package logrus - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(entry *Entry) { - _, ok := entry.Data["time"] - if ok { - entry.Data["fields.time"] = entry.Data["time"] - } - - _, ok = entry.Data["msg"] - if ok { - entry.Data["fields.msg"] = entry.Data["msg"] - } - - _, ok = entry.Data["level"] - if ok { - entry.Data["fields.level"] = entry.Data["level"] - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 0da2b36..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type levelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks levelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. 
-func (hooks levelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 9d11b64..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,22 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" - "time" -) - -type JSONFormatter struct{} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - prefixFieldClashes(entry) - entry.Data["time"] = entry.Time.Format(time.RFC3339) - entry.Data["msg"] = entry.Message - entry.Data["level"] = entry.Level.String() - - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index 7374fe3..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks levelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(levelHooks), -// Level: logrus.Debug, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stdout, - Formatter: new(TextFormatter), - Hooks: make(levelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. 
-func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - NewEntry(logger).Debugf(format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - NewEntry(logger).Infof(format, args...) -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - NewEntry(logger).Errorf(format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - NewEntry(logger).Fatalf(format, args...) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - NewEntry(logger).Panicf(format, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - NewEntry(logger).Debug(args...) -} - -func (logger *Logger) Info(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - NewEntry(logger).Warn(args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - NewEntry(logger).Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - NewEntry(logger).Error(args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - NewEntry(logger).Fatal(args...) -} - -func (logger *Logger) Panic(args ...interface{}) { - NewEntry(logger).Panic(args...) -} - -func (logger *Logger) Debugln(args ...interface{}) { - NewEntry(logger).Debugln(args...) -} - -func (logger *Logger) Infoln(args ...interface{}) { - NewEntry(logger).Infoln(args...) -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - NewEntry(logger).Warnln(args...) -} - -func (logger *Logger) Warningln(args ...interface{}) { - NewEntry(logger).Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - NewEntry(logger).Errorln(args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - NewEntry(logger).Fatalln(args...) -} - -func (logger *Logger) Panicln(args ...interface{}) { - NewEntry(logger).Panicln(args...) -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". 
-func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. -type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_darwin.go deleted file mode 100644 index 8fe02a4..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go deleted file mode 100644 index 0428ee5..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. 
-*/ -package logrus - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 276447b..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin freebsd - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 2ab0139..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - DisableColors bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - - var keys []string - for k := range entry.Data { - keys = append(keys, k) - } - sort.Strings(keys) - - b := &bytes.Buffer{} - - prefixFieldClashes(entry) - - isColored := (f.ForceColors || isTerminal) && !f.DisableColors - - if isColored { - printColored(b, entry, keys) - } else { - f.AppendKeyValue(b, "time", entry.Time.Format(time.RFC3339)) - f.AppendKeyValue(b, "level", entry.Level.String()) - f.AppendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.AppendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func printColored(b *bytes.Buffer, entry *Entry, keys []string) { - var levelColor int - switch entry.Level { - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } -} - -func (f *TextFormatter) AppendKeyValue(b *bytes.Buffer, key, value interface{}) { - if _, ok := value.(string); ok { - fmt.Fprintf(b, "%v=%q ", key, value) - } else { - fmt.Fprintf(b, "%v=%v ", key, value) - } -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb87..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/README.md b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index 8ee62b4..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,6 +0,0 @@ -context -======= - -gorilla/context is a general purpose registry for global request variables. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/context.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/context.go deleted file mode 100644 index 35d6556..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/context.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.Mutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - defer mutex.Unlock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.Lock() - defer mutex.Unlock() - if data[r] != nil { - return data[r][key] - } - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.Lock() - defer mutex.Unlock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - return value, ok - } - return nil, false -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - defer mutex.Unlock() - if data[r] != nil { - delete(data[r], key) - } -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - defer mutex.Unlock() - clear(r) -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. 
-// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - defer mutex.Unlock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r, _ := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 2976064..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. 
Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d87d465..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb87..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index e60301b..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,7 +0,0 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -gorilla/mux is a powerful URL router and dispatcher. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index b2deed3..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". 
- -For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). 
- Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -*/ -package mux diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index ca51a01..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "path" - - "github.com/gorilla/context" -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. - namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request - KeepContext bool -} - -// Match matches registered routes against the request. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - return true - } - } - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) - } - if handler == nil { - if r.NotFoundHandler == nil { - r.NotFoundHandler = http.NotFoundHandler() - } - handler = r.NotFoundHandler - } - if !r.KeepContext { - defer context.Clear(req) - } - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. 
-func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the slash behavior for new routes. -// -// When true, if the route path is "/path/", accessing "/path" will redirect -// to the former and vice versa. -// -// Special case: when a route sets a path prefix, strict slash is -// automatically set to false for that route because the redirect behavior -// can't be determined for prefixes. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} - r.routes = append(r.routes, route) - return route -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). 
-func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairs(pairs ...string) (map[string]string, error) { - length := len(pairs) - if length%2 != 0 { - return nil, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMap returns true if the given key/value pairs exist in a given map. -func matchMap(toCheck map[string]string, toMatch map[string][]string, - canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. 
- valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 4c3482b..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host or path. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if matchHost { - defaultPattern = "[^.]+" - matchPrefix, strictSlash = false, false - } - if matchPrefix { - strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("^") - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if strictSlash { - pattern.WriteString("[/]?") - } - if !matchPrefix { - pattern.WriteByte('$') - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - // Done! - return &routeRegexp{ - template: template, - matchHost: matchHost, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. 
-type routeRegexp struct { - // The unmodified template. - template string - // True for host match, false for path match. - matchHost bool - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - return r.regexp.MatchString(req.URL.Path) - } - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(pairs ...string) (string, error) { - values, err := mapFromPairs(pairs...) - if err != nil { - return "", err - } - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - idxs := make([]int, 0) - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } - } - } - // Store path variables. - if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } - // Check if we should redirect. - if r.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if !r.URL.IsAbs() { - host := r.Host - // Slice off any port information. 
- if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - } - return r.URL.Host -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index 7766254..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - return false - } - } - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.getNamedRoutes()[name] = r - } - return r -} - -// GetName returns the name for the route, if any. 
-func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix bool) error { - if r.err != nil { - return r.err - } - r.regexp = r.getRegexpGroup() - if !matchHost { - if len(tpl) == 0 || tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, r.strictSlash) - if err != nil { - return err - } - if matchHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - r.regexp.path = rr - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairs(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.domain.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. 
-func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. -func (r *Route) PathPrefix(tpl string) *Route { - r.strictSlash = false - r.err = r.addRegexpMatcher(tpl, false, true) - return r -} - -// Query ---------------------------------------------------------------------- - -// queryMatcher matches the request against URL queries. -type queryMatcher map[string]string - -func (m queryMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.URL.Query(), false) -} - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "baz", "ding") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&baz=ding. -// -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) Queries(pairs ...string) *Route { - if r.err == nil { - var queries map[string]string - queries, r.err = mapFromPairs(pairs...) - return r.addMatcher(queryMatcher(queries)) - } - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. 
For example: -// -// r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } - var scheme, host, path string - var err error - if r.regexp.host != nil { - // Set a default scheme. - scheme = "http" - if host, err = r.regexp.host.url(pairs...); err != nil { - return nil, err - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(pairs...); err != nil { - return nil, err - } - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - host, err := r.regexp.host.url(pairs...) - if err != nil { - return nil, err - } - return &url.URL{ - Scheme: "http", - Host: host, - }, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - path, err := r.regexp.path.url(pairs...) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. 
-type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - } - } - } - return r.regexp -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.gitignore b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.gitignore deleted file mode 100644 index faf70c4..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.coverprofile -node_modules/ diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.travis.yml b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.travis.yml deleted file mode 100644 index 94836d7..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -language: go - -sudo: false - -cache: - directories: - - node_modules - -go: -- 1.2.x -- 1.3.x -- 1.4.2 -- 1.5.x -- 1.6.x -- 1.7.x -- master - -matrix: - allow_failures: - - go: master - include: - - go: 1.6.x - os: osx - - go: 1.7.x - os: osx - -before_script: -- go get github.com/urfave/gfmrun/... || true -- go get golang.org/x/tools/... || true -- if [ ! -f node_modules/.bin/markdown-toc ] ; then - npm install markdown-toc ; - fi - -script: -- ./runtests gen -- ./runtests vet -- ./runtests test -- ./runtests gfmrun -- ./runtests toc diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/CHANGELOG.md b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/CHANGELOG.md deleted file mode 100644 index 8b0d0ee..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/CHANGELOG.md +++ /dev/null @@ -1,336 +0,0 @@ -# Change Log - -**ATTN**: This project uses [semantic versioning](http://semver.org/). 
- -## [Unreleased] -### Added -- Flag type code generation via `go generate` -- Write to stderr and exit 1 if action returns non-nil error -- Added support for TOML to the `altsrc` loader - -### Changed -- Raise minimum tested/supported Go version to 1.2+ - -## [1.18.0] - 2016-06-27 -### Added -- `./runtests` test runner with coverage tracking by default -- testing on OS X -- testing on Windows -- `UintFlag`, `Uint64Flag`, and `Int64Flag` types and supporting code - -### Changed -- Use spaces for alignment in help/usage output instead of tabs, making the - output alignment consistent regardless of tab width - -### Fixed -- Printing of command aliases in help text -- Printing of visible flags for both struct and struct pointer flags -- Display the `help` subcommand when using `CommandCategories` -- No longer swallows `panic`s that occur within the `Action`s themselves when - detecting the signature of the `Action` field - -## [1.17.0] - 2016-05-09 -### Added -- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc` -- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool` -- Support for hiding commands by setting `Hidden: true` -- this will hide the - commands in help output - -### Changed -- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer - quoted in help text output. -- All flag types now include `(default: {value})` strings following usage when a - default value can be (reasonably) detected. -- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent - with non-slice flag types -- Apps now exit with a code of 3 if an unknown subcommand is specified - (previously they printed "No help topic for...", but still exited 0. This - makes it easier to script around apps built using `cli` since they can trust - that a 0 exit code indicated a successful execution. -- cleanups based on [Go Report Card - feedback](https://goreportcard.com/report/github.com/urfave/cli) - -## [1.16.0] - 2016-05-02 -### Added -- `Hidden` field on all flag struct types to omit from generated help text - -### Changed -- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from -generated help text via the `Hidden` field - -### Fixed -- handling of error values in `HandleAction` and `HandleExitCoder` - -## [1.15.0] - 2016-04-30 -### Added -- This file! -- Support for placeholders in flag usage strings -- `App.Metadata` map for arbitrary data/state management -- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after -parsing. -- Support for nested lookup of dot-delimited keys in structures loaded from -YAML. - -### Changed -- The `App.Action` and `Command.Action` now prefer a return signature of -`func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil -`error` is returned, there may be two outcomes: - - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called - automatically - - Else the error is bubbled up and returned from `App.Run` -- Specifying an `Action` with the legacy return signature of -`func(*cli.Context)` will produce a deprecation message to stderr -- Specifying an `Action` that is not a `func` type will produce a non-zero exit -from `App.Run` -- Specifying an `Action` func that has an invalid (input) signature will -produce a non-zero exit from `App.Run` - -### Deprecated -- -`cli.App.RunAndExitOnError`, which should now be done by returning an error -that fulfills `cli.ExitCoder` to `cli.App.Run`. 
-- the legacy signature for -`cli.App.Action` of `func(*cli.Context)`, which should now have a return -signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. - -### Fixed -- Added missing `*cli.Context.GlobalFloat64` method - -## [1.14.0] - 2016-04-03 (backfilled 2016-04-25) -### Added -- Codebeat badge -- Support for categorization via `CategorizedHelp` and `Categories` on app. - -### Changed -- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`. - -### Fixed -- Ensure version is not shown in help text when `HideVersion` set. - -## [1.13.0] - 2016-03-06 (backfilled 2016-04-25) -### Added -- YAML file input support. -- `NArg` method on context. - -## [1.12.0] - 2016-02-17 (backfilled 2016-04-25) -### Added -- Custom usage error handling. -- Custom text support in `USAGE` section of help output. -- Improved help messages for empty strings. -- AppVeyor CI configuration. - -### Changed -- Removed `panic` from default help printer func. -- De-duping and optimizations. - -### Fixed -- Correctly handle `Before`/`After` at command level when no subcommands. -- Case of literal `-` argument causing flag reordering. -- Environment variable hints on Windows. -- Docs updates. - -## [1.11.1] - 2015-12-21 (backfilled 2016-04-25) -### Changed -- Use `path.Base` in `Name` and `HelpName` -- Export `GetName` on flag types. - -### Fixed -- Flag parsing when skipping is enabled. -- Test output cleanup. -- Move completion check to account for empty input case. - -## [1.11.0] - 2015-11-15 (backfilled 2016-04-25) -### Added -- Destination scan support for flags. -- Testing against `tip` in Travis CI config. - -### Changed -- Go version in Travis CI config. - -### Fixed -- Removed redundant tests. -- Use correct example naming in tests. - -## [1.10.2] - 2015-10-29 (backfilled 2016-04-25) -### Fixed -- Remove unused var in bash completion. - -## [1.10.1] - 2015-10-21 (backfilled 2016-04-25) -### Added -- Coverage and reference logos in README. - -### Fixed -- Use specified values in help and version parsing. -- Only display app version and help message once. - -## [1.10.0] - 2015-10-06 (backfilled 2016-04-25) -### Added -- More tests for existing functionality. -- `ArgsUsage` at app and command level for help text flexibility. - -### Fixed -- Honor `HideHelp` and `HideVersion` in `App.Run`. -- Remove juvenile word from README. - -## [1.9.0] - 2015-09-08 (backfilled 2016-04-25) -### Added -- `FullName` on command with accompanying help output update. -- Set default `$PROG` in bash completion. - -### Changed -- Docs formatting. - -### Fixed -- Removed self-referential imports in tests. - -## [1.8.0] - 2015-06-30 (backfilled 2016-04-25) -### Added -- Support for `Copyright` at app level. -- `Parent` func at context level to walk up context lineage. - -### Fixed -- Global flag processing at top level. - -## [1.7.1] - 2015-06-11 (backfilled 2016-04-25) -### Added -- Aggregate errors from `Before`/`After` funcs. -- Doc comments on flag structs. -- Include non-global flags when checking version and help. -- Travis CI config updates. - -### Fixed -- Ensure slice type flags have non-nil values. -- Collect global flags from the full command hierarchy. -- Docs prose. - -## [1.7.0] - 2015-05-03 (backfilled 2016-04-25) -### Changed -- `HelpPrinter` signature includes output writer. - -### Fixed -- Specify go 1.1+ in docs. -- Set `Writer` when running command as app. - -## [1.6.0] - 2015-03-23 (backfilled 2016-04-25) -### Added -- Multiple author support. -- `NumFlags` at context level. 
-- `Aliases` at command level. - -### Deprecated -- `ShortName` at command level. - -### Fixed -- Subcommand help output. -- Backward compatible support for deprecated `Author` and `Email` fields. -- Docs regarding `Names`/`Aliases`. - -## [1.5.0] - 2015-02-20 (backfilled 2016-04-25) -### Added -- `After` hook func support at app and command level. - -### Fixed -- Use parsed context when running command as subcommand. -- Docs prose. - -## [1.4.1] - 2015-01-09 (backfilled 2016-04-25) -### Added -- Support for hiding `-h / --help` flags, but not `help` subcommand. -- Stop flag parsing after `--`. - -### Fixed -- Help text for generic flags to specify single value. -- Use double quotes in output for defaults. -- Use `ParseInt` instead of `ParseUint` for int environment var values. -- Use `0` as base when parsing int environment var values. - -## [1.4.0] - 2014-12-12 (backfilled 2016-04-25) -### Added -- Support for environment variable lookup "cascade". -- Support for `Stdout` on app for output redirection. - -### Fixed -- Print command help instead of app help in `ShowCommandHelp`. - -## [1.3.1] - 2014-11-13 (backfilled 2016-04-25) -### Added -- Docs and example code updates. - -### Changed -- Default `-v / --version` flag made optional. - -## [1.3.0] - 2014-08-10 (backfilled 2016-04-25) -### Added -- `FlagNames` at context level. -- Exposed `VersionPrinter` var for more control over version output. -- Zsh completion hook. -- `AUTHOR` section in default app help template. -- Contribution guidelines. -- `DurationFlag` type. - -## [1.2.0] - 2014-08-02 -### Added -- Support for environment variable defaults on flags plus tests. - -## [1.1.0] - 2014-07-15 -### Added -- Bash completion. -- Optional hiding of built-in help command. -- Optional skipping of flag parsing at command level. -- `Author`, `Email`, and `Compiled` metadata on app. -- `Before` hook func support at app and command level. -- `CommandNotFound` func support at app level. -- Command reference available on context. -- `GenericFlag` type. -- `Float64Flag` type. -- `BoolTFlag` type. -- `IsSet` flag helper on context. -- More flag lookup funcs at context level. -- More tests & docs. - -### Changed -- Help template updates to account for presence/absence of flags. -- Separated subcommand help template. -- Exposed `HelpPrinter` var for more control over help output. - -## [1.0.0] - 2013-11-01 -### Added -- `help` flag in default app flag set and each command flag set. -- Custom handling of argument parsing errors. -- Command lookup by name at app level. -- `StringSliceFlag` type and supporting `StringSlice` type. -- `IntSliceFlag` type and supporting `IntSlice` type. -- Slice type flag lookups by name at context level. -- Export of app and command help functions. -- More tests & docs. - -## 0.1.0 - 2013-07-22 -### Added -- Initial implementation. 
- -[Unreleased]: https://github.com/urfave/cli/compare/v1.18.0...HEAD -[1.18.0]: https://github.com/urfave/cli/compare/v1.17.0...v1.18.0 -[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0 -[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0 -[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0 -[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0 -[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0 -[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0 -[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1 -[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0 -[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2 -[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1 -[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0 -[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0 -[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0 -[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1 -[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0 -[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0 -[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0 -[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1 -[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0 -[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1 -[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0 -[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0 -[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0 diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/LICENSE deleted file mode 100644 index 42a597e..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 Jeremy Saenz & Contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/README.md b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/README.md deleted file mode 100644 index bb5f61e..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/README.md +++ /dev/null @@ -1,1364 +0,0 @@ -cli -=== - -[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) -[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) -[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) -[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) -[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) -[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / -[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) - -**Notice:** This is the library formerly known as -`github.com/codegangsta/cli` -- Github will automatically redirect requests -to this repository, but we recommend updating your references for clarity. - -cli is a simple, fast, and fun package for building command line apps in Go. The -goal is to enable developers to write fast and distributable command line -applications in an expressive way. - - - -- [Overview](#overview) -- [Installation](#installation) - * [Supported platforms](#supported-platforms) - * [Using the `v2` branch](#using-the-v2-branch) - * [Pinning to the `v1` releases](#pinning-to-the-v1-releases) -- [Getting Started](#getting-started) -- [Examples](#examples) - * [Arguments](#arguments) - * [Flags](#flags) - + [Placeholder Values](#placeholder-values) - + [Alternate Names](#alternate-names) - + [Ordering](#ordering) - + [Values from the Environment](#values-from-the-environment) - + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others) - * [Subcommands](#subcommands) - * [Subcommands categories](#subcommands-categories) - * [Exit code](#exit-code) - * [Bash Completion](#bash-completion) - + [Enabling](#enabling) - + [Distribution](#distribution) - + [Customization](#customization) - * [Generated Help Text](#generated-help-text) - + [Customization](#customization-1) - * [Version Flag](#version-flag) - + [Customization](#customization-2) - + [Full API Example](#full-api-example) -- [Contribution Guidelines](#contribution-guidelines) - - - -## Overview - -Command line apps are usually so tiny that there is absolutely no reason why -your code should *not* be self-documenting. Things like generating help text and -parsing command flags/options should not hinder productivity when writing a -command line app. - -**This is where cli comes into play.** cli makes command line programming fun, -organized, and expressive! - -## Installation - -Make sure you have a working Go environment. Go version 1.2+ is supported. [See -the install instructions for Go](http://golang.org/doc/install.html). 
- -To install cli, simply run: -``` -$ go get github.com/urfave/cli -``` - -Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can -be easily used: -``` -export PATH=$PATH:$GOPATH/bin -``` - -### Supported platforms - -cli is tested against multiple versions of Go on Linux, and against the latest -released version of Go on OS X and Windows. For full details, see -[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). - -### Using the `v2` branch - -**Warning**: The `v2` branch is currently unreleased and considered unstable. - -There is currently a long-lived branch named `v2` that is intended to land as -the new `master` branch once development there has settled down. The current -`master` branch (mirrored as `v1`) is being manually merged into `v2` on -an irregular human-based schedule, but generally if one wants to "upgrade" to -`v2` *now* and accept the volatility (read: "awesomeness") that comes along with -that, please use whatever version pinning of your preference, such as via -`gopkg.in`: - -``` -$ go get gopkg.in/urfave/cli.v2 -``` - -``` go -... -import ( - "gopkg.in/urfave/cli.v2" // imports as package "cli" -) -... -``` - -### Pinning to the `v1` releases - -Similarly to the section above describing use of the `v2` branch, if one wants -to avoid any unexpected compatibility pains once `v2` becomes `master`, then -pinning to `v1` is an acceptable option, e.g.: - -``` -$ go get gopkg.in/urfave/cli.v1 -``` - -``` go -... -import ( - "gopkg.in/urfave/cli.v1" // imports as package "cli" -) -... -``` - -This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing). - -## Getting Started - -One of the philosophies behind cli is that an API should be playful and full of -discovery. So a cli app can be as little as one line of code in `main()`. - - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - cli.NewApp().Run(os.Args) -} -``` - -This app will run and show help text, but is not very useful. Let's give an -action to execute and some help documentation: - - -``` go -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Action = func(c *cli.Context) error { - fmt.Println("boom! I say!") - return nil - } - - app.Run(os.Args) -} -``` - -Running this already gives you a ton of functionality, plus support for things -like subcommands and flags, which are covered below. - -## Examples - -Being a programmer can be a lonely job. Thankfully by the power of automation -that is not the case! Let's create a greeter app to fend off our demons of -loneliness! - -Start by creating a directory named `greet`, and within it, add a file, -`greet.go` with the following code in it: - - -``` go -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "greet" - app.Usage = "fight the loneliness!" - app.Action = func(c *cli.Context) error { - fmt.Println("Hello friend!") - return nil - } - - app.Run(os.Args) -} -``` - -Install our command to the `$GOPATH/bin` directory: - -``` -$ go install -``` - -Finally run our new command: - -``` -$ greet -Hello friend! -``` - -cli also generates neat help text: - -``` -$ greet help -NAME: - greet - fight the loneliness! - -USAGE: - greet [global options] command [command options] [arguments...] 
- -VERSION: - 0.0.0 - -COMMANDS: - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS - --version Shows version information -``` - -### Arguments - -You can lookup arguments by calling the `Args` function on `cli.Context`, e.g.: - - -``` go -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Action = func(c *cli.Context) error { - fmt.Printf("Hello %q", c.Args().Get(0)) - return nil - } - - app.Run(os.Args) -} -``` - -### Flags - -Setting and querying flags is simple. - - -``` go -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, - } - - app.Action = func(c *cli.Context) error { - name := "Nefertiti" - if c.NArg() > 0 { - name = c.Args().Get(0) - } - if c.String("lang") == "spanish" { - fmt.Println("Hola", name) - } else { - fmt.Println("Hello", name) - } - return nil - } - - app.Run(os.Args) -} -``` - -You can also set a destination variable for a flag, to which the content will be -scanned. - - -``` go -package main - -import ( - "os" - "fmt" - - "github.com/urfave/cli" -) - -func main() { - var language string - - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - Destination: &language, - }, - } - - app.Action = func(c *cli.Context) error { - name := "someone" - if c.NArg() > 0 { - name = c.Args()[0] - } - if language == "spanish" { - fmt.Println("Hola", name) - } else { - fmt.Println("Hello", name) - } - return nil - } - - app.Run(os.Args) -} -``` - -See full list of flags at http://godoc.org/github.com/urfave/cli - -#### Placeholder Values - -Sometimes it's useful to specify a flag's value within the usage string itself. -Such placeholders are indicated with back quotes. - -For example this: - - -```go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "config, c", - Usage: "Load configuration from `FILE`", - }, - } - - app.Run(os.Args) -} -``` - -Will result in help output like: - -``` ---config FILE, -c FILE Load configuration from FILE -``` - -Note that only the first placeholder is used. Subsequent back-quoted words will -be left as-is. - -#### Alternate Names - -You can set alternate (or short) names for flags by providing a comma-delimited -list for the `Name`. e.g. - - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - }, - } - - app.Run(os.Args) -} -``` - -That flag can then be set with `--lang spanish` or `-l spanish`. Note that -giving two different forms of the same flag in the same command invocation is an -error. - -#### Ordering - -Flags for the application and commands are shown in the order they are defined. -However, it's possible to sort them from outside this library by using `FlagsByName` -with `sort`. 
- -For example this: - - -``` go -package main - -import ( - "os" - "sort" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "Language for the greeting", - }, - cli.StringFlag{ - Name: "config, c", - Usage: "Load configuration from `FILE`", - }, - } - - sort.Sort(cli.FlagsByName(app.Flags)) - - app.Run(os.Args) -} -``` - -Will result in help output like: - -``` ---config FILE, -c FILE Load configuration from FILE ---lang value, -l value Language for the greeting (default: "english") -``` - -#### Values from the Environment - -You can also have the default value set from the environment via `EnvVar`. e.g. - - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "APP_LANG", - }, - } - - app.Run(os.Args) -} -``` - -The `EnvVar` may also be given as a comma-delimited "cascade", where the first -environment variable that resolves is used as the default. - - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG", - }, - } - - app.Run(os.Args) -} -``` - -#### Values from alternate input sources (YAML, TOML, and others) - -There is a separate package altsrc that adds support for getting flag values -from other file input sources. - -Currently supported input source formats: -* YAML -* TOML - -In order to get values for a flag from an alternate input source the following -code would be added to wrap an existing cli.Flag like below: - -``` go - altsrc.NewIntFlag(cli.IntFlag{Name: "test"}) -``` - -Initialization must also occur for these flags. Below is an example initializing -getting data from a yaml file below. - -``` go - command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) -``` - -The code above will use the "load" string as a flag name to get the file name of -a yaml file from the cli.Context. It will then use that file name to initialize -the yaml input source for any flags that are defined on that command. As a note -the "load" flag used would also have to be defined on the command flags in order -for this code snipped to work. - -Currently only the aboved specified formats are supported but developers can -add support for other input sources by implementing the -altsrc.InputSourceContext for their given sources. - -Here is a more complete sample of a command using YAML support: - - -``` go -package notmain - -import ( - "fmt" - "os" - - "github.com/urfave/cli" - "github.com/urfave/cli/altsrc" -) - -func main() { - app := cli.NewApp() - - flags := []cli.Flag{ - altsrc.NewIntFlag(cli.IntFlag{Name: "test"}), - cli.StringFlag{Name: "load"}, - } - - app.Action = func(c *cli.Context) error { - fmt.Println("yaml ist rad") - return nil - } - - app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load")) - app.Flags = flags - - app.Run(os.Args) -} -``` - -### Subcommands - -Subcommands can be defined for a more git-like command line app. 
- - -```go -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Commands = []cli.Command{ - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a task to the list", - Action: func(c *cli.Context) error { - fmt.Println("added task: ", c.Args().First()) - return nil - }, - }, - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) error { - fmt.Println("completed task: ", c.Args().First()) - return nil - }, - }, - { - Name: "template", - Aliases: []string{"t"}, - Usage: "options for task templates", - Subcommands: []cli.Command{ - { - Name: "add", - Usage: "add a new template", - Action: func(c *cli.Context) error { - fmt.Println("new task template: ", c.Args().First()) - return nil - }, - }, - { - Name: "remove", - Usage: "remove an existing template", - Action: func(c *cli.Context) error { - fmt.Println("removed task template: ", c.Args().First()) - return nil - }, - }, - }, - }, - } - - app.Run(os.Args) -} -``` - -### Subcommands categories - -For additional organization in apps that have many subcommands, you can -associate a category for each command to group them together in the help -output. - -E.g. - -```go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Commands = []cli.Command{ - { - Name: "noop", - }, - { - Name: "add", - Category: "template", - }, - { - Name: "remove", - Category: "template", - }, - } - - app.Run(os.Args) -} -``` - -Will include: - -``` -COMMANDS: - noop - - Template actions: - add - remove -``` - -### Exit code - -Calling `App.Run` will not automatically call `os.Exit`, which means that by -default the exit code will "fall through" to being `0`. An explicit exit code -may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a -`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Flags = []cli.Flag{ - cli.BoolTFlag{ - Name: "ginger-crouton", - Usage: "is it in the soup?", - }, - } - app.Action = func(ctx *cli.Context) error { - if !ctx.Bool("ginger-crouton") { - return cli.NewExitError("it is not in the soup", 86) - } - return nil - } - - app.Run(os.Args) -} -``` - -### Bash Completion - -You can enable completion commands by setting the `EnableBashCompletion` -flag on the `App` object. By default, this setting will only auto-complete to -show an app's subcommands, but you can write your own completion methods for -the App or its subcommands. 
- - -``` go -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli" -) - -func main() { - tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"} - - app := cli.NewApp() - app.EnableBashCompletion = true - app.Commands = []cli.Command{ - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) error { - fmt.Println("completed task: ", c.Args().First()) - return nil - }, - BashComplete: func(c *cli.Context) { - // This will complete if no args are passed - if c.NArg() > 0 { - return - } - for _, t := range tasks { - fmt.Println(t) - } - }, - }, - } - - app.Run(os.Args) -} -``` - -#### Enabling - -Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while -setting the `PROG` variable to the name of your program: - -`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` - -#### Distribution - -Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename -it to the name of the program you wish to add autocomplete support for (or -automatically install it there if you are distributing a package). Don't forget -to source the file to make it active in the current shell. - -``` -sudo cp src/bash_autocomplete /etc/bash_completion.d/ -source /etc/bash_completion.d/ -``` - -Alternatively, you can just document that users should source the generic -`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set -to the name of their program (as above). - -#### Customization - -The default bash completion flag (`--generate-bash-completion`) is defined as -`cli.BashCompletionFlag`, and may be redefined if desired, e.g.: - - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - cli.BashCompletionFlag = cli.BoolFlag{ - Name: "compgen", - Hidden: true, - } - - app := cli.NewApp() - app.EnableBashCompletion = true - app.Commands = []cli.Command{ - { - Name: "wat", - }, - } - app.Run(os.Args) -} -``` - -### Generated Help Text - -The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked -by the cli internals in order to print generated help text for the app, command, -or subcommand, and break execution. - -#### Customization - -All of the help text generation may be customized, and at multiple levels. The -templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and -`SubcommandHelpTemplate` which may be reassigned or augmented, and full override -is possible by assigning a compatible func to the `cli.HelpPrinter` variable, -e.g.: - - -``` go -package main - -import ( - "fmt" - "io" - "os" - - "github.com/urfave/cli" -) - -func main() { - // EXAMPLE: Append to an existing template - cli.AppHelpTemplate = fmt.Sprintf(`%s - -WEBSITE: http://awesometown.example.com - -SUPPORT: support@awesometown.example.com - -`, cli.AppHelpTemplate) - - // EXAMPLE: Override a template - cli.AppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command -[command options]{{end}} {{if -.ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} - {{if len .Authors}} -AUTHOR(S): - {{range .Authors}}{{ . 
}}{{end}} - {{end}}{{if .Commands}} -COMMANDS: -{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t" -}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} -GLOBAL OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}}{{if .Copyright }} -COPYRIGHT: - {{.Copyright}} - {{end}}{{if .Version}} -VERSION: - {{.Version}} - {{end}} -` - - // EXAMPLE: Replace the `HelpPrinter` func - cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { - fmt.Println("Ha HA. I pwnd the help!!1") - } - - cli.NewApp().Run(os.Args) -} -``` - -The default flag may be customized to something other than `-h/--help` by -setting `cli.HelpFlag`, e.g.: - - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - cli.HelpFlag = cli.BoolFlag{ - Name: "halp, haaaaalp", - Usage: "HALP", - EnvVar: "SHOW_HALP,HALPPLZ", - } - - cli.NewApp().Run(os.Args) -} -``` - -### Version Flag - -The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which -is checked by the cli internals in order to print the `App.Version` via -`cli.VersionPrinter` and break execution. - -#### Customization - -The default flag may be customized to something other than `-v/--version` by -setting `cli.VersionFlag`, e.g.: - - -``` go -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -func main() { - cli.VersionFlag = cli.BoolFlag{ - Name: "print-version, V", - Usage: "print only the version", - } - - app := cli.NewApp() - app.Name = "partay" - app.Version = "19.99.0" - app.Run(os.Args) -} -``` - -Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.: - - -``` go -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli" -) - -var ( - Revision = "fafafaf" -) - -func main() { - cli.VersionPrinter = func(c *cli.Context) { - fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision) - } - - app := cli.NewApp() - app.Name = "partay" - app.Version = "19.99.0" - app.Run(os.Args) -} -``` - -#### Full API Example - -**Notice**: This is a contrived (functioning) example meant strictly for API -demonstration purposes. Use of one's imagination is encouraged. 
- - -``` go -package main - -import ( - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "time" - - "github.com/urfave/cli" -) - -func init() { - cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n" - cli.CommandHelpTemplate += "\nYMMV\n" - cli.SubcommandHelpTemplate += "\nor something\n" - - cli.HelpFlag = cli.BoolFlag{Name: "halp"} - cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true} - cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"} - - cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { - fmt.Fprintf(w, "best of luck to you\n") - } - cli.VersionPrinter = func(c *cli.Context) { - fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version) - } - cli.OsExiter = func(c int) { - fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c) - } - cli.ErrWriter = ioutil.Discard - cli.FlagStringer = func(fl cli.Flag) string { - return fmt.Sprintf("\t\t%s", fl.GetName()) - } -} - -type hexWriter struct{} - -func (w *hexWriter) Write(p []byte) (int, error) { - for _, b := range p { - fmt.Printf("%x", b) - } - fmt.Printf("\n") - - return len(p), nil -} - -type genericType struct{ - s string -} - -func (g *genericType) Set(value string) error { - g.s = value - return nil -} - -func (g *genericType) String() string { - return g.s -} - -func main() { - app := cli.NewApp() - app.Name = "kənˈtrīv" - app.Version = "19.99.0" - app.Compiled = time.Now() - app.Authors = []cli.Author{ - cli.Author{ - Name: "Example Human", - Email: "human@example.com", - }, - } - app.Copyright = "(c) 1999 Serious Enterprise" - app.HelpName = "contrive" - app.Usage = "demonstrate available API" - app.UsageText = "contrive - demonstrating the available API" - app.ArgsUsage = "[args and such]" - app.Commands = []cli.Command{ - cli.Command{ - Name: "doo", - Aliases: []string{"do"}, - Category: "motion", - Usage: "do the doo", - UsageText: "doo - does the dooing", - Description: "no really, there is a lot of dooing to be done", - ArgsUsage: "[arrgh]", - Flags: []cli.Flag{ - cli.BoolFlag{Name: "forever, forevvarr"}, - }, - Subcommands: cli.Commands{ - cli.Command{ - Name: "wop", - Action: wopAction, - }, - }, - SkipFlagParsing: false, - HideHelp: false, - Hidden: false, - HelpName: "doo!", - BashComplete: func(c *cli.Context) { - fmt.Fprintf(c.App.Writer, "--better\n") - }, - Before: func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "brace for impact\n") - return nil - }, - After: func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "did we lose anyone?\n") - return nil - }, - Action: func(c *cli.Context) error { - c.Command.FullName() - c.Command.HasName("wop") - c.Command.Names() - c.Command.VisibleFlags() - fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n") - if c.Bool("forever") { - c.Command.Run(c) - } - return nil - }, - OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error { - fmt.Fprintf(c.App.Writer, "for shame\n") - return err - }, - }, - } - app.Flags = []cli.Flag{ - cli.BoolFlag{Name: "fancy"}, - cli.BoolTFlag{Name: "fancier"}, - cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3}, - cli.Float64Flag{Name: "howmuch"}, - cli.GenericFlag{Name: "wat", Value: &genericType{}}, - cli.Int64Flag{Name: "longdistance"}, - cli.Int64SliceFlag{Name: "intervals"}, - cli.IntFlag{Name: "distance"}, - cli.IntSliceFlag{Name: "times"}, - cli.StringFlag{Name: "dance-move, d"}, - cli.StringSliceFlag{Name: "names, N"}, - cli.UintFlag{Name: "age"}, - cli.Uint64Flag{Name: "bigage"}, - } - app.EnableBashCompletion = true - app.HideHelp = false 
- app.HideVersion = false - app.BashComplete = func(c *cli.Context) { - fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n") - } - app.Before = func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n") - return nil - } - app.After = func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "Phew!\n") - return nil - } - app.CommandNotFound = func(c *cli.Context, command string) { - fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command) - } - app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error { - if isSubcommand { - return err - } - - fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err) - return nil - } - app.Action = func(c *cli.Context) error { - cli.DefaultAppComplete(c) - cli.HandleExitCoder(errors.New("not an exit coder, though")) - cli.ShowAppHelp(c) - cli.ShowCommandCompletions(c, "nope") - cli.ShowCommandHelp(c, "also-nope") - cli.ShowCompletions(c) - cli.ShowSubcommandHelp(c) - cli.ShowVersion(c) - - categories := c.App.Categories() - categories.AddCommand("sounds", cli.Command{ - Name: "bloop", - }) - - for _, category := range c.App.Categories() { - fmt.Fprintf(c.App.Writer, "%s\n", category.Name) - fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands) - fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands()) - } - - fmt.Printf("%#v\n", c.App.Command("doo")) - if c.Bool("infinite") { - c.App.Run([]string{"app", "doo", "wop"}) - } - - if c.Bool("forevar") { - c.App.RunAsSubcommand(c) - } - c.App.Setup() - fmt.Printf("%#v\n", c.App.VisibleCategories()) - fmt.Printf("%#v\n", c.App.VisibleCommands()) - fmt.Printf("%#v\n", c.App.VisibleFlags()) - - fmt.Printf("%#v\n", c.Args().First()) - if len(c.Args()) > 0 { - fmt.Printf("%#v\n", c.Args()[1]) - } - fmt.Printf("%#v\n", c.Args().Present()) - fmt.Printf("%#v\n", c.Args().Tail()) - - set := flag.NewFlagSet("contrive", 0) - nc := cli.NewContext(c.App, set, c) - - fmt.Printf("%#v\n", nc.Args()) - fmt.Printf("%#v\n", nc.Bool("nope")) - fmt.Printf("%#v\n", nc.BoolT("nerp")) - fmt.Printf("%#v\n", nc.Duration("howlong")) - fmt.Printf("%#v\n", nc.Float64("hay")) - fmt.Printf("%#v\n", nc.Generic("bloop")) - fmt.Printf("%#v\n", nc.Int64("bonk")) - fmt.Printf("%#v\n", nc.Int64Slice("burnks")) - fmt.Printf("%#v\n", nc.Int("bips")) - fmt.Printf("%#v\n", nc.IntSlice("blups")) - fmt.Printf("%#v\n", nc.String("snurt")) - fmt.Printf("%#v\n", nc.StringSlice("snurkles")) - fmt.Printf("%#v\n", nc.Uint("flub")) - fmt.Printf("%#v\n", nc.Uint64("florb")) - fmt.Printf("%#v\n", nc.GlobalBool("global-nope")) - fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp")) - fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong")) - fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay")) - fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop")) - fmt.Printf("%#v\n", nc.GlobalInt("global-bips")) - fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups")) - fmt.Printf("%#v\n", nc.GlobalString("global-snurt")) - fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles")) - - fmt.Printf("%#v\n", nc.FlagNames()) - fmt.Printf("%#v\n", nc.GlobalFlagNames()) - fmt.Printf("%#v\n", nc.GlobalIsSet("wat")) - fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope")) - fmt.Printf("%#v\n", nc.NArg()) - fmt.Printf("%#v\n", nc.NumFlags()) - fmt.Printf("%#v\n", nc.Parent()) - - nc.Set("wat", "also-nope") - - ec := cli.NewExitError("ohwell", 86) - fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode()) - fmt.Printf("made it!\n") - return ec - } - - if os.Getenv("HEXY") != "" { - app.Writer = &hexWriter{} - app.ErrWriter = &hexWriter{} - } - - app.Metadata = 
map[string]interface{}{ - "layers": "many", - "explicable": false, - "whatever-values": 19.99, - } - - app.Run(os.Args) -} - -func wopAction(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n") - return nil -} -``` - -## Contribution Guidelines - -Feel free to put up a pull request to fix a bug or maybe add a feature. I will -give it a code review and make sure that it does not break backwards -compatibility. If I or any other collaborators agree that it is in line with -the vision of the project, we will work with you to get the code into -a mergeable state and merge it into the master branch. - -If you have contributed something significant to the project, we will most -likely add you as a collaborator. As a collaborator you are given the ability -to merge others pull requests. It is very important that new code does not -break existing code, so be careful about what code you do choose to merge. - -If you feel like you have contributed to the project but have not yet been -added as a collaborator, we probably forgot to add you, please open an issue. diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/app.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/app.go deleted file mode 100644 index b9adf46..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/app.go +++ /dev/null @@ -1,502 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "sort" - "strings" - "time" -) - -var ( - changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" - appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) - runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) - - contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." - - errNonFuncAction = NewExitError("ERROR invalid Action type. "+ - fmt.Sprintf("Must be a func of type `cli.ActionFunc`. %s", contactSysadmin)+ - fmt.Sprintf("See %s", appActionDeprecationURL), 2) - errInvalidActionSignature = NewExitError("ERROR invalid Action signature. "+ - fmt.Sprintf("Must be `cli.ActionFunc`. %s", contactSysadmin)+ - fmt.Sprintf("See %s", appActionDeprecationURL), 2) -) - -// App is the main structure of a cli application. It is recommended that -// an app be created with the cli.NewApp() function -type App struct { - // The name of the program. Defaults to path.Base(os.Args[0]) - Name string - // Full name of command for help, defaults to Name - HelpName string - // Description of the program. - Usage string - // Text to override the USAGE section of help - UsageText string - // Description of the program argument format. 
- ArgsUsage string - // Version of the program - Version string - // List of commands to execute - Commands []Command - // List of flags to parse - Flags []Flag - // Boolean to enable bash completion commands - EnableBashCompletion bool - // Boolean to hide built-in help command - HideHelp bool - // Boolean to hide built-in version flag and the VERSION section of help - HideVersion bool - // Populate on app startup, only gettable through method Categories() - categories CommandCategories - // An action to execute when the bash-completion flag is set - BashComplete BashCompleteFunc - // An action to execute before any subcommands are run, but after the context is ready - // If a non-nil error is returned, no subcommands are run - Before BeforeFunc - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After AfterFunc - - // The action to execute when no subcommands are specified - // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}` - // *Note*: support for the deprecated `Action` signature will be removed in a future version - Action interface{} - - // Execute this function if the proper command cannot be found - CommandNotFound CommandNotFoundFunc - // Execute this function if an usage error occurs - OnUsageError OnUsageErrorFunc - // Compilation date - Compiled time.Time - // List of all authors who contributed - Authors []Author - // Copyright of the binary if any - Copyright string - // Name of Author (Note: Use App.Authors, this is deprecated) - Author string - // Email of Author (Note: Use App.Authors, this is deprecated) - Email string - // Writer writer to write output to - Writer io.Writer - // ErrWriter writes error output - ErrWriter io.Writer - // Other custom info - Metadata map[string]interface{} - - didSetup bool -} - -// Tries to find out when this binary was compiled. -// Returns the current time if it fails to find it. -func compileTime() time.Time { - info, err := os.Stat(os.Args[0]) - if err != nil { - return time.Now() - } - return info.ModTime() -} - -// NewApp creates a new cli Application with some reasonable defaults for Name, -// Usage, Version and Action. -func NewApp() *App { - return &App{ - Name: filepath.Base(os.Args[0]), - HelpName: filepath.Base(os.Args[0]), - Usage: "A new cli application", - UsageText: "", - Version: "0.0.0", - BashComplete: DefaultAppComplete, - Action: helpCommand.Action, - Compiled: compileTime(), - Writer: os.Stdout, - } -} - -// Setup runs initialization code to ensure all data structures are ready for -// `Run` or inspection prior to `Run`. It is internally called by `Run`, but -// will return early if setup has already happened. 
-func (a *App) Setup() { - if a.didSetup { - return - } - - a.didSetup = true - - if a.Author != "" || a.Email != "" { - a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) - } - - newCmds := []Command{} - for _, c := range a.Commands { - if c.HelpName == "" { - c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) - } - newCmds = append(newCmds, c) - } - a.Commands = newCmds - - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) - } - - if !a.HideVersion { - a.appendFlag(VersionFlag) - } - - a.categories = CommandCategories{} - for _, command := range a.Commands { - a.categories = a.categories.AddCommand(command.Category, command) - } - sort.Sort(a.categories) - - if a.Metadata == nil { - a.Metadata = make(map[string]interface{}) - } -} - -// Run is the entry point to the cli app. Parses the arguments slice and routes -// to the proper flag/args combination -func (a *App) Run(arguments []string) (err error) { - a.Setup() - - // parse flags - set := flagSet(a.Name, a.Flags) - set.SetOutput(ioutil.Discard) - err = set.Parse(arguments[1:]) - nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, nil) - if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - ShowAppHelp(context) - return nerr - } - - if checkCompletions(context) { - return nil - } - - if err != nil { - if a.OnUsageError != nil { - err := a.OnUsageError(context, err, false) - HandleExitCoder(err) - return err - } - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowAppHelp(context) - return err - } - - if !a.HideHelp && checkHelp(context) { - ShowAppHelp(context) - return nil - } - - if !a.HideVersion && checkVersion(context) { - ShowVersion(context) - return nil - } - - if a.After != nil { - defer func() { - if afterErr := a.After(context); afterErr != nil { - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - - if a.Before != nil { - beforeErr := a.Before(context) - if beforeErr != nil { - fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) - ShowAppHelp(context) - HandleExitCoder(beforeErr) - err = beforeErr - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - // Run default Action - err = HandleAction(a.Action, context) - - HandleExitCoder(err) - return err -} - -// RunAndExitOnError calls .Run() and exits non-zero if an error was returned -// -// Deprecated: instead you should return an error that fulfills cli.ExitCoder -// to cli.App.Run. 
This will cause the application to exit with the given eror -// code in the cli.ExitCoder -func (a *App) RunAndExitOnError() { - if err := a.Run(os.Args); err != nil { - fmt.Fprintln(a.errWriter(), err) - OsExiter(1) - } -} - -// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to -// generate command-specific flags -func (a *App) RunAsSubcommand(ctx *Context) (err error) { - // append help to commands - if len(a.Commands) > 0 { - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - } - - newCmds := []Command{} - for _, c := range a.Commands { - if c.HelpName == "" { - c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) - } - newCmds = append(newCmds, c) - } - a.Commands = newCmds - - // append flags - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) - } - - // parse flags - set := flagSet(a.Name, a.Flags) - set.SetOutput(ioutil.Discard) - err = set.Parse(ctx.Args().Tail()) - nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, ctx) - - if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - fmt.Fprintln(a.Writer) - if len(a.Commands) > 0 { - ShowSubcommandHelp(context) - } else { - ShowCommandHelp(ctx, context.Args().First()) - } - return nerr - } - - if checkCompletions(context) { - return nil - } - - if err != nil { - if a.OnUsageError != nil { - err = a.OnUsageError(context, err, true) - HandleExitCoder(err) - return err - } - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowSubcommandHelp(context) - return err - } - - if len(a.Commands) > 0 { - if checkSubcommandHelp(context) { - return nil - } - } else { - if checkCommandHelp(ctx, context.Args().First()) { - return nil - } - } - - if a.After != nil { - defer func() { - afterErr := a.After(context) - if afterErr != nil { - HandleExitCoder(err) - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - - if a.Before != nil { - beforeErr := a.Before(context) - if beforeErr != nil { - HandleExitCoder(beforeErr) - err = beforeErr - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - // Run default Action - err = HandleAction(a.Action, context) - - HandleExitCoder(err) - return err -} - -// Command returns the named command on App. 
Returns nil if the command does not exist -func (a *App) Command(name string) *Command { - for _, c := range a.Commands { - if c.HasName(name) { - return &c - } - } - - return nil -} - -// Categories returns a slice containing all the categories with the commands they contain -func (a *App) Categories() CommandCategories { - return a.categories -} - -// VisibleCategories returns a slice of categories and commands that are -// Hidden=false -func (a *App) VisibleCategories() []*CommandCategory { - ret := []*CommandCategory{} - for _, category := range a.categories { - if visible := func() *CommandCategory { - for _, command := range category.Commands { - if !command.Hidden { - return category - } - } - return nil - }(); visible != nil { - ret = append(ret, visible) - } - } - return ret -} - -// VisibleCommands returns a slice of the Commands with Hidden=false -func (a *App) VisibleCommands() []Command { - ret := []Command{} - for _, command := range a.Commands { - if !command.Hidden { - ret = append(ret, command) - } - } - return ret -} - -// VisibleFlags returns a slice of the Flags with Hidden=false -func (a *App) VisibleFlags() []Flag { - return visibleFlags(a.Flags) -} - -func (a *App) hasFlag(flag Flag) bool { - for _, f := range a.Flags { - if flag == f { - return true - } - } - - return false -} - -func (a *App) errWriter() io.Writer { - - // When the app ErrWriter is nil use the package level one. - if a.ErrWriter == nil { - return ErrWriter - } - - return a.ErrWriter -} - -func (a *App) appendFlag(flag Flag) { - if !a.hasFlag(flag) { - a.Flags = append(a.Flags, flag) - } -} - -// Author represents someone who has contributed to a cli project. -type Author struct { - Name string // The Authors name - Email string // The Authors email -} - -// String makes Author comply to the Stringer interface, to allow an easy print in the templating process -func (a Author) String() string { - e := "" - if a.Email != "" { - e = "<" + a.Email + "> " - } - - return fmt.Sprintf("%v %v", a.Name, e) -} - -// HandleAction uses ✧✧✧reflection✧✧✧ to figure out if the given Action is an -// ActionFunc, a func with the legacy signature for Action, or some other -// invalid thing. If it's an ActionFunc or a func with the legacy signature for -// Action, the func is run! -func HandleAction(action interface{}, context *Context) (err error) { - defer func() { - if r := recover(); r != nil { - // Try to detect a known reflection error from *this scope*, rather than - // swallowing all panics that may happen when calling an Action func. 
- s := fmt.Sprintf("%v", r) - if strings.HasPrefix(s, "reflect: ") && strings.Contains(s, "too many input arguments") { - err = NewExitError(fmt.Sprintf("ERROR unknown Action error: %v.", r), 2) - } else { - panic(r) - } - } - }() - - if reflect.TypeOf(action).Kind() != reflect.Func { - return errNonFuncAction - } - - vals := reflect.ValueOf(action).Call([]reflect.Value{reflect.ValueOf(context)}) - - if len(vals) == 0 { - return nil - } - - if len(vals) > 1 { - return errInvalidActionSignature - } - - if retErr, ok := vals[0].Interface().(error); vals[0].IsValid() && ok { - return retErr - } - - return err -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/appveyor.yml b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/appveyor.yml deleted file mode 100644 index 698b188..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/appveyor.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\urfave\cli - -environment: - GOPATH: C:\gopath - GOVERSION: 1.6 - PYTHON: C:\Python27-x64 - PYTHON_VERSION: 2.7.x - PYTHON_ARCH: 64 - -install: -- set PATH=%GOPATH%\bin;C:\go\bin;%PATH% -- go version -- go env -- go get github.com/urfave/gfmrun/... -- go get -v -t ./... - -build_script: -- python runtests vet -- python runtests test -- python runtests gfmrun diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/category.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/category.go deleted file mode 100644 index 1a60550..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/category.go +++ /dev/null @@ -1,44 +0,0 @@ -package cli - -// CommandCategories is a slice of *CommandCategory. -type CommandCategories []*CommandCategory - -// CommandCategory is a category containing commands. -type CommandCategory struct { - Name string - Commands Commands -} - -func (c CommandCategories) Less(i, j int) bool { - return c[i].Name < c[j].Name -} - -func (c CommandCategories) Len() int { - return len(c) -} - -func (c CommandCategories) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -// AddCommand adds a command to a category. -func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { - for _, commandCategory := range c { - if commandCategory.Name == category { - commandCategory.Commands = append(commandCategory.Commands, command) - return c - } - } - return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) -} - -// VisibleCommands returns a slice of the Commands with Hidden=false -func (c *CommandCategory) VisibleCommands() []Command { - ret := []Command{} - for _, command := range c.Commands { - if !command.Hidden { - ret = append(ret, command) - } - } - return ret -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/cli.go deleted file mode 100644 index 74fd101..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/cli.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package cli provides a minimal framework for creating and organizing command line -// Go applications. 
cli is designed to be easy to understand and write, the most simple -// cli application can be written as follows: -// func main() { -// cli.NewApp().Run(os.Args) -// } -// -// Of course this application does not do much, so let's make this an actual application: -// func main() { -// app := cli.NewApp() -// app.Name = "greet" -// app.Usage = "say a greeting" -// app.Action = func(c *cli.Context) error { -// println("Greetings") -// } -// -// app.Run(os.Args) -// } -package cli - -//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/command.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/command.go deleted file mode 100644 index 96253b6..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/command.go +++ /dev/null @@ -1,284 +0,0 @@ -package cli - -import ( - "fmt" - "io/ioutil" - "sort" - "strings" -) - -// Command is a subcommand for a cli.App. -type Command struct { - // The name of the command - Name string - // short name of the command. Typically one character (deprecated, use `Aliases`) - ShortName string - // A list of aliases for the command - Aliases []string - // A short description of the usage of this command - Usage string - // Custom text to show on USAGE section of help - UsageText string - // A longer explanation of how the command works - Description string - // A short description of the arguments of this command - ArgsUsage string - // The category the command is part of - Category string - // The function to call when checking for bash command completions - BashComplete BashCompleteFunc - // An action to execute before any sub-subcommands are run, but after the context is ready - // If a non-nil error is returned, no sub-subcommands are run - Before BeforeFunc - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After AfterFunc - // The function to call when this command is invoked - Action interface{} - // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind - // of deprecation period has passed, maybe? - - // Execute this function if a usage error occurs. - OnUsageError OnUsageErrorFunc - // List of child commands - Subcommands Commands - // List of flags to parse - Flags []Flag - // Treat all flags as normal arguments if true - SkipFlagParsing bool - // Skip argument reordering which attempts to move flags before arguments, - // but only works if all flags appear after all arguments. This behavior was - // removed n version 2 since it only works under specific conditions so we - // backport here by exposing it as an option for compatibility. - SkipArgReorder bool - // Boolean to hide built-in help command - HideHelp bool - // Boolean to hide this command from help or completion - Hidden bool - - // Full name of command for help, defaults to full command name, including parent commands. - HelpName string - commandNamePath []string -} - -// FullName returns the full name of the command. 
-// For subcommands this ensures that parent commands are part of the command path -func (c Command) FullName() string { - if c.commandNamePath == nil { - return c.Name - } - return strings.Join(c.commandNamePath, " ") -} - -// Commands is a slice of Command -type Commands []Command - -// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags -func (c Command) Run(ctx *Context) (err error) { - if len(c.Subcommands) > 0 { - return c.startApp(ctx) - } - - if !c.HideHelp && (HelpFlag != BoolFlag{}) { - // append help to flags - c.Flags = append( - c.Flags, - HelpFlag, - ) - } - - if ctx.App.EnableBashCompletion { - c.Flags = append(c.Flags, BashCompletionFlag) - } - - set := flagSet(c.Name, c.Flags) - set.SetOutput(ioutil.Discard) - - if c.SkipFlagParsing { - err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) - } else if !c.SkipArgReorder { - firstFlagIndex := -1 - terminatorIndex := -1 - for index, arg := range ctx.Args() { - if arg == "--" { - terminatorIndex = index - break - } else if arg == "-" { - // Do nothing. A dash alone is not really a flag. - continue - } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { - firstFlagIndex = index - } - } - - if firstFlagIndex > -1 { - args := ctx.Args() - regularArgs := make([]string, len(args[1:firstFlagIndex])) - copy(regularArgs, args[1:firstFlagIndex]) - - var flagArgs []string - if terminatorIndex > -1 { - flagArgs = args[firstFlagIndex:terminatorIndex] - regularArgs = append(regularArgs, args[terminatorIndex:]...) - } else { - flagArgs = args[firstFlagIndex:] - } - - err = set.Parse(append(flagArgs, regularArgs...)) - } else { - err = set.Parse(ctx.Args().Tail()) - } - } else { - err = set.Parse(ctx.Args().Tail()) - } - - if err != nil { - if c.OnUsageError != nil { - err := c.OnUsageError(ctx, err, false) - HandleExitCoder(err) - return err - } - fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - return err - } - - nerr := normalizeFlags(c.Flags, set) - if nerr != nil { - fmt.Fprintln(ctx.App.Writer, nerr) - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - return nerr - } - - context := NewContext(ctx.App, set, ctx) - - if checkCommandCompletions(context, c.Name) { - return nil - } - - if checkCommandHelp(context, c.Name) { - return nil - } - - if c.After != nil { - defer func() { - afterErr := c.After(context) - if afterErr != nil { - HandleExitCoder(err) - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - - if c.Before != nil { - err = c.Before(context) - if err != nil { - fmt.Fprintln(ctx.App.Writer, err) - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - HandleExitCoder(err) - return err - } - } - - context.Command = c - err = HandleAction(c.Action, context) - - if err != nil { - HandleExitCoder(err) - } - return err -} - -// Names returns the names including short names and aliases. -func (c Command) Names() []string { - names := []string{c.Name} - - if c.ShortName != "" { - names = append(names, c.ShortName) - } - - return append(names, c.Aliases...) 
-} - -// HasName returns true if Command.Name or Command.ShortName matches given name -func (c Command) HasName(name string) bool { - for _, n := range c.Names() { - if n == name { - return true - } - } - return false -} - -func (c Command) startApp(ctx *Context) error { - app := NewApp() - app.Metadata = ctx.App.Metadata - // set the name and usage - app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) - if c.HelpName == "" { - app.HelpName = c.HelpName - } else { - app.HelpName = app.Name - } - - if c.Description != "" { - app.Usage = c.Description - } else { - app.Usage = c.Usage - } - - // set CommandNotFound - app.CommandNotFound = ctx.App.CommandNotFound - - // set the flags and commands - app.Commands = c.Subcommands - app.Flags = c.Flags - app.HideHelp = c.HideHelp - - app.Version = ctx.App.Version - app.HideVersion = ctx.App.HideVersion - app.Compiled = ctx.App.Compiled - app.Author = ctx.App.Author - app.Email = ctx.App.Email - app.Writer = ctx.App.Writer - - app.categories = CommandCategories{} - for _, command := range c.Subcommands { - app.categories = app.categories.AddCommand(command.Category, command) - } - - sort.Sort(app.categories) - - // bash completion - app.EnableBashCompletion = ctx.App.EnableBashCompletion - if c.BashComplete != nil { - app.BashComplete = c.BashComplete - } - - // set the actions - app.Before = c.Before - app.After = c.After - if c.Action != nil { - app.Action = c.Action - } else { - app.Action = helpSubcommand.Action - } - - for index, cc := range app.Commands { - app.Commands[index].commandNamePath = []string{c.Name, cc.Name} - } - - return app.RunAsSubcommand(ctx) -} - -// VisibleFlags returns a slice of the Flags with Hidden=false -func (c Command) VisibleFlags() []Flag { - return visibleFlags(c.Flags) -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/context.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/context.go deleted file mode 100644 index 492a742..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/context.go +++ /dev/null @@ -1,264 +0,0 @@ -package cli - -import ( - "errors" - "flag" - "os" - "reflect" - "strings" -) - -// Context is a type that is passed through to -// each Handler action in a cli application. Context -// can be used to retrieve context-specific Args and -// parsed command-line options. -type Context struct { - App *App - Command Command - flagSet *flag.FlagSet - setFlags map[string]bool - parentContext *Context -} - -// NewContext creates a new context. For use in when invoking an App or Command action. -func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { - return &Context{App: app, flagSet: set, parentContext: parentCtx} -} - -// NumFlags returns the number of flags set -func (c *Context) NumFlags() int { - return c.flagSet.NFlag() -} - -// Set sets a context flag to a value. 
-func (c *Context) Set(name, value string) error { - return c.flagSet.Set(name, value) -} - -// GlobalSet sets a context flag to a value on the global flagset -func (c *Context) GlobalSet(name, value string) error { - return globalContext(c).flagSet.Set(name, value) -} - -// IsSet determines if the flag was actually set -func (c *Context) IsSet(name string) bool { - if c.setFlags == nil { - c.setFlags = make(map[string]bool) - - c.flagSet.Visit(func(f *flag.Flag) { - c.setFlags[f.Name] = true - }) - - c.flagSet.VisitAll(func(f *flag.Flag) { - if _, ok := c.setFlags[f.Name]; ok { - return - } - c.setFlags[f.Name] = false - }) - - // XXX hack to support IsSet for flags with EnvVar - // - // There isn't an easy way to do this with the current implementation since - // whether a flag was set via an environment variable is very difficult to - // determine here. Instead, we intend to introduce a backwards incompatible - // change in version 2 to add `IsSet` to the Flag interface to push the - // responsibility closer to where the information required to determine - // whether a flag is set by non-standard means such as environment - // variables is avaliable. - // - // See https://github.com/urfave/cli/issues/294 for additional discussion - flags := c.Command.Flags - if c.Command.Name == "" { // cannot == Command{} since it contains slice types - if c.App != nil { - flags = c.App.Flags - } - } - for _, f := range flags { - eachName(f.GetName(), func(name string) { - if isSet, ok := c.setFlags[name]; isSet || !ok { - return - } - - val := reflect.ValueOf(f) - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - - envVarValue := val.FieldByName("EnvVar") - if !envVarValue.IsValid() { - return - } - - eachName(envVarValue.String(), func(envVar string) { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - c.setFlags[name] = true - return - } - }) - }) - } - } - - return c.setFlags[name] -} - -// GlobalIsSet determines if the global flag was actually set -func (c *Context) GlobalIsSet(name string) bool { - ctx := c - if ctx.parentContext != nil { - ctx = ctx.parentContext - } - - for ; ctx != nil; ctx = ctx.parentContext { - if ctx.IsSet(name) { - return true - } - } - return false -} - -// FlagNames returns a slice of flag names used in this context. -func (c *Context) FlagNames() (names []string) { - for _, flag := range c.Command.Flags { - name := strings.Split(flag.GetName(), ",")[0] - if name == "help" { - continue - } - names = append(names, name) - } - return -} - -// GlobalFlagNames returns a slice of global flag names used by the app. -func (c *Context) GlobalFlagNames() (names []string) { - for _, flag := range c.App.Flags { - name := strings.Split(flag.GetName(), ",")[0] - if name == "help" || name == "version" { - continue - } - names = append(names, name) - } - return -} - -// Parent returns the parent context, if any -func (c *Context) Parent() *Context { - return c.parentContext -} - -// Args contains apps console arguments -type Args []string - -// Args returns the command line arguments associated with the context. -func (c *Context) Args() Args { - args := Args(c.flagSet.Args()) - return args -} - -// NArg returns the number of the command line arguments. 
-func (c *Context) NArg() int { - return len(c.Args()) -} - -// Get returns the nth argument, or else a blank string -func (a Args) Get(n int) string { - if len(a) > n { - return a[n] - } - return "" -} - -// First returns the first argument, or else a blank string -func (a Args) First() string { - return a.Get(0) -} - -// Tail returns the rest of the arguments (not the first one) -// or else an empty string slice -func (a Args) Tail() []string { - if len(a) >= 2 { - return []string(a)[1:] - } - return []string{} -} - -// Present checks if there are any arguments present -func (a Args) Present() bool { - return len(a) != 0 -} - -// Swap swaps arguments at the given indexes -func (a Args) Swap(from, to int) error { - if from >= len(a) || to >= len(a) { - return errors.New("index out of range") - } - a[from], a[to] = a[to], a[from] - return nil -} - -func globalContext(ctx *Context) *Context { - if ctx == nil { - return nil - } - - for { - if ctx.parentContext == nil { - return ctx - } - ctx = ctx.parentContext - } -} - -func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { - if ctx.parentContext != nil { - ctx = ctx.parentContext - } - for ; ctx != nil; ctx = ctx.parentContext { - if f := ctx.flagSet.Lookup(name); f != nil { - return ctx.flagSet - } - } - return nil -} - -func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { - switch ff.Value.(type) { - case *StringSlice: - default: - set.Set(name, ff.Value.String()) - } -} - -func normalizeFlags(flags []Flag, set *flag.FlagSet) error { - visited := make(map[string]bool) - set.Visit(func(f *flag.Flag) { - visited[f.Name] = true - }) - for _, f := range flags { - parts := strings.Split(f.GetName(), ",") - if len(parts) == 1 { - continue - } - var ff *flag.Flag - for _, name := range parts { - name = strings.Trim(name, " ") - if visited[name] { - if ff != nil { - return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) - } - ff = set.Lookup(name) - } - } - if ff == nil { - continue - } - for _, name := range parts { - name = strings.Trim(name, " ") - if !visited[name] { - copyFlag(name, ff, set) - } - } - } - return nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/errors.go deleted file mode 100644 index ddef369..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/errors.go +++ /dev/null @@ -1,98 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "os" - "strings" -) - -// OsExiter is the function used when the app exits. If not set defaults to os.Exit. -var OsExiter = os.Exit - -// ErrWriter is used to write errors to the user. This can be anything -// implementing the io.Writer interface and defaults to os.Stderr. -var ErrWriter io.Writer = os.Stderr - -// MultiError is an error that wraps multiple errors. -type MultiError struct { - Errors []error -} - -// NewMultiError creates a new MultiError. Pass in one or more errors. -func NewMultiError(err ...error) MultiError { - return MultiError{Errors: err} -} - -// Error implements the error interface. 
-func (m MultiError) Error() string { - errs := make([]string, len(m.Errors)) - for i, err := range m.Errors { - errs[i] = err.Error() - } - - return strings.Join(errs, "\n") -} - -// ExitCoder is the interface checked by `App` and `Command` for a custom exit -// code -type ExitCoder interface { - error - ExitCode() int -} - -// ExitError fulfills both the builtin `error` interface and `ExitCoder` -type ExitError struct { - exitCode int - message string -} - -// NewExitError makes a new *ExitError -func NewExitError(message string, exitCode int) *ExitError { - return &ExitError{ - exitCode: exitCode, - message: message, - } -} - -// Error returns the string message, fulfilling the interface required by -// `error` -func (ee *ExitError) Error() string { - return ee.message -} - -// ExitCode returns the exit code, fulfilling the interface required by -// `ExitCoder` -func (ee *ExitError) ExitCode() int { - return ee.exitCode -} - -// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if -// so prints the error to stderr (if it is non-empty) and calls OsExiter with the -// given exit code. If the given error is a MultiError, then this func is -// called on all members of the Errors slice. -func HandleExitCoder(err error) { - if err == nil { - return - } - - if exitErr, ok := err.(ExitCoder); ok { - if err.Error() != "" { - fmt.Fprintln(ErrWriter, err) - } - OsExiter(exitErr.ExitCode()) - return - } - - if multiErr, ok := err.(MultiError); ok { - for _, merr := range multiErr.Errors { - HandleExitCoder(merr) - } - return - } - - if err.Error() != "" { - fmt.Fprintln(ErrWriter, err) - } - OsExiter(1) -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag-types.json b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag-types.json deleted file mode 100644 index 1223107..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag-types.json +++ /dev/null @@ -1,93 +0,0 @@ -[ - { - "name": "Bool", - "type": "bool", - "value": false, - "context_default": "false", - "parser": "strconv.ParseBool(f.Value.String())" - }, - { - "name": "BoolT", - "type": "bool", - "value": false, - "doctail": " that is true by default", - "context_default": "false", - "parser": "strconv.ParseBool(f.Value.String())" - }, - { - "name": "Duration", - "type": "time.Duration", - "doctail": " (see https://golang.org/pkg/time/#ParseDuration)", - "context_default": "0", - "parser": "time.ParseDuration(f.Value.String())" - }, - { - "name": "Float64", - "type": "float64", - "context_default": "0", - "parser": "strconv.ParseFloat(f.Value.String(), 64)" - }, - { - "name": "Generic", - "type": "Generic", - "dest": false, - "context_default": "nil", - "context_type": "interface{}" - }, - { - "name": "Int64", - "type": "int64", - "context_default": "0", - "parser": "strconv.ParseInt(f.Value.String(), 0, 64)" - }, - { - "name": "Int", - "type": "int", - "context_default": "0", - "parser": "strconv.ParseInt(f.Value.String(), 0, 64)", - "parser_cast": "int(parsed)" - }, - { - "name": "IntSlice", - "type": "*IntSlice", - "dest": false, - "context_default": "nil", - "context_type": "[]int", - "parser": "(f.Value.(*IntSlice)).Value(), error(nil)" - }, - { - "name": "Int64Slice", - "type": "*Int64Slice", - "dest": false, - "context_default": "nil", - "context_type": "[]int64", - "parser": "(f.Value.(*Int64Slice)).Value(), error(nil)" - }, - { - "name": "String", - "type": "string", - "context_default": "\"\"", - "parser": "f.Value.String(), 
error(nil)" - }, - { - "name": "StringSlice", - "type": "*StringSlice", - "dest": false, - "context_default": "nil", - "context_type": "[]string", - "parser": "(f.Value.(*StringSlice)).Value(), error(nil)" - }, - { - "name": "Uint64", - "type": "uint64", - "context_default": "0", - "parser": "strconv.ParseUint(f.Value.String(), 0, 64)" - }, - { - "name": "Uint", - "type": "uint", - "context_default": "0", - "parser": "strconv.ParseUint(f.Value.String(), 0, 64)", - "parser_cast": "uint(parsed)" - } -] diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag.go deleted file mode 100644 index 1ff28d3..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag.go +++ /dev/null @@ -1,636 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "time" -) - -const defaultPlaceholder = "value" - -// BashCompletionFlag enables bash-completion for all commands and subcommands -var BashCompletionFlag = BoolFlag{ - Name: "generate-bash-completion", - Hidden: true, -} - -// VersionFlag prints the version for the application -var VersionFlag = BoolFlag{ - Name: "version, v", - Usage: "print the version", -} - -// HelpFlag prints the help for all commands and subcommands -// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand -// unless HideHelp is set to true) -var HelpFlag = BoolFlag{ - Name: "help, h", - Usage: "show help", -} - -// FlagStringer converts a flag definition to a string. This is used by help -// to display a flag. -var FlagStringer FlagStringFunc = stringifyFlag - -// FlagsByName is a slice of Flag. -type FlagsByName []Flag - -func (f FlagsByName) Len() int { - return len(f) -} - -func (f FlagsByName) Less(i, j int) bool { - return f[i].GetName() < f[j].GetName() -} - -func (f FlagsByName) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// Flag is a common interface related to parsing flags in cli. -// For more advanced flag parsing techniques, it is recommended that -// this interface be implemented. 
-type Flag interface { - fmt.Stringer - // Apply Flag settings to the given flag set - Apply(*flag.FlagSet) - GetName() string -} - -func flagSet(name string, flags []Flag) *flag.FlagSet { - set := flag.NewFlagSet(name, flag.ContinueOnError) - - for _, f := range flags { - f.Apply(set) - } - return set -} - -func eachName(longName string, fn func(string)) { - parts := strings.Split(longName, ",") - for _, name := range parts { - name = strings.Trim(name, " ") - fn(name) - } -} - -// Generic is a generic parseable type identified by a specific flag -type Generic interface { - Set(value string) error - String() string -} - -// Apply takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -func (f GenericFlag) Apply(set *flag.FlagSet) { - val := f.Value - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - val.Set(envVal) - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -// StringSlice is an opaque type for []string to satisfy flag.Value -type StringSlice []string - -// Set appends the string value to the list of values -func (f *StringSlice) Set(value string) error { - *f = append(*f, value) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *StringSlice) String() string { - return fmt.Sprintf("%s", *f) -} - -// Value returns the slice of strings set by this flag -func (f *StringSlice) Value() []string { - return *f -} - -// Apply populates the flag given the flag set and environment -func (f StringSliceFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - newVal.Set(s) - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &StringSlice{} - } - set.Var(f.Value, name, f.Usage) - }) -} - -// IntSlice is an opaque type for []int to satisfy flag.Value -type IntSlice []int - -// Set parses the value into an integer and appends it to the list of values -func (f *IntSlice) Set(value string) error { - tmp, err := strconv.Atoi(value) - if err != nil { - return err - } - *f = append(*f, tmp) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *IntSlice) String() string { - return fmt.Sprintf("%#v", *f) -} - -// Value returns the slice of ints set by this flag -func (f *IntSlice) Value() []int { - return *f -} - -// Apply populates the flag given the flag set and environment -func (f IntSliceFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(ErrWriter, err.Error()) - } - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &IntSlice{} - } - set.Var(f.Value, name, f.Usage) - }) -} - -// Int64Slice is an opaque type for []int to satisfy flag.Value -type Int64Slice []int64 - -// Set parses the value into an 
integer and appends it to the list of values -func (f *Int64Slice) Set(value string) error { - tmp, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return err - } - *f = append(*f, tmp) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *Int64Slice) String() string { - return fmt.Sprintf("%#v", *f) -} - -// Value returns the slice of ints set by this flag -func (f *Int64Slice) Value() []int64 { - return *f -} - -// Apply populates the flag given the flag set and environment -func (f Int64SliceFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &Int64Slice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(ErrWriter, err.Error()) - } - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &Int64Slice{} - } - set.Var(f.Value, name, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f BoolFlag) Apply(set *flag.FlagSet) { - val := false - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - } - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } - set.Bool(name, val, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f BoolTFlag) Apply(set *flag.FlagSet) { - val := true - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - break - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } - set.Bool(name, val, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f StringFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - f.Value = envVal - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.StringVar(f.Destination, name, f.Value, f.Usage) - return - } - set.String(name, f.Value, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f IntFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err == nil { - f.Value = int(envValInt) - break - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.IntVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Int(name, f.Value, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f Int64Flag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = 
strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err == nil { - f.Value = envValInt - break - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Int64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Int64(name, f.Value, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f UintFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseUint(envVal, 0, 64) - if err == nil { - f.Value = uint(envValInt) - break - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.UintVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Uint(name, f.Value, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f Uint64Flag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseUint(envVal, 0, 64) - if err == nil { - f.Value = uint64(envValInt) - break - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Uint64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Uint64(name, f.Value, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f DurationFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValDuration, err := time.ParseDuration(envVal) - if err == nil { - f.Value = envValDuration - break - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.DurationVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Duration(name, f.Value, f.Usage) - }) -} - -// Apply populates the flag given the flag set and environment -func (f Float64Flag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err == nil { - f.Value = float64(envValFloat) - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Float64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Float64(name, f.Value, f.Usage) - }) -} - -func visibleFlags(fl []Flag) []Flag { - visible := []Flag{} - for _, flag := range fl { - if !flagValue(flag).FieldByName("Hidden").Bool() { - visible = append(visible, flag) - } - } - return visible -} - -func prefixFor(name string) (prefix string) { - if len(name) == 1 { - prefix = "-" - } else { - prefix = "--" - } - - return -} - -// Returns the placeholder, if any, and the unquoted usage string. 
-func unquoteUsage(usage string) (string, string) { - for i := 0; i < len(usage); i++ { - if usage[i] == '`' { - for j := i + 1; j < len(usage); j++ { - if usage[j] == '`' { - name := usage[i+1 : j] - usage = usage[:i] + name + usage[j+1:] - return name, usage - } - } - break - } - } - return "", usage -} - -func prefixedNames(fullName, placeholder string) string { - var prefixed string - parts := strings.Split(fullName, ",") - for i, name := range parts { - name = strings.Trim(name, " ") - prefixed += prefixFor(name) + name - if placeholder != "" { - prefixed += " " + placeholder - } - if i < len(parts)-1 { - prefixed += ", " - } - } - return prefixed -} - -func withEnvHint(envVar, str string) string { - envText := "" - if envVar != "" { - prefix := "$" - suffix := "" - sep := ", $" - if runtime.GOOS == "windows" { - prefix = "%" - suffix = "%" - sep = "%, %" - } - envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix) - } - return str + envText -} - -func flagValue(f Flag) reflect.Value { - fv := reflect.ValueOf(f) - for fv.Kind() == reflect.Ptr { - fv = reflect.Indirect(fv) - } - return fv -} - -func stringifyFlag(f Flag) string { - fv := flagValue(f) - - switch f.(type) { - case IntSliceFlag: - return withEnvHint(fv.FieldByName("EnvVar").String(), - stringifyIntSliceFlag(f.(IntSliceFlag))) - case Int64SliceFlag: - return withEnvHint(fv.FieldByName("EnvVar").String(), - stringifyInt64SliceFlag(f.(Int64SliceFlag))) - case StringSliceFlag: - return withEnvHint(fv.FieldByName("EnvVar").String(), - stringifyStringSliceFlag(f.(StringSliceFlag))) - } - - placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) - - needsPlaceholder := false - defaultValueString := "" - val := fv.FieldByName("Value") - - if val.IsValid() { - needsPlaceholder = true - defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) - - if val.Kind() == reflect.String && val.String() != "" { - defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) - } - } - - if defaultValueString == " (default: )" { - defaultValueString = "" - } - - if needsPlaceholder && placeholder == "" { - placeholder = defaultPlaceholder - } - - usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) - - return withEnvHint(fv.FieldByName("EnvVar").String(), - fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) -} - -func stringifyIntSliceFlag(f IntSliceFlag) string { - defaultVals := []string{} - if f.Value != nil && len(f.Value.Value()) > 0 { - for _, i := range f.Value.Value() { - defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) - } - } - - return stringifySliceFlag(f.Usage, f.Name, defaultVals) -} - -func stringifyInt64SliceFlag(f Int64SliceFlag) string { - defaultVals := []string{} - if f.Value != nil && len(f.Value.Value()) > 0 { - for _, i := range f.Value.Value() { - defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) - } - } - - return stringifySliceFlag(f.Usage, f.Name, defaultVals) -} - -func stringifyStringSliceFlag(f StringSliceFlag) string { - defaultVals := []string{} - if f.Value != nil && len(f.Value.Value()) > 0 { - for _, s := range f.Value.Value() { - if len(s) > 0 { - defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) - } - } - } - - return stringifySliceFlag(f.Usage, f.Name, defaultVals) -} - -func stringifySliceFlag(usage, name string, defaultVals []string) string { - placeholder, usage := unquoteUsage(usage) - if placeholder == "" { - placeholder = 
defaultPlaceholder - } - - defaultVal := "" - if len(defaultVals) > 0 { - defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) - } - - usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) - return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault) -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag_generated.go deleted file mode 100644 index 491b619..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/flag_generated.go +++ /dev/null @@ -1,627 +0,0 @@ -package cli - -import ( - "flag" - "strconv" - "time" -) - -// WARNING: This file is generated! - -// BoolFlag is a flag with type bool -type BoolFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Destination *bool -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f BoolFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f BoolFlag) GetName() string { - return f.Name -} - -// Bool looks up the value of a local BoolFlag, returns -// false if not found -func (c *Context) Bool(name string) bool { - return lookupBool(name, c.flagSet) -} - -// GlobalBool looks up the value of a global BoolFlag, returns -// false if not found -func (c *Context) GlobalBool(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBool(name, fs) - } - return false -} - -func lookupBool(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return parsed - } - return false -} - -// BoolTFlag is a flag with type bool that is true by default -type BoolTFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Destination *bool -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f BoolTFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f BoolTFlag) GetName() string { - return f.Name -} - -// BoolT looks up the value of a local BoolTFlag, returns -// false if not found -func (c *Context) BoolT(name string) bool { - return lookupBoolT(name, c.flagSet) -} - -// GlobalBoolT looks up the value of a global BoolTFlag, returns -// false if not found -func (c *Context) GlobalBoolT(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBoolT(name, fs) - } - return false -} - -func lookupBoolT(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return parsed - } - return false -} - -// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) -type DurationFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value time.Duration - Destination *time.Duration -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f DurationFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f DurationFlag) GetName() string { - return f.Name -} - -// Duration looks up the value of a local DurationFlag, returns -// 0 if not found -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, 
c.flagSet) -} - -// GlobalDuration looks up the value of a global DurationFlag, returns -// 0 if not found -func (c *Context) GlobalDuration(name string) time.Duration { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupDuration(name, fs) - } - return 0 -} - -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - parsed, err := time.ParseDuration(f.Value.String()) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// Float64Flag is a flag with type float64 -type Float64Flag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value float64 - Destination *float64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Float64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Float64Flag) GetName() string { - return f.Name -} - -// Float64 looks up the value of a local Float64Flag, returns -// 0 if not found -func (c *Context) Float64(name string) float64 { - return lookupFloat64(name, c.flagSet) -} - -// GlobalFloat64 looks up the value of a global Float64Flag, returns -// 0 if not found -func (c *Context) GlobalFloat64(name string) float64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupFloat64(name, fs) - } - return 0 -} - -func lookupFloat64(name string, set *flag.FlagSet) float64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseFloat(f.Value.String(), 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// GenericFlag is a flag with type Generic -type GenericFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value Generic -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f GenericFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f GenericFlag) GetName() string { - return f.Name -} - -// Generic looks up the value of a local GenericFlag, returns -// nil if not found -func (c *Context) Generic(name string) interface{} { - return lookupGeneric(name, c.flagSet) -} - -// GlobalGeneric looks up the value of a global GenericFlag, returns -// nil if not found -func (c *Context) GlobalGeneric(name string) interface{} { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupGeneric(name, fs) - } - return nil -} - -func lookupGeneric(name string, set *flag.FlagSet) interface{} { - f := set.Lookup(name) - if f != nil { - parsed, err := f.Value, error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// Int64Flag is a flag with type int64 -type Int64Flag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value int64 - Destination *int64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Int64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Int64Flag) GetName() string { - return f.Name -} - -// Int64 looks up the value of a local Int64Flag, returns -// 0 if not found -func (c *Context) Int64(name string) int64 { - return lookupInt64(name, c.flagSet) -} - -// GlobalInt64 looks up the value of a global Int64Flag, returns -// 0 if not found -func (c *Context) GlobalInt64(name string) int64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt64(name, fs) - } - return 0 -} - -func lookupInt64(name string, set *flag.FlagSet) int64 { - f := 
set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// IntFlag is a flag with type int -type IntFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value int - Destination *int -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f IntFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f IntFlag) GetName() string { - return f.Name -} - -// Int looks up the value of a local IntFlag, returns -// 0 if not found -func (c *Context) Int(name string) int { - return lookupInt(name, c.flagSet) -} - -// GlobalInt looks up the value of a global IntFlag, returns -// 0 if not found -func (c *Context) GlobalInt(name string) int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt(name, fs) - } - return 0 -} - -func lookupInt(name string, set *flag.FlagSet) int { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return int(parsed) - } - return 0 -} - -// IntSliceFlag is a flag with type *IntSlice -type IntSliceFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value *IntSlice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f IntSliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f IntSliceFlag) GetName() string { - return f.Name -} - -// IntSlice looks up the value of a local IntSliceFlag, returns -// nil if not found -func (c *Context) IntSlice(name string) []int { - return lookupIntSlice(name, c.flagSet) -} - -// GlobalIntSlice looks up the value of a global IntSliceFlag, returns -// nil if not found -func (c *Context) GlobalIntSlice(name string) []int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupIntSlice(name, fs) - } - return nil -} - -func lookupIntSlice(name string, set *flag.FlagSet) []int { - f := set.Lookup(name) - if f != nil { - parsed, err := (f.Value.(*IntSlice)).Value(), error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// Int64SliceFlag is a flag with type *Int64Slice -type Int64SliceFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value *Int64Slice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Int64SliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Int64SliceFlag) GetName() string { - return f.Name -} - -// Int64Slice looks up the value of a local Int64SliceFlag, returns -// nil if not found -func (c *Context) Int64Slice(name string) []int64 { - return lookupInt64Slice(name, c.flagSet) -} - -// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns -// nil if not found -func (c *Context) GlobalInt64Slice(name string) []int64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt64Slice(name, fs) - } - return nil -} - -func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { - f := set.Lookup(name) - if f != nil { - parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// StringFlag is a flag with type string -type StringFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value string - Destination *string 
-} - -// String returns a readable representation of this value -// (for usage defaults) -func (f StringFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f StringFlag) GetName() string { - return f.Name -} - -// String looks up the value of a local StringFlag, returns -// "" if not found -func (c *Context) String(name string) string { - return lookupString(name, c.flagSet) -} - -// GlobalString looks up the value of a global StringFlag, returns -// "" if not found -func (c *Context) GlobalString(name string) string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupString(name, fs) - } - return "" -} - -func lookupString(name string, set *flag.FlagSet) string { - f := set.Lookup(name) - if f != nil { - parsed, err := f.Value.String(), error(nil) - if err != nil { - return "" - } - return parsed - } - return "" -} - -// StringSliceFlag is a flag with type *StringSlice -type StringSliceFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value *StringSlice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f StringSliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f StringSliceFlag) GetName() string { - return f.Name -} - -// StringSlice looks up the value of a local StringSliceFlag, returns -// nil if not found -func (c *Context) StringSlice(name string) []string { - return lookupStringSlice(name, c.flagSet) -} - -// GlobalStringSlice looks up the value of a global StringSliceFlag, returns -// nil if not found -func (c *Context) GlobalStringSlice(name string) []string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupStringSlice(name, fs) - } - return nil -} - -func lookupStringSlice(name string, set *flag.FlagSet) []string { - f := set.Lookup(name) - if f != nil { - parsed, err := (f.Value.(*StringSlice)).Value(), error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// Uint64Flag is a flag with type uint64 -type Uint64Flag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value uint64 - Destination *uint64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Uint64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Uint64Flag) GetName() string { - return f.Name -} - -// Uint64 looks up the value of a local Uint64Flag, returns -// 0 if not found -func (c *Context) Uint64(name string) uint64 { - return lookupUint64(name, c.flagSet) -} - -// GlobalUint64 looks up the value of a global Uint64Flag, returns -// 0 if not found -func (c *Context) GlobalUint64(name string) uint64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupUint64(name, fs) - } - return 0 -} - -func lookupUint64(name string, set *flag.FlagSet) uint64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// UintFlag is a flag with type uint -type UintFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value uint - Destination *uint -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f UintFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f UintFlag) GetName() string { - return f.Name -} - -// Uint looks up the value of 
a local UintFlag, returns -// 0 if not found -func (c *Context) Uint(name string) uint { - return lookupUint(name, c.flagSet) -} - -// GlobalUint looks up the value of a global UintFlag, returns -// 0 if not found -func (c *Context) GlobalUint(name string) uint { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupUint(name, fs) - } - return 0 -} - -func lookupUint(name string, set *flag.FlagSet) uint { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return uint(parsed) - } - return 0 -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/funcs.go deleted file mode 100644 index cba5e6c..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/funcs.go +++ /dev/null @@ -1,28 +0,0 @@ -package cli - -// BashCompleteFunc is an action to execute when the bash-completion flag is set -type BashCompleteFunc func(*Context) - -// BeforeFunc is an action to execute before any subcommands are run, but after -// the context is ready if a non-nil error is returned, no subcommands are run -type BeforeFunc func(*Context) error - -// AfterFunc is an action to execute after any subcommands are run, but after the -// subcommand has finished it is run even if Action() panics -type AfterFunc func(*Context) error - -// ActionFunc is the action to execute when no subcommands are specified -type ActionFunc func(*Context) error - -// CommandNotFoundFunc is executed if the proper command cannot be found -type CommandNotFoundFunc func(*Context, string) - -// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying -// customized usage error messages. This function is able to replace the -// original error messages. If this function is not set, the "Incorrect usage" -// is displayed and the execution is interrupted. -type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error - -// FlagStringFunc is used by the help generation to display a flag, which is -// expected to be a single line. -type FlagStringFunc func(Flag) string diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/generate-flag-types b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/generate-flag-types deleted file mode 100755 index 47a168b..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/generate-flag-types +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/env python -""" -The flag types that ship with the cli library have many things in common, and -so we can take advantage of the `go generate` command to create much of the -source code from a list of definitions. These definitions attempt to cover -the parts that vary between flag types, and should evolve as needed. - -An example of the minimum definition needed is: - - { - "name": "SomeType", - "type": "sometype", - "context_default": "nil" - } - -In this example, the code generated for the `cli` package will include a type -named `SomeTypeFlag` that is expected to wrap a value of type `sometype`. -Fetching values by name via `*cli.Context` will default to a value of `nil`. 
- -A more complete, albeit somewhat redundant, example showing all available -definition keys is: - - { - "name": "VeryMuchType", - "type": "*VeryMuchType", - "value": true, - "dest": false, - "doctail": " which really only wraps a []float64, oh well!", - "context_type": "[]float64", - "context_default": "nil", - "parser": "parseVeryMuchType(f.Value.String())", - "parser_cast": "[]float64(parsed)" - } - -The meaning of each field is as follows: - - name (string) - The type "name", which will be suffixed with - `Flag` when generating the type definition - for `cli` and the wrapper type for `altsrc` - type (string) - The type that the generated `Flag` type for `cli` - is expected to "contain" as its `.Value` member - value (bool) - Should the generated `cli` type have a `Value` - member? - dest (bool) - Should the generated `cli` type support a - destination pointer? - doctail (string) - Additional docs for the `cli` flag type comment - context_type (string) - The literal type used in the `*cli.Context` - reader func signature - context_default (string) - The literal value used as the default by the - `*cli.Context` reader funcs when no value is - present - parser (string) - Literal code used to parse the flag `f`, - expected to have a return signature of - (value, error) - parser_cast (string) - Literal code used to cast the `parsed` value - returned from the `parser` code -""" - -from __future__ import print_function, unicode_literals - -import argparse -import json -import os -import subprocess -import sys -import tempfile -import textwrap - - -class _FancyFormatter(argparse.ArgumentDefaultsHelpFormatter, - argparse.RawDescriptionHelpFormatter): - pass - - -def main(sysargs=sys.argv[:]): - parser = argparse.ArgumentParser( - description='Generate flag type code!', - formatter_class=_FancyFormatter) - parser.add_argument( - 'package', - type=str, default='cli', choices=_WRITEFUNCS.keys(), - help='Package for which flag types will be generated' - ) - parser.add_argument( - '-i', '--in-json', - type=argparse.FileType('r'), - default=sys.stdin, - help='Input JSON file which defines each type to be generated' - ) - parser.add_argument( - '-o', '--out-go', - type=argparse.FileType('w'), - default=sys.stdout, - help='Output file/stream to which generated source will be written' - ) - parser.epilog = __doc__ - - args = parser.parse_args(sysargs[1:]) - _generate_flag_types(_WRITEFUNCS[args.package], args.out_go, args.in_json) - return 0 - - -def _generate_flag_types(writefunc, output_go, input_json): - types = json.load(input_json) - - tmp = tempfile.NamedTemporaryFile(suffix='.go', delete=False) - writefunc(tmp, types) - tmp.close() - - new_content = subprocess.check_output( - ['goimports', tmp.name] - ).decode('utf-8') - - print(new_content, file=output_go, end='') - output_go.flush() - os.remove(tmp.name) - - -def _set_typedef_defaults(typedef): - typedef.setdefault('doctail', '') - typedef.setdefault('context_type', typedef['type']) - typedef.setdefault('dest', True) - typedef.setdefault('value', True) - typedef.setdefault('parser', 'f.Value, error(nil)') - typedef.setdefault('parser_cast', 'parsed') - - -def _write_cli_flag_types(outfile, types): - _fwrite(outfile, """\ - package cli - - // WARNING: This file is generated! 
- - """) - - for typedef in types: - _set_typedef_defaults(typedef) - - _fwrite(outfile, """\ - // {name}Flag is a flag with type {type}{doctail} - type {name}Flag struct {{ - Name string - Usage string - EnvVar string - Hidden bool - """.format(**typedef)) - - if typedef['value']: - _fwrite(outfile, """\ - Value {type} - """.format(**typedef)) - - if typedef['dest']: - _fwrite(outfile, """\ - Destination *{type} - """.format(**typedef)) - - _fwrite(outfile, "\n}\n\n") - - _fwrite(outfile, """\ - // String returns a readable representation of this value - // (for usage defaults) - func (f {name}Flag) String() string {{ - return FlagStringer(f) - }} - - // GetName returns the name of the flag - func (f {name}Flag) GetName() string {{ - return f.Name - }} - - // {name} looks up the value of a local {name}Flag, returns - // {context_default} if not found - func (c *Context) {name}(name string) {context_type} {{ - return lookup{name}(name, c.flagSet) - }} - - // Global{name} looks up the value of a global {name}Flag, returns - // {context_default} if not found - func (c *Context) Global{name}(name string) {context_type} {{ - if fs := lookupGlobalFlagSet(name, c); fs != nil {{ - return lookup{name}(name, fs) - }} - return {context_default} - }} - - func lookup{name}(name string, set *flag.FlagSet) {context_type} {{ - f := set.Lookup(name) - if f != nil {{ - parsed, err := {parser} - if err != nil {{ - return {context_default} - }} - return {parser_cast} - }} - return {context_default} - }} - """.format(**typedef)) - - -def _write_altsrc_flag_types(outfile, types): - _fwrite(outfile, """\ - package altsrc - - import ( - "gopkg.in/urfave/cli.v1" - ) - - // WARNING: This file is generated! - - """) - - for typedef in types: - _set_typedef_defaults(typedef) - - _fwrite(outfile, """\ - // {name}Flag is the flag type that wraps cli.{name}Flag to allow - // for other values to be specified - type {name}Flag struct {{ - cli.{name}Flag - set *flag.FlagSet - }} - - // New{name}Flag creates a new {name}Flag - func New{name}Flag(fl cli.{name}Flag) *{name}Flag {{ - return &{name}Flag{{{name}Flag: fl, set: nil}} - }} - - // Apply saves the flagSet for later usage calls, then calls the - // wrapped {name}Flag.Apply - func (f *{name}Flag) Apply(set *flag.FlagSet) {{ - f.set = set - f.{name}Flag.Apply(set) - }} - """.format(**typedef)) - - -def _fwrite(outfile, text): - print(textwrap.dedent(text), end='', file=outfile) - - -_WRITEFUNCS = { - 'cli': _write_cli_flag_types, - 'altsrc': _write_altsrc_flag_types -} - -if __name__ == '__main__': - sys.exit(main()) diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/help.go b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/help.go deleted file mode 100644 index ba34719..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/help.go +++ /dev/null @@ -1,267 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "os" - "strings" - "text/tabwriter" - "text/template" -) - -// AppHelpTemplate is the text template for the Default help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. 
-var AppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} - {{if .Version}}{{if not .HideVersion}} -VERSION: - {{.Version}} - {{end}}{{end}}{{if len .Authors}} -AUTHOR(S): - {{range .Authors}}{{.}}{{end}} - {{end}}{{if .VisibleCommands}} -COMMANDS:{{range .VisibleCategories}}{{if .Name}} - {{.Name}}:{{end}}{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} -{{end}}{{end}}{{if .VisibleFlags}} -GLOBAL OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}}{{if .Copyright}} -COPYRIGHT: - {{.Copyright}} - {{end}} -` - -// CommandHelpTemplate is the text template for the command help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var CommandHelpTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}} - -CATEGORY: - {{.Category}}{{end}}{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if .VisibleFlags}} - -OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -` - -// SubcommandHelpTemplate is the text template for the subcommand help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var SubcommandHelpTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} - -COMMANDS:{{range .VisibleCategories}}{{if .Name}} - {{.Name}}:{{end}}{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} -{{end}}{{if .VisibleFlags}} -OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -` - -var helpCommand = Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "Shows a list of commands or help for one command", - ArgsUsage: "[command]", - Action: func(c *Context) error { - args := c.Args() - if args.Present() { - return ShowCommandHelp(c, args.First()) - } - - ShowAppHelp(c) - return nil - }, -} - -var helpSubcommand = Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "Shows a list of commands or help for one command", - ArgsUsage: "[command]", - Action: func(c *Context) error { - args := c.Args() - if args.Present() { - return ShowCommandHelp(c, args.First()) - } - - return ShowSubcommandHelp(c) - }, -} - -// Prints help for the App or Command -type helpPrinter func(w io.Writer, templ string, data interface{}) - -// HelpPrinter is a function that writes the help output. If not set a default -// is used. The function signature is: -// func(w io.Writer, templ string, data interface{}) -var HelpPrinter helpPrinter = printHelp - -// VersionPrinter prints the version for the App -var VersionPrinter = printVersion - -// ShowAppHelp is an action that displays the help. 
-func ShowAppHelp(c *Context) error { - HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) - return nil -} - -// DefaultAppComplete prints the list of subcommands as the default app completion method -func DefaultAppComplete(c *Context) { - for _, command := range c.App.Commands { - if command.Hidden { - continue - } - for _, name := range command.Names() { - fmt.Fprintln(c.App.Writer, name) - } - } -} - -// ShowCommandHelp prints help for the given command -func ShowCommandHelp(ctx *Context, command string) error { - // show the subcommand help for a command with subcommands - if command == "" { - HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) - return nil - } - - for _, c := range ctx.App.Commands { - if c.HasName(command) { - HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) - return nil - } - } - - if ctx.App.CommandNotFound == nil { - return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) - } - - ctx.App.CommandNotFound(ctx, command) - return nil -} - -// ShowSubcommandHelp prints help for the given subcommand -func ShowSubcommandHelp(c *Context) error { - return ShowCommandHelp(c, c.Command.Name) -} - -// ShowVersion prints the version number of the App -func ShowVersion(c *Context) { - VersionPrinter(c) -} - -func printVersion(c *Context) { - fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) -} - -// ShowCompletions prints the lists of commands within a given context -func ShowCompletions(c *Context) { - a := c.App - if a != nil && a.BashComplete != nil { - a.BashComplete(c) - } -} - -// ShowCommandCompletions prints the custom completions for a given command -func ShowCommandCompletions(ctx *Context, command string) { - c := ctx.App.Command(command) - if c != nil && c.BashComplete != nil { - c.BashComplete(ctx) - } -} - -func printHelp(out io.Writer, templ string, data interface{}) { - funcMap := template.FuncMap{ - "join": strings.Join, - } - - w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) - t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) - err := t.Execute(w, data) - if err != nil { - // If the writer is closed, t.Execute will fail, and there's nothing - // we can do to recover. 
- if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { - fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) - } - return - } - w.Flush() -} - -func checkVersion(c *Context) bool { - found := false - if VersionFlag.Name != "" { - eachName(VersionFlag.Name, func(name string) { - if c.GlobalBool(name) || c.Bool(name) { - found = true - } - }) - } - return found -} - -func checkHelp(c *Context) bool { - found := false - if HelpFlag.Name != "" { - eachName(HelpFlag.Name, func(name string) { - if c.GlobalBool(name) || c.Bool(name) { - found = true - } - }) - } - return found -} - -func checkCommandHelp(c *Context, name string) bool { - if c.Bool("h") || c.Bool("help") { - ShowCommandHelp(c, name) - return true - } - - return false -} - -func checkSubcommandHelp(c *Context) bool { - if c.Bool("h") || c.Bool("help") { - ShowSubcommandHelp(c) - return true - } - - return false -} - -func checkCompletions(c *Context) bool { - if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { - ShowCompletions(c) - return true - } - - return false -} - -func checkCommandCompletions(c *Context, name string) bool { - if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { - ShowCommandCompletions(c, name) - return true - } - - return false -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/runtests b/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/runtests deleted file mode 100755 index ee22bde..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/github.com/urfave/cli/runtests +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function - -import argparse -import os -import sys -import tempfile - -from subprocess import check_call, check_output - - -PACKAGE_NAME = os.environ.get( - 'CLI_PACKAGE_NAME', 'github.com/urfave/cli' -) - - -def main(sysargs=sys.argv[:]): - targets = { - 'vet': _vet, - 'test': _test, - 'gfmrun': _gfmrun, - 'toc': _toc, - 'gen': _gen, - } - - parser = argparse.ArgumentParser() - parser.add_argument( - 'target', nargs='?', choices=tuple(targets.keys()), default='test' - ) - args = parser.parse_args(sysargs[1:]) - - targets[args.target]() - return 0 - - -def _test(): - if check_output('go version'.split()).split()[2] < 'go1.2': - _run('go test -v .') - return - - coverprofiles = [] - for subpackage in ['', 'altsrc']: - coverprofile = 'cli.coverprofile' - if subpackage != '': - coverprofile = '{}.coverprofile'.format(subpackage) - - coverprofiles.append(coverprofile) - - _run('go test -v'.split() + [ - '-coverprofile={}'.format(coverprofile), - ('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/') - ]) - - combined_name = _combine_coverprofiles(coverprofiles) - _run('go tool cover -func={}'.format(combined_name)) - os.remove(combined_name) - - -def _gfmrun(): - go_version = check_output('go version'.split()).split()[2] - if go_version < 'go1.3': - print('runtests: skip on {}'.format(go_version), file=sys.stderr) - return - _run(['gfmrun', '-c', str(_gfmrun_count()), '-s', 'README.md']) - - -def _vet(): - _run('go vet ./...') - - -def _toc(): - _run('node_modules/.bin/markdown-toc -i README.md') - _run('git diff --exit-code') - - -def _gen(): - go_version = check_output('go version'.split()).split()[2] - if go_version < 'go1.5': - print('runtests: skip on {}'.format(go_version), file=sys.stderr) - return - - _run('go generate ./...') - _run('git diff --exit-code') - - -def _run(command): - if hasattr(command, 'split'): - command = 
command.split() - print('runtests: {}'.format(' '.join(command)), file=sys.stderr) - check_call(command) - - -def _gfmrun_count(): - with open('README.md') as infile: - lines = infile.read().splitlines() - return len(filter(_is_go_runnable, lines)) - - -def _is_go_runnable(line): - return line.startswith('package main') - - -def _combine_coverprofiles(coverprofiles): - combined = tempfile.NamedTemporaryFile( - suffix='.coverprofile', delete=False - ) - combined.write('mode: set\n') - - for coverprofile in coverprofiles: - with open(coverprofile, 'r') as infile: - for line in infile.readlines(): - if not line.startswith('mode: '): - combined.write(line) - - combined.flush() - name = combined.name - combined.close() - return name - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/LICENSE b/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/PATENTS b/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/terminal.go deleted file mode 100644 index 965f0cf..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ /dev/null @@ -1,888 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package terminal - -import ( - "bytes" - "io" - "sync" - "unicode/utf8" -) - -// EscapeCodes contains escape sequences that can be written to the terminal in -// order to achieve different styles of text. -type EscapeCodes struct { - // Foreground colors - Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte - - // Reset all attributes - Reset []byte -} - -var vt100EscapeCodes = EscapeCodes{ - Black: []byte{keyEscape, '[', '3', '0', 'm'}, - Red: []byte{keyEscape, '[', '3', '1', 'm'}, - Green: []byte{keyEscape, '[', '3', '2', 'm'}, - Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, - Blue: []byte{keyEscape, '[', '3', '4', 'm'}, - Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, - Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, - White: []byte{keyEscape, '[', '3', '7', 'm'}, - - Reset: []byte{keyEscape, '[', '0', 'm'}, -} - -// Terminal contains the state for running a VT100 terminal that is capable of -// reading lines of input. -type Terminal struct { - // AutoCompleteCallback, if non-null, is called for each keypress with - // the full input line and the current position of the cursor (in - // bytes, as an index into |line|). If it returns ok=false, the key - // press is processed normally. Otherwise it returns a replacement line - // and the new cursor position. - AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) - - // Escape contains a pointer to the escape codes for this terminal. - // It's always a valid pointer, although the escape codes themselves - // may be empty if the terminal doesn't support them. - Escape *EscapeCodes - - // lock protects the terminal and the state in this object from - // concurrent processing of a key press and a Write() call. - lock sync.Mutex - - c io.ReadWriter - prompt []rune - - // line is the current line being entered. 
- line []rune - // pos is the logical position of the cursor in line - pos int - // echo is true if local echo is enabled - echo bool - // pasteActive is true iff there is a bracketed paste operation in - // progress. - pasteActive bool - - // cursorX contains the current X value of the cursor where the left - // edge is 0. cursorY contains the row number where the first row of - // the current line is 0. - cursorX, cursorY int - // maxLine is the greatest value of cursorY so far. - maxLine int - - termWidth, termHeight int - - // outBuf contains the terminal data to be sent. - outBuf []byte - // remainder contains the remainder of any partial key sequences after - // a read. It aliases into inBuf. - remainder []byte - inBuf [256]byte - - // history contains previously entered commands so that they can be - // accessed with the up and down keys. - history stRingBuffer - // historyIndex stores the currently accessed history entry, where zero - // means the immediately previous entry. - historyIndex int - // When navigating up and down the history it's possible to return to - // the incomplete, initial line. That value is stored in - // historyPending. - historyPending string -} - -// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is -// a local terminal, that terminal must first have been put into raw mode. -// prompt is a string that is written at the start of each input line (i.e. -// "> "). -func NewTerminal(c io.ReadWriter, prompt string) *Terminal { - return &Terminal{ - Escape: &vt100EscapeCodes, - c: c, - prompt: []rune(prompt), - termWidth: 80, - termHeight: 24, - echo: true, - historyIndex: -1, - } -} - -const ( - keyCtrlD = 4 - keyCtrlU = 21 - keyEnter = '\r' - keyEscape = 27 - keyBackspace = 127 - keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota - keyUp - keyDown - keyLeft - keyRight - keyAltLeft - keyAltRight - keyHome - keyEnd - keyDeleteWord - keyDeleteLine - keyClearScreen - keyPasteStart - keyPasteEnd -) - -var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} -var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} - -// bytesToKey tries to parse a key sequence from b. If successful, it returns -// the key and the remainder of the input. Otherwise it returns utf8.RuneError. 
-func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { - if len(b) == 0 { - return utf8.RuneError, nil - } - - if !pasteActive { - switch b[0] { - case 1: // ^A - return keyHome, b[1:] - case 5: // ^E - return keyEnd, b[1:] - case 8: // ^H - return keyBackspace, b[1:] - case 11: // ^K - return keyDeleteLine, b[1:] - case 12: // ^L - return keyClearScreen, b[1:] - case 23: // ^W - return keyDeleteWord, b[1:] - } - } - - if b[0] != keyEscape { - if !utf8.FullRune(b) { - return utf8.RuneError, b - } - r, l := utf8.DecodeRune(b) - return r, b[l:] - } - - if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { - switch b[2] { - case 'A': - return keyUp, b[3:] - case 'B': - return keyDown, b[3:] - case 'C': - return keyRight, b[3:] - case 'D': - return keyLeft, b[3:] - case 'H': - return keyHome, b[3:] - case 'F': - return keyEnd, b[3:] - } - } - - if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { - switch b[5] { - case 'C': - return keyAltRight, b[6:] - case 'D': - return keyAltLeft, b[6:] - } - } - - if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { - return keyPasteStart, b[6:] - } - - if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { - return keyPasteEnd, b[6:] - } - - // If we get here then we have a key that we don't recognise, or a - // partial sequence. It's not clear how one should find the end of a - // sequence without knowing them all, but it seems that [a-zA-Z~] only - // appears at the end of a sequence. - for i, c := range b[0:] { - if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { - return keyUnknown, b[i+1:] - } - } - - return utf8.RuneError, b -} - -// queue appends data to the end of t.outBuf -func (t *Terminal) queue(data []rune) { - t.outBuf = append(t.outBuf, []byte(string(data))...) -} - -var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} -var space = []rune{' '} - -func isPrintable(key rune) bool { - isInSurrogateArea := key >= 0xd800 && key <= 0xdbff - return key >= 32 && !isInSurrogateArea -} - -// moveCursorToPos appends data to t.outBuf which will move the cursor to the -// given, logical position in the text. 
-func (t *Terminal) moveCursorToPos(pos int) { - if !t.echo { - return - } - - x := visualLength(t.prompt) + pos - y := x / t.termWidth - x = x % t.termWidth - - up := 0 - if y < t.cursorY { - up = t.cursorY - y - } - - down := 0 - if y > t.cursorY { - down = y - t.cursorY - } - - left := 0 - if x < t.cursorX { - left = t.cursorX - x - } - - right := 0 - if x > t.cursorX { - right = x - t.cursorX - } - - t.cursorX = x - t.cursorY = y - t.move(up, down, left, right) -} - -func (t *Terminal) move(up, down, left, right int) { - movement := make([]rune, 3*(up+down+left+right)) - m := movement - for i := 0; i < up; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'A' - m = m[3:] - } - for i := 0; i < down; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'B' - m = m[3:] - } - for i := 0; i < left; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'D' - m = m[3:] - } - for i := 0; i < right; i++ { - m[0] = keyEscape - m[1] = '[' - m[2] = 'C' - m = m[3:] - } - - t.queue(movement) -} - -func (t *Terminal) clearLineToRight() { - op := []rune{keyEscape, '[', 'K'} - t.queue(op) -} - -const maxLineLength = 4096 - -func (t *Terminal) setLine(newLine []rune, newPos int) { - if t.echo { - t.moveCursorToPos(0) - t.writeLine(newLine) - for i := len(newLine); i < len(t.line); i++ { - t.writeLine(space) - } - t.moveCursorToPos(newPos) - } - t.line = newLine - t.pos = newPos -} - -func (t *Terminal) advanceCursor(places int) { - t.cursorX += places - t.cursorY += t.cursorX / t.termWidth - if t.cursorY > t.maxLine { - t.maxLine = t.cursorY - } - t.cursorX = t.cursorX % t.termWidth - - if places > 0 && t.cursorX == 0 { - // Normally terminals will advance the current position - // when writing a character. But that doesn't happen - // for the last character in a line. However, when - // writing a character (except a new line) that causes - // a line wrap, the position will be advanced two - // places. - // - // So, if we are stopping at the end of a line, we - // need to write a newline so that our cursor can be - // advanced to the next line. - t.outBuf = append(t.outBuf, '\n') - } -} - -func (t *Terminal) eraseNPreviousChars(n int) { - if n == 0 { - return - } - - if t.pos < n { - n = t.pos - } - t.pos -= n - t.moveCursorToPos(t.pos) - - copy(t.line[t.pos:], t.line[n+t.pos:]) - t.line = t.line[:len(t.line)-n] - if t.echo { - t.writeLine(t.line[t.pos:]) - for i := 0; i < n; i++ { - t.queue(space) - } - t.advanceCursor(n) - t.moveCursorToPos(t.pos) - } -} - -// countToLeftWord returns then number of characters from the cursor to the -// start of the previous word. -func (t *Terminal) countToLeftWord() int { - if t.pos == 0 { - return 0 - } - - pos := t.pos - 1 - for pos > 0 { - if t.line[pos] != ' ' { - break - } - pos-- - } - for pos > 0 { - if t.line[pos] == ' ' { - pos++ - break - } - pos-- - } - - return t.pos - pos -} - -// countToRightWord returns then number of characters from the cursor to the -// start of the next word. -func (t *Terminal) countToRightWord() int { - pos := t.pos - for pos < len(t.line) { - if t.line[pos] == ' ' { - break - } - pos++ - } - for pos < len(t.line) { - if t.line[pos] != ' ' { - break - } - pos++ - } - return pos - t.pos -} - -// visualLength returns the number of visible glyphs in s. 
-func visualLength(runes []rune) int { - inEscapeSeq := false - length := 0 - - for _, r := range runes { - switch { - case inEscapeSeq: - if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { - inEscapeSeq = false - } - case r == '\x1b': - inEscapeSeq = true - default: - length++ - } - } - - return length -} - -// handleKey processes the given key and, optionally, returns a line of text -// that the user has entered. -func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { - t.addKeyToLine(key) - return - } - - switch key { - case keyBackspace: - if t.pos == 0 { - return - } - t.eraseNPreviousChars(1) - case keyAltLeft: - // move left by a word. - t.pos -= t.countToLeftWord() - t.moveCursorToPos(t.pos) - case keyAltRight: - // move right by a word. - t.pos += t.countToRightWord() - t.moveCursorToPos(t.pos) - case keyLeft: - if t.pos == 0 { - return - } - t.pos-- - t.moveCursorToPos(t.pos) - case keyRight: - if t.pos == len(t.line) { - return - } - t.pos++ - t.moveCursorToPos(t.pos) - case keyHome: - if t.pos == 0 { - return - } - t.pos = 0 - t.moveCursorToPos(t.pos) - case keyEnd: - if t.pos == len(t.line) { - return - } - t.pos = len(t.line) - t.moveCursorToPos(t.pos) - case keyUp: - entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) - if !ok { - return "", false - } - if t.historyIndex == -1 { - t.historyPending = string(t.line) - } - t.historyIndex++ - runes := []rune(entry) - t.setLine(runes, len(runes)) - case keyDown: - switch t.historyIndex { - case -1: - return - case 0: - runes := []rune(t.historyPending) - t.setLine(runes, len(runes)) - t.historyIndex-- - default: - entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) - if ok { - t.historyIndex-- - runes := []rune(entry) - t.setLine(runes, len(runes)) - } - } - case keyEnter: - t.moveCursorToPos(len(t.line)) - t.queue([]rune("\r\n")) - line = string(t.line) - ok = true - t.line = t.line[:0] - t.pos = 0 - t.cursorX = 0 - t.cursorY = 0 - t.maxLine = 0 - case keyDeleteWord: - // Delete zero or more spaces and then one or more characters. - t.eraseNPreviousChars(t.countToLeftWord()) - case keyDeleteLine: - // Delete everything from the current cursor position to the - // end of line. - for i := t.pos; i < len(t.line); i++ { - t.queue(space) - t.advanceCursor(1) - } - t.line = t.line[:t.pos] - t.moveCursorToPos(t.pos) - case keyCtrlD: - // Erase the character under the current position. - // The EOF case when the line is empty is handled in - // readLine(). - if t.pos < len(t.line) { - t.pos++ - t.eraseNPreviousChars(1) - } - case keyCtrlU: - t.eraseNPreviousChars(t.pos) - case keyClearScreen: - // Erases the screen and moves the cursor to the home position. - t.queue([]rune("\x1b[2J\x1b[H")) - t.queue(t.prompt) - t.cursorX, t.cursorY = 0, 0 - t.advanceCursor(visualLength(t.prompt)) - t.setLine(t.line, t.pos) - default: - if t.AutoCompleteCallback != nil { - prefix := string(t.line[:t.pos]) - suffix := string(t.line[t.pos:]) - - t.lock.Unlock() - newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) - t.lock.Lock() - - if completeOk { - t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) - return - } - } - if !isPrintable(key) { - return - } - if len(t.line) == maxLineLength { - return - } - t.addKeyToLine(key) - } - return -} - -// addKeyToLine inserts the given key at the current position in the current -// line. 
-func (t *Terminal) addKeyToLine(key rune) { - if len(t.line) == cap(t.line) { - newLine := make([]rune, len(t.line), 2*(1+len(t.line))) - copy(newLine, t.line) - t.line = newLine - } - t.line = t.line[:len(t.line)+1] - copy(t.line[t.pos+1:], t.line[t.pos:]) - t.line[t.pos] = key - if t.echo { - t.writeLine(t.line[t.pos:]) - } - t.pos++ - t.moveCursorToPos(t.pos) -} - -func (t *Terminal) writeLine(line []rune) { - for len(line) != 0 { - remainingOnLine := t.termWidth - t.cursorX - todo := len(line) - if todo > remainingOnLine { - todo = remainingOnLine - } - t.queue(line[:todo]) - t.advanceCursor(visualLength(line[:todo])) - line = line[todo:] - } -} - -func (t *Terminal) Write(buf []byte) (n int, err error) { - t.lock.Lock() - defer t.lock.Unlock() - - if t.cursorX == 0 && t.cursorY == 0 { - // This is the easy case: there's nothing on the screen that we - // have to move out of the way. - return t.c.Write(buf) - } - - // We have a prompt and possibly user input on the screen. We - // have to clear it first. - t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) - t.cursorX = 0 - t.clearLineToRight() - - for t.cursorY > 0 { - t.move(1 /* up */, 0, 0, 0) - t.cursorY-- - t.clearLineToRight() - } - - if _, err = t.c.Write(t.outBuf); err != nil { - return - } - t.outBuf = t.outBuf[:0] - - if n, err = t.c.Write(buf); err != nil { - return - } - - t.writeLine(t.prompt) - if t.echo { - t.writeLine(t.line) - } - - t.moveCursorToPos(t.pos) - - if _, err = t.c.Write(t.outBuf); err != nil { - return - } - t.outBuf = t.outBuf[:0] - return -} - -// ReadPassword temporarily changes the prompt and reads a password, without -// echo, from the terminal. -func (t *Terminal) ReadPassword(prompt string) (line string, err error) { - t.lock.Lock() - defer t.lock.Unlock() - - oldPrompt := t.prompt - t.prompt = []rune(prompt) - t.echo = false - - line, err = t.readLine() - - t.prompt = oldPrompt - t.echo = true - - return -} - -// ReadLine returns a line of input from the terminal. 
-func (t *Terminal) ReadLine() (line string, err error) { - t.lock.Lock() - defer t.lock.Unlock() - - return t.readLine() -} - -func (t *Terminal) readLine() (line string, err error) { - // t.lock must be held at this point - - if t.cursorX == 0 && t.cursorY == 0 { - t.writeLine(t.prompt) - t.c.Write(t.outBuf) - t.outBuf = t.outBuf[:0] - } - - lineIsPasted := t.pasteActive - - for { - rest := t.remainder - lineOk := false - for !lineOk { - var key rune - key, rest = bytesToKey(rest, t.pasteActive) - if key == utf8.RuneError { - break - } - if !t.pasteActive { - if key == keyCtrlD { - if len(t.line) == 0 { - return "", io.EOF - } - } - if key == keyPasteStart { - t.pasteActive = true - if len(t.line) == 0 { - lineIsPasted = true - } - continue - } - } else if key == keyPasteEnd { - t.pasteActive = false - continue - } - if !t.pasteActive { - lineIsPasted = false - } - line, lineOk = t.handleKey(key) - } - if len(rest) > 0 { - n := copy(t.inBuf[:], rest) - t.remainder = t.inBuf[:n] - } else { - t.remainder = nil - } - t.c.Write(t.outBuf) - t.outBuf = t.outBuf[:0] - if lineOk { - if t.echo { - t.historyIndex = -1 - t.history.Add(line) - } - if lineIsPasted { - err = ErrPasteIndicator - } - return - } - - // t.remainder is a slice at the beginning of t.inBuf - // containing a partial key sequence - readBuf := t.inBuf[len(t.remainder):] - var n int - - t.lock.Unlock() - n, err = t.c.Read(readBuf) - t.lock.Lock() - - if err != nil { - return - } - - t.remainder = t.inBuf[:n+len(t.remainder)] - } - - panic("unreachable") // for Go 1.0. -} - -// SetPrompt sets the prompt to be used when reading subsequent lines. -func (t *Terminal) SetPrompt(prompt string) { - t.lock.Lock() - defer t.lock.Unlock() - - t.prompt = []rune(prompt) -} - -func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { - // Move cursor to column zero at the start of the line. - t.move(t.cursorY, 0, t.cursorX, 0) - t.cursorX, t.cursorY = 0, 0 - t.clearLineToRight() - for t.cursorY < numPrevLines { - // Move down a line - t.move(0, 1, 0, 0) - t.cursorY++ - t.clearLineToRight() - } - // Move back to beginning. - t.move(t.cursorY, 0, 0, 0) - t.cursorX, t.cursorY = 0, 0 - - t.queue(t.prompt) - t.advanceCursor(visualLength(t.prompt)) - t.writeLine(t.line) - t.moveCursorToPos(t.pos) -} - -func (t *Terminal) SetSize(width, height int) error { - t.lock.Lock() - defer t.lock.Unlock() - - if width == 0 { - width = 1 - } - - oldWidth := t.termWidth - t.termWidth, t.termHeight = width, height - - switch { - case width == oldWidth: - // If the width didn't change then nothing else needs to be - // done. - return nil - case width < oldWidth: - // Some terminals (e.g. xterm) will truncate lines that were - // too long when shinking. Others, (e.g. gnome-terminal) will - // attempt to wrap them. For the former, repainting t.maxLine - // works great, but that behaviour goes badly wrong in the case - // of the latter because they have doubled every full line. - - // We assume that we are working on a terminal that wraps lines - // and adjust the cursor position based on every previous line - // wrapping and turning into two. This causes the prompt on - // xterms to move upwards, which isn't great, but it avoids a - // huge mess with gnome-terminal. 
- if t.cursorX >= t.termWidth { - t.cursorX = t.termWidth - 1 - } - t.cursorY *= 2 - t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) - case width > oldWidth: - // If the terminal expands then our position calculations will - // be wrong in the future because we think the cursor is - // |t.pos| chars into the string, but there will be a gap at - // the end of any wrapped line. - // - // But the position will actually be correct until we move, so - // we can move back to the beginning and repaint everything. - t.clearAndRepaintLinePlusNPrevious(t.maxLine) - } - - _, err := t.c.Write(t.outBuf) - t.outBuf = t.outBuf[:0] - return err -} - -type pasteIndicatorError struct{} - -func (pasteIndicatorError) Error() string { - return "terminal: ErrPasteIndicator not correctly handled" -} - -// ErrPasteIndicator may be returned from ReadLine as the error, in addition -// to valid line data. It indicates that bracketed paste mode is enabled and -// that the returned line consists only of pasted data. Programs may wish to -// interpret pasted data more literally than typed data. -var ErrPasteIndicator = pasteIndicatorError{} - -// SetBracketedPasteMode requests that the terminal bracket paste operations -// with markers. Not all terminals support this but, if it is supported, then -// enabling this mode will stop any autocomplete callback from running due to -// pastes. Additionally, any lines that are completely pasted will be returned -// from ReadLine with the error set to ErrPasteIndicator. -func (t *Terminal) SetBracketedPasteMode(on bool) { - if on { - io.WriteString(t.c, "\x1b[?2004h") - } else { - io.WriteString(t.c, "\x1b[?2004l") - } -} - -// stRingBuffer is a ring buffer of strings. -type stRingBuffer struct { - // entries contains max elements. - entries []string - max int - // head contains the index of the element most recently added to the ring. - head int - // size contains the number of elements in the ring. - size int -} - -func (s *stRingBuffer) Add(a string) { - if s.entries == nil { - const defaultNumEntries = 100 - s.entries = make([]string, defaultNumEntries) - s.max = defaultNumEntries - } - - s.head = (s.head + 1) % s.max - s.entries[s.head] = a - if s.size < s.max { - s.size++ - } -} - -// NthPreviousEntry returns the value passed to the nth previous call to Add. -// If n is zero then the immediately prior value is returned, if one, then the -// next most recent, and so on. If such an element doesn't exist then ok is -// false. -func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { - if n >= s.size { - return "", false - } - index := s.head - n - if index < 0 { - index += s.max - } - return s.entries[index], true -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util.go deleted file mode 100644 index 0763c9a..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. 
-// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package terminal - -import ( - "io" - "syscall" - "unsafe" -) - -// State contains the state of a terminal. -type State struct { - termios syscall.Termios -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF - newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func Restore(fd int, state *State) error { - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0) - return err -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - var dimensions [4]uint16 - - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { - return -1, -1, err - } - return int(dimensions[1]), int(dimensions[0]), nil -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. 
-func ReadPassword(fd int) ([]byte, error) { - var oldState syscall.Termios - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { - return nil, err - } - - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { - return nil, err - } - - defer func() { - syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(fd, buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - ret = append(ret, buf[:n]...) - if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go deleted file mode 100644 index 9c1ffd1..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package terminal - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA -const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go deleted file mode 100644 index 5883b22..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package terminal - -// These constants are declared here, rather than importing -// them from the syscall package as some syscall packages, even -// on linux, for example gccgo, do not declare them. -const ioctlReadTermios = 0x5401 // syscall.TCGETS -const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go deleted file mode 100644 index 2dd6c3d..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. 
-// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package terminal - -import ( - "io" - "syscall" - "unsafe" -) - -const ( - enableLineInput = 2 - enableEchoInput = 4 - enableProcessedInput = 1 - enableWindowInput = 8 - enableMouseInput = 16 - enableInsertMode = 32 - enableQuickEditMode = 64 - enableExtendedFlags = 128 - enableAutoPosition = 256 - enableProcessedOutput = 1 - enableWrapAtEolOutput = 2 -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procSetConsoleMode = kernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") -) - -type ( - short int16 - word uint16 - - coord struct { - x short - y short - } - smallRect struct { - left short - top short - right short - bottom short - } - consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord - } -) - -type State struct { - mode uint32 -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func Restore(fd int, state *State) error { - _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) - return err -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return 0, 0, error(e) - } - return int(info.size.x), int(info.size.y), nil -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. 
-func ReadPassword(fd int) ([]byte, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - old := st - - st &^= (enableEchoInput) - st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) - if e != 0 { - return nil, error(e) - } - - defer func() { - syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(syscall.Handle(fd), buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - if n > 0 && buf[n-1] == '\r' { - n-- - } - ret = append(ret, buf[:n]...) - if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/LICENSE b/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/LICENSE deleted file mode 100644 index a4249bb..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -tomb - support for clean goroutine termination in Go. - -Copyright (c) 2010-2011 - Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/README.md b/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/README.md deleted file mode 100644 index 3ae8788..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Installation and usage ----------------------- - -See [gopkg.in/tomb.v1](https://gopkg.in/tomb.v1) for documentation and usage details. 
diff --git a/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/tomb.go b/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/tomb.go deleted file mode 100644 index 5bcd5f8..0000000 --- a/vendor/github.com/Shopify/toxiproxy/vendor/gopkg.in/tomb.v1/tomb.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2011 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of the copyright holder nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The tomb package offers a conventional API for clean goroutine termination. -// -// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, -// and the reason for its death. -// -// The zero value of a Tomb assumes that a goroutine is about to be -// created or already alive. Once Kill or Killf is called with an -// argument that informs the reason for death, the goroutine is in -// a dying state and is expected to terminate soon. Right before the -// goroutine function or method returns, Done must be called to inform -// that the goroutine is indeed dead and about to stop running. -// -// A Tomb exposes Dying and Dead channels. These channels are closed -// when the Tomb state changes in the respective way. They enable -// explicit blocking until the state changes, and also to selectively -// unblock select statements accordingly. -// -// When the tomb state changes to dying and there's still logic going -// on within the goroutine, nested functions and methods may choose to -// return ErrDying as their error value, as this error won't alter the -// tomb state if provied to the Kill method. This is a convenient way to -// follow standard Go practices in the context of a dying tomb. 
-// -// For background and a detailed example, see the following blog post: -// -// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control -// -// For a more complex code snippet demonstrating the use of multiple -// goroutines with a single Tomb, see: -// -// http://play.golang.org/p/Xh7qWsDPZP -// -package tomb - -import ( - "errors" - "fmt" - "sync" -) - -// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, -// and the reason for its death. -// -// See the package documentation for details. -type Tomb struct { - m sync.Mutex - dying chan struct{} - dead chan struct{} - reason error -} - -var ( - ErrStillAlive = errors.New("tomb: still alive") - ErrDying = errors.New("tomb: dying") -) - -func (t *Tomb) init() { - t.m.Lock() - if t.dead == nil { - t.dead = make(chan struct{}) - t.dying = make(chan struct{}) - t.reason = ErrStillAlive - } - t.m.Unlock() -} - -// Dead returns the channel that can be used to wait -// until t.Done has been called. -func (t *Tomb) Dead() <-chan struct{} { - t.init() - return t.dead -} - -// Dying returns the channel that can be used to wait -// until t.Kill or t.Done has been called. -func (t *Tomb) Dying() <-chan struct{} { - t.init() - return t.dying -} - -// Wait blocks until the goroutine is in a dead state and returns the -// reason for its death. -func (t *Tomb) Wait() error { - t.init() - <-t.dead - t.m.Lock() - reason := t.reason - t.m.Unlock() - return reason -} - -// Done flags the goroutine as dead, and should be called a single time -// right before the goroutine function or method returns. -// If the goroutine was not already in a dying state before Done is -// called, it will be flagged as dying and dead at once with no -// error. -func (t *Tomb) Done() { - t.Kill(nil) - close(t.dead) -} - -// Kill flags the goroutine as dying for the given reason. -// Kill may be called multiple times, but only the first -// non-nil error is recorded as the reason for termination. -// -// If reason is ErrDying, the previous reason isn't replaced -// even if it is nil. It's a runtime error to call Kill with -// ErrDying if t is not in a dying state. -func (t *Tomb) Kill(reason error) { - t.init() - t.m.Lock() - defer t.m.Unlock() - if reason == ErrDying { - if t.reason == ErrStillAlive { - panic("tomb: Kill with ErrDying while still alive") - } - return - } - if t.reason == nil || t.reason == ErrStillAlive { - t.reason = reason - } - // If the receive on t.dying succeeds, then - // it can only be because we have already closed it. - // If it blocks, then we know that it needs to be closed. - select { - case <-t.dying: - default: - close(t.dying) - } -} - -// Killf works like Kill, but builds the reason providing the received -// arguments to fmt.Errorf. The generated error is also returned. -func (t *Tomb) Killf(f string, a ...interface{}) error { - err := fmt.Errorf(f, a...) - t.Kill(err) - return err -} - -// Err returns the reason for the goroutine death provided via Kill -// or Killf, or ErrStillAlive when the goroutine is still alive. 
-func (t *Tomb) Err() (reason error) { - t.init() - t.m.Lock() - reason = t.reason - t.m.Unlock() - return -} diff --git a/vendor/github.com/Shopify/toxiproxy/version.go b/vendor/github.com/Shopify/toxiproxy/version.go deleted file mode 100644 index cfc0014..0000000 --- a/vendor/github.com/Shopify/toxiproxy/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package toxiproxy - -var Version = "git" diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a..0000000 --- a/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index d5a559f..0000000 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.2 - - 1.3 - - tip -install: - - go get github.com/stretchr/testify - - go get github.com/stvp/go-udp-testing - - go get github.com/tobi/airbrake-go diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb4..0000000 --- a/vendor/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 46dd102..0000000 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,342 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. 
**Please note the Logrus API is not -yet stable (pre 1.0), the core API is unlikely change much but please version -control your Logrus to make sure you aren't fetching latest `master` on every -build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not -attached, the output is compatible with the -[l2met](http://r.32k.io/l2met-introduction) format: - -```text -time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10 -time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122 -time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10 -time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9 -time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(&logrus_airbrake.AirbrakeHook{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -```go -// Not the real implementation of the Airbrake hook. Just a simple sample. -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - log.AddHook(new(AirbrakeHook)) -} - -type AirbrakeHook struct{} - -// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains -// the fields for the entry. See the Fields section of the README. -func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { - err := airbrake.Notify(entry.Data["error"].(error)) - if err != nil { - log.WithFields(log.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Info("Failed to send error to Airbrake") - } - - return nil -} - -// `Levels()` returns a slice of `Levels` the hook is fired for. -func (hook *AirbrakeHook) Levels() []log.Level { - return []log.Level{ - log.ErrorLevel, - log.FatalLevel, - log.PanicLevel, - } -} -``` - -Logrus comes with built-in hooks. Add those, or your custom hook, in `init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/Sirupsen/logrus/hooks/syslog" -) - -func init() { - log.AddHook(new(logrus_airbrake.AirbrakeHook)) - log.AddHook(logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")) -} -``` - -* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) - Send errors to an exception tracking service compatible with the Airbrake API. 
- Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. - -* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) - Send errors to the Papertrail hosted logging service via UDP. - -* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) - Send errors to remote syslog server. - Uses standard library `log/syslog` behind the scenes. - -* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus) - Send errors to a channel in hipchat. - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(logrus.JSONFormatter) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(logrus.TextFormatter) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. 
`entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotated(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index dc2b0a7..0000000 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,244 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It containts all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. 
-func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook\n", err) - } - - reader, err := entry.Reader() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(reader.String()) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) 
-} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go deleted file mode 100644 index 3594550..0000000 --- a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) // default -} - -func main() { - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go deleted file mode 100644 index 42e7a4c..0000000 --- a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/tobi/airbrake-go" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.TextFormatter) // default - log.Hooks.Add(new(logrus_airbrake.AirbrakeHook)) -} - -func main() { - airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml" - airbrake.ApiKey = "whatever" - airbrake.Environment = "production" - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index 383ce93..0000000 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,177 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. 
-func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debugf on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Pancf on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) 
-} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index cccf1c2..0000000 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,44 +0,0 @@ -package logrus - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(entry *Entry) { - _, ok := entry.Data["time"] - if ok { - entry.Data["fields.time"] = entry.Data["time"] - } - - _, ok = entry.Data["msg"] - if ok { - entry.Data["fields.msg"] = entry.Data["msg"] - } - - _, ok = entry.Data["level"] - if ok { - entry.Data["fields.level"] = entry.Data["level"] - } -} diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index 77989da..0000000 --- a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package logrus - -import ( - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: 
"message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 13f34cb..0000000 --- a/vendor/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 0da2b36..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type levelHooks map[Level][]Hook - -// Add a hook to an instance of logger. 
This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks levelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks levelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/vendor/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go deleted file mode 100644 index 880d21e..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go +++ /dev/null @@ -1,54 +0,0 @@ -package logrus_airbrake - -import ( - "github.com/Sirupsen/logrus" - "github.com/tobi/airbrake-go" -) - -// AirbrakeHook to send exceptions to an exception-tracking service compatible -// with the Airbrake API. You must set: -// * airbrake.Endpoint -// * airbrake.ApiKey -// * airbrake.Environment (only sends exceptions when set to "production") -// -// Before using this hook, to send an error. Entries that trigger an Error, -// Fatal or Panic should now include an "error" field to send to Airbrake. -type AirbrakeHook struct{} - -func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error { - if entry.Data["error"] == nil { - entry.Logger.WithFields(logrus.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Warn("Exceptions sent to Airbrake must have an 'error' key with the error") - return nil - } - - err, ok := entry.Data["error"].(error) - if !ok { - entry.Logger.WithFields(logrus.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - }).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`") - return nil - } - - airErr := airbrake.Notify(err) - if airErr != nil { - entry.Logger.WithFields(logrus.Fields{ - "source": "airbrake", - "endpoint": airbrake.Endpoint, - "error": airErr, - }).Warn("Failed to send error to Airbrake") - } - - return nil -} - -func (hook *AirbrakeHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/vendor/github.com/Sirupsen/logrus/hooks/papertrail/README.md deleted file mode 100644 index ae61e92..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks/papertrail/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Papertrail Hook for Logrus :walrus: - -[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts). - -In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible. - -## Usage - -You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). 
Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`. - -For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs. - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/papertrail" -) - -func main() { - log := logrus.New() - hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` diff --git a/vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go deleted file mode 100644 index 48e2fea..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go +++ /dev/null @@ -1,54 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "net" - "os" - "time" - - "github.com/Sirupsen/logrus" -) - -const ( - format = "Jan 2 15:04:05" -) - -// PapertrailHook to send logs to a logging service compatible with the Papertrail API. -type PapertrailHook struct { - Host string - Port int - AppName string - UDPConn net.Conn -} - -// NewPapertrailHook creates a hook to be added to an instance of logger. -func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) { - conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port)) - return &PapertrailHook{host, port, appName, conn}, err -} - -// Fire is called when a log event is fired. -func (hook *PapertrailHook) Fire(entry *logrus.Entry) error { - date := time.Now().Format(format) - payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Data["level"], entry.Message) - - bytesWritten, err := hook.UDPConn.Write([]byte(payload)) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err) - return err - } - - return nil -} - -// Levels returns the available logging levels. 
-func (hook *PapertrailHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go deleted file mode 100644 index 96318d0..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/stvp/go-udp-testing" -) - -func TestWritingToUDP(t *testing.T) { - port := 16661 - udp.SetAddr(fmt.Sprintf(":%d", port)) - - hook, err := NewPapertrailHook("localhost", port, "test") - if err != nil { - t.Errorf("Unable to connect to local UDP server.") - } - - log := logrus.New() - log.Hooks.Add(hook) - - udp.ShouldReceive(t, "foo", func() { - log.Info("foo") - }) -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md deleted file mode 100644 index cd706bc..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Syslog Hooks for Logrus :walrus: - -## Usage - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` \ No newline at end of file diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go deleted file mode 100644 index 2a18ce6..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go +++ /dev/null @@ -1,59 +0,0 @@ -package logrus_syslog - -import ( - "fmt" - "github.com/Sirupsen/logrus" - "log/syslog" - "os" -) - -// SyslogHook to send logs via syslog. -type SyslogHook struct { - Writer *syslog.Writer - SyslogNetwork string - SyslogRaddr string -} - -// Creates a hook to be added to an instance of logger. 
This is called with -// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` -// `if err == nil { log.Hooks.Add(hook) }` -func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { - w, err := syslog.Dial(network, raddr, priority, tag) - return &SyslogHook{w, network, raddr}, err -} - -func (hook *SyslogHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) - return err - } - - switch entry.Data["level"] { - case "panic": - return hook.Writer.Crit(line) - case "fatal": - return hook.Writer.Crit(line) - case "error": - return hook.Writer.Err(line) - case "warn": - return hook.Writer.Warning(line) - case "info": - return hook.Writer.Info(line) - case "debug": - return hook.Writer.Debug(line) - default: - return nil - } -} - -func (hook *SyslogHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go deleted file mode 100644 index 42762dc..0000000 --- a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_syslog - -import ( - "github.com/Sirupsen/logrus" - "log/syslog" - "testing" -) - -func TestLocalhostAddAndPrint(t *testing.T) { - log := logrus.New() - hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err != nil { - t.Errorf("Unable to connect to local syslog.") - } - - log.Hooks.Add(hook) - - for _, level := range hook.Levels() { - if len(log.Hooks[level]) != 1 { - t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) - } - } - - log.Info("Congratulations!") -} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 9d11b64..0000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,22 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" - "time" -) - -type JSONFormatter struct{} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - prefixFieldClashes(entry) - entry.Data["time"] = entry.Time.Format(time.RFC3339) - entry.Data["msg"] = entry.Message - entry.Data["level"] = entry.Level.String() - - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index 7374fe3..0000000 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. 
- Hooks levelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(levelHooks), -// Level: logrus.Debug, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stdout, - Formatter: new(TextFormatter), - Hooks: make(levelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - NewEntry(logger).Debugf(format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - NewEntry(logger).Infof(format, args...) -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - NewEntry(logger).Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - NewEntry(logger).Errorf(format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - NewEntry(logger).Fatalf(format, args...) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - NewEntry(logger).Panicf(format, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - NewEntry(logger).Debug(args...) -} - -func (logger *Logger) Info(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - NewEntry(logger).Warn(args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - NewEntry(logger).Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - NewEntry(logger).Error(args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - NewEntry(logger).Fatal(args...) -} - -func (logger *Logger) Panic(args ...interface{}) { - NewEntry(logger).Panic(args...) 
-} - -func (logger *Logger) Debugln(args ...interface{}) { - NewEntry(logger).Debugln(args...) -} - -func (logger *Logger) Infoln(args ...interface{}) { - NewEntry(logger).Infoln(args...) -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - NewEntry(logger).Warnln(args...) -} - -func (logger *Logger) Warningln(args ...interface{}) { - NewEntry(logger).Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - NewEntry(logger).Errorln(args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - NewEntry(logger).Fatalln(args...) -} - -func (logger *Logger) Panicln(args ...interface{}) { - NewEntry(logger).Panicln(args...) -} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e..0000000 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index 15157d1..0000000 --- a/vendor/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val, err := strconv.Unquote(kvArr[1]) - assert.NoError(t, err) - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func 
TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1) - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go deleted file mode 100644 index 8fe02a4..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go deleted file mode 100644 index 0428ee5..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -*/ -package logrus - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 276447b..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin freebsd - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f..0000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 2ab0139..0000000 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - DisableColors bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - - var keys []string - for k := range entry.Data { - keys = append(keys, k) - } - sort.Strings(keys) - - b := &bytes.Buffer{} - - prefixFieldClashes(entry) - - isColored := (f.ForceColors || isTerminal) && !f.DisableColors - - if isColored { - printColored(b, entry, keys) - } else { - f.AppendKeyValue(b, "time", entry.Time.Format(time.RFC3339)) - f.AppendKeyValue(b, "level", entry.Level.String()) - f.AppendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.AppendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func printColored(b *bytes.Buffer, entry *Entry, keys []string) { - var levelColor int - switch entry.Level { - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } -} - -func (f *TextFormatter) AppendKeyValue(b *bytes.Buffer, key, value interface{}) { - if _, ok := value.(string); ok { - fmt.Fprintf(b, "%v=%q ", key, value) - } else { - fmt.Fprintf(b, "%v=%v ", key, value) - } -} diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore deleted file mode 100644 index 0026861..0000000 --- a/vendor/github.com/davecgh/go-spew/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml deleted file mode 100644 index 984e073..0000000 --- a/vendor/github.com/davecgh/go-spew/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -go: - - 1.5.4 - - 1.6.3 - - 1.7 -install: - - go get -v golang.org/x/tools/cmd/cover -script: - - go test -v -tags=safe ./spew - - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov -after_success: - - go get -v github.com/mattn/goveralls - - export PATH=$PATH:$HOME/gopath/bin - - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE 
b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bb67332..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2013 Dave Collins - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md deleted file mode 100644 index 556170a..0000000 --- a/vendor/github.com/davecgh/go-spew/README.md +++ /dev/null @@ -1,194 +0,0 @@ -go-spew -======= - -[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)] -(https://travis-ci.org/davecgh/go-spew) [![Coverage Status] -(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)] -(https://coveralls.io/r/davecgh/go-spew?branch=master) - -Go-spew implements a deep pretty printer for Go data structures to aid in -debugging. A comprehensive suite of tests with 100% test coverage is provided -to ensure proper functionality. See `test_coverage.txt` for the gocov coverage -report. Go-spew is licensed under the liberal ISC license, so it may be used in -open source or commercial projects. - -If you're interested in reading about how this package came to life and some -of the challenges involved in providing a deep pretty printer, there is a blog -post about it -[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). - -## Documentation - -[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)] -(http://godoc.org/github.com/davecgh/go-spew/spew) - -Full `go doc` style documentation for the project can be viewed online without -installing this package by using the excellent GoDoc site here: -http://godoc.org/github.com/davecgh/go-spew/spew - -You can also view the documentation locally once the package is installed with -the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to -http://localhost:6060/pkg/github.com/davecgh/go-spew/spew - -## Installation - -```bash -$ go get -u github.com/davecgh/go-spew/spew -``` - -## Quick Start - -Add this import line to the file you're working in: - -```Go -import "github.com/davecgh/go-spew/spew" -``` - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - -```Go -spew.Dump(myVar1, myVar2, ...) -spew.Fdump(someWriter, myVar1, myVar2, ...) -str := spew.Sdump(myVar1, myVar2, ...) 
-``` - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most -compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types -and pointer addresses): - -```Go -spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) -spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) -spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) -spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) -``` - -## Debugging a Web Application Example - -Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. - -```Go -package main - -import ( - "fmt" - "html" - "net/http" - - "github.com/davecgh/go-spew/spew" -) - -func handler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/html") - fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) - fmt.Fprintf(w, "") -} - -func main() { - http.HandleFunc("/", handler) - http.ListenAndServe(":8080", nil) -} -``` - -## Sample Dump Output - -``` -(main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) { - (string) "one": (bool) true - } -} -([]uint8) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| -} -``` - -## Sample Formatter Output - -Double pointer to a uint8: -``` - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 -``` - -Pointer to circular struct with a uint8 field and a pointer to itself: -``` - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} -``` - -## Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available via the -spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -``` -* Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - -* MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - -* DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - -* DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. This option - relies on access to the unsafe package, so it will not have any effect when - running in environments without access to the unsafe package such as Google - App Engine or with the "safe" build tag specified. - Pointer method invocation is enabled by default. - -* ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. 
Recursion after method invocation is disabled by default. - -* SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are supported, - with other types sorted according to the reflect.Value.String() output - which guarantees display stability. Natural map order is used by - default. - -* SpewKeys - SpewKeys specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only considered - if SortKeys is true. - -``` - -## Unsafe Package Dependency - -This package relies on the unsafe package to perform some of the more advanced -features, however it also supports a "limited" mode which allows it to work in -environments where the unsafe package is not available. By default, it will -operate in this mode on Google App Engine and when compiled with GopherJS. The -"safe" build tag may also be specified to force the package to build without -using the unsafe package. - -## License - -Go-spew is licensed under the liberal ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh deleted file mode 100644 index 9579497..0000000 --- a/vendor/github.com/davecgh/go-spew/cov_report.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# This script uses gocov to generate a test coverage report. -# The gocov tool my be obtained with the following command: -# go get github.com/axw/gocov/gocov -# -# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. - -# Check for gocov. -if ! type gocov >/dev/null 2>&1; then - echo >&2 "This script requires the gocov tool." - echo >&2 "You may obtain it with the following command:" - echo >&2 "go get github.com/axw/gocov/gocov" - exit 1 -fi - -# Only run the cgo tests if gcc is installed. -if type gcc >/dev/null 2>&1; then - (cd spew && gocov test -tags testcgo | gocov report) -else - (cd spew && gocov test | gocov report) -fi diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index d42a0bc..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2015 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. 
-// +build !js,!appengine,!safe,!disableunsafe - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) -) - -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } - - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. 
- if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } - } -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } - } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index e47a4e7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. 
-func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 14f02dc..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. 
- if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. 
It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go deleted file mode 100644 index 39b7525..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common_test.go +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// custom type to test Stinger interface on non-pointer receiver. -type stringer string - -// String implements the Stringer interface for testing invocation of custom -// stringers on types with non-pointer receivers. -func (s stringer) String() string { - return "stringer " + string(s) -} - -// custom type to test Stinger interface on pointer receiver. -type pstringer string - -// String implements the Stringer interface for testing invocation of custom -// stringers on types with only pointer receivers. -func (s *pstringer) String() string { - return "stringer " + string(*s) -} - -// xref1 and xref2 are cross referencing structs for testing circular reference -// detection. -type xref1 struct { - ps2 *xref2 -} -type xref2 struct { - ps1 *xref1 -} - -// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular -// reference for testing detection. -type indirCir1 struct { - ps2 *indirCir2 -} -type indirCir2 struct { - ps3 *indirCir3 -} -type indirCir3 struct { - ps1 *indirCir1 -} - -// embed is used to test embedded structures. -type embed struct { - a string -} - -// embedwrap is used to test embedded structures. -type embedwrap struct { - *embed - e *embed -} - -// panicer is used to intentionally cause a panic for testing spew properly -// handles them -type panicer int - -func (p panicer) String() string { - panic("test panic") -} - -// customError is used to test custom error interface invocation. -type customError int - -func (e customError) Error() string { - return fmt.Sprintf("error: %d", int(e)) -} - -// stringizeWants converts a slice of wanted test output into a format suitable -// for a test error message. -func stringizeWants(wants []string) string { - s := "" - for i, want := range wants { - if i > 0 { - s += fmt.Sprintf("want%d: %s", i+1, want) - } else { - s += "want: " + want - } - } - return s -} - -// testFailed returns whether or not a test failed by checking if the result -// of the test is in the slice of wanted strings. 
-func testFailed(result string, wants []string) bool { - for _, want := range wants { - if result == want { - return false - } - } - return true -} - -type sortableStruct struct { - x int -} - -func (ss sortableStruct) String() string { - return fmt.Sprintf("ss.%d", ss.x) -} - -type unsortableStruct struct { - x int -} - -type sortTestCase struct { - input []reflect.Value - expected []reflect.Value -} - -func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { - getInterfaces := func(values []reflect.Value) []interface{} { - interfaces := []interface{}{} - for _, v := range values { - interfaces = append(interfaces, v.Interface()) - } - return interfaces - } - - for _, test := range tests { - spew.SortValues(test.input, cs) - // reflect.DeepEqual cannot really make sense of reflect.Value, - // probably because of all the pointer tricks. For instance, - // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} - // instead. - input := getInterfaces(test.input) - expected := getInterfaces(test.expected) - if !reflect.DeepEqual(input, expected) { - t.Errorf("Sort mismatch:\n %v != %v", input, expected) - } - } -} - -// TestSortValues ensures the sort functionality for relect.Value based sorting -// works as intended. -func TestSortValues(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - embedA := v(embed{"a"}) - embedB := v(embed{"b"}) - embedC := v(embed{"c"}) - tests := []sortTestCase{ - // No values. - { - []reflect.Value{}, - []reflect.Value{}, - }, - // Bools. - { - []reflect.Value{v(false), v(true), v(false)}, - []reflect.Value{v(false), v(false), v(true)}, - }, - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Uints. - { - []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, - []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, - }, - // Floats. - { - []reflect.Value{v(2.0), v(1.0), v(3.0)}, - []reflect.Value{v(1.0), v(2.0), v(3.0)}, - }, - // Strings. - { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // Array - { - []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, - []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, - }, - // Uintptrs. - { - []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, - []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, - }, - // SortableStructs. - { - // Note: not sorted - DisableMethods is set. - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - // Note: not sorted - SpewKeys is false. - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - }, - // Invalid. - { - []reflect.Value{embedB, embedA, embedC}, - []reflect.Value{embedB, embedA, embedC}, - }, - } - cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} - helpTestSortValues(tests, &cs, t) -} - -// TestSortValuesWithMethods ensures the sort functionality for relect.Value -// based sorting works as intended when using string methods. -func TestSortValuesWithMethods(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - tests := []sortTestCase{ - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Strings. 
- { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // SortableStructs. - { - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - // Note: not sorted - SpewKeys is false. - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - }, - } - cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} - helpTestSortValues(tests, &cs, t) -} - -// TestSortValuesWithSpew ensures the sort functionality for relect.Value -// based sorting works as intended when using spew to stringify keys. -func TestSortValuesWithSpew(t *testing.T) { - v := reflect.ValueOf - - a := v("a") - b := v("b") - c := v("c") - tests := []sortTestCase{ - // Ints. - { - []reflect.Value{v(2), v(1), v(3)}, - []reflect.Value{v(1), v(2), v(3)}, - }, - // Strings. - { - []reflect.Value{b, a, c}, - []reflect.Value{a, b, c}, - }, - // SortableStructs. - { - []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, - []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, - }, - // UnsortableStructs. - { - []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, - []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, - }, - } - cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} - helpTestSortValues(tests, &cs, t) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 5552827..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. 
If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. 
See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. 
-func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index 5be0c40..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) 
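A self-contained illustration of those Dump-style entry points follows; the sample type and values are hypothetical, and the module is assumed to be pulled in through go.mod rather than this vendor tree:

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    // sample is an illustrative type only; it is not part of spew.
    type sample struct {
        Name  string
        Ports []int
    }

    func main() {
        v := sample{Name: "example", Ports: []int{8474}}
        spew.Dump(v)       // writes the annotated dump to stdout
        s := spew.Sdump(v) // captures the identical output as a string
        fmt.Print(s)
    }
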
- -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index a0ff95e..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. 
It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound == true: - d.w.Write(nilAngleBytes) - - case cycleFound == true: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. 
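A short sketch of the hexdump behaviour described above, under the same go.mod assumption; the byte contents are arbitrary:

    package main

    import "github.com/davecgh/go-spew/spew"

    func main() {
        // Byte slices and arrays are rendered hexdump -C style: offsets,
        // hex byte values, and an ASCII column.
        spew.Dump([]byte("hello, world"))
    }
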
-func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. 
- valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. 
See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go deleted file mode 100644 index 2b32040..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump_test.go +++ /dev/null @@ -1,1042 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Test Summary: -NOTE: For each test, a nil pointer, a single pointer and double pointer to the -base test element are also tested to ensure proper indirection across all types. - -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Array containing bytes -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Slice containing bytes -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer interface -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// dumpTest is used to describe a test to be perfomed against the Dump method. -type dumpTest struct { - in interface{} - wants []string -} - -// dumpTests houses all of the tests to be performed against the Dump method. 
-var dumpTests = make([]dumpTest, 0) - -// addDumpTest is a helper method to append the passed input and desired result -// to dumpTests -func addDumpTest(in interface{}, wants ...string) { - test := dumpTest{in, wants} - dumpTests = append(dumpTests, test) -} - -func addIntDumpTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max int16. - v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max int64. - v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max int. - v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addUintDumpTests() { - // Max uint8. - v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Max uint16. - v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Max uint32. - v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Max uint64. 
- v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Max uint. - v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addBoolDumpTests() { - // Boolean true. - v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Boolean false. - v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFloatDumpTests() { - // Standard float32. - v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard float64. - v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addComplexDumpTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Standard complex128. - v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addArrayDumpTests() { - // Array containing standard ints. 
- v := [3]int{1, 2, 3} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + - vt + ") 2,\n (" + vt + ") 3\n}" - addDumpTest(v, "([3]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[3]"+vt+")()\n") - - // Array containing type with custom formatter on pointer receiver only. - v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := [3]pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + - ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + - ") (len=" + v2i2Len + ") " + "stringer 3\n}" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + - ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + - v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + - ") " + "\"3\"\n}" - } - addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") - addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") - addDumpTest(nv2, "(*[3]"+v2t+")()\n") - - // Array containing interfaces. - v3i0 := "one" - v3 := [3]interface{}{v3i0, int(2), uint(3)} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Array containing bytes. - v4 := [34]byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[34]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[34]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addSliceDumpTests() { - // Slice containing standard float32 values. 
- v := []float32{3.14, 6.28, 12.56} - vLen := fmt.Sprintf("%d", len(v)) - vCap := fmt.Sprintf("%d", cap(v)) - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + - vt + ") 6.28,\n (" + vt + ") 12.56\n}" - addDumpTest(v, "([]"+vt+") "+vs+"\n") - addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*[]"+vt+")()\n") - - // Slice containing type with custom formatter on pointer receiver only. - v2i0 := pstringer("1") - v2i1 := pstringer("2") - v2i2 := pstringer("3") - v2 := []pstringer{v2i0, v2i1, v2i2} - v2i0Len := fmt.Sprintf("%d", len(v2i0)) - v2i1Len := fmt.Sprintf("%d", len(v2i1)) - v2i2Len := fmt.Sprintf("%d", len(v2i2)) - v2Len := fmt.Sprintf("%d", len(v2)) - v2Cap := fmt.Sprintf("%d", cap(v2)) - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.pstringer" - v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + - v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + - ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + - "stringer 3\n}" - addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*[]"+v2t+")()\n") - - // Slice containing interfaces. - v3i0 := "one" - v3 := []interface{}{v3i0, int(2), uint(3), nil} - v3i0Len := fmt.Sprintf("%d", len(v3i0)) - v3Len := fmt.Sprintf("%d", len(v3)) - v3Cap := fmt.Sprintf("%d", cap(v3)) - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + - "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + - v3t4 + ") 3,\n (" + v3t5 + ") \n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Slice containing bytes. - v4 := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - v4Len := fmt.Sprintf("%d", len(v4)) - v4Cap := fmt.Sprintf("%d", cap(v4)) - nv4 := (*[]byte)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]uint8" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + - " |............... |\n" + - " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + - " |!\"#$%&'()*+,-./0|\n" + - " 00000020 31 32 " + - " |12|\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") - - // Nil slice. 
- v5 := []int(nil) - nv5 := (*[]int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "[]int" - v5s := "" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") - addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") - addDumpTest(nv5, "(*"+v5t+")()\n") -} - -func addStringDumpTests() { - // Standard string. - v := "test" - vLen := fmt.Sprintf("%d", len(v)) - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "(len=" + vLen + ") \"test\"" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addInterfaceDumpTests() { - // Nil interface. - var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Sub-interface. - v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addMapDumpTests() { - // Map with string keys and int vals. - k := "one" - kk := "two" - m := map[string]int{k: 1, kk: 2} - klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up - kkLen := fmt.Sprintf("%d", len(kk)) - mLen := fmt.Sprintf("%d", len(m)) - nilMap := map[string]int(nil) - nm := (*map[string]int)(nil) - pm := &m - mAddr := fmt.Sprintf("%p", pm) - pmAddr := fmt.Sprintf("%p", &pm) - mt := "map[string]int" - mt1 := "string" - mt2 := "int" - ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + - "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + - ") \"two\": (" + mt2 + ") 2\n}" - ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + - "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + - ") \"one\": (" + mt2 + ") 1\n}" - addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") - addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", - "(*"+mt+")("+mAddr+")("+ms2+")\n") - addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", - "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") - addDumpTest(nm, "(*"+mt+")()\n") - addDumpTest(nilMap, "("+mt+") \n") - - // Map with custom formatter type on pointer receiver only keys and vals. 
- k2 := pstringer("one") - v2 := pstringer("1") - m2 := map[pstringer]pstringer{k2: v2} - k2Len := fmt.Sprintf("%d", len(k2)) - v2Len := fmt.Sprintf("%d", len(v2)) - m2Len := fmt.Sprintf("%d", len(m2)) - nilMap2 := map[pstringer]pstringer(nil) - nm2 := (*map[pstringer]pstringer)(nil) - pm2 := &m2 - m2Addr := fmt.Sprintf("%p", pm2) - pm2Addr := fmt.Sprintf("%p", &pm2) - m2t := "map[spew_test.pstringer]spew_test.pstringer" - m2t1 := "spew_test.pstringer" - m2t2 := "spew_test.pstringer" - m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + - "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" - if spew.UnsafeDisabled { - m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + - ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + - ") \"1\"\n}" - } - addDumpTest(m2, "("+m2t+") "+m2s+"\n") - addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") - addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") - addDumpTest(nm2, "(*"+m2t+")()\n") - addDumpTest(nilMap2, "("+m2t+") \n") - - // Map with interface keys and values. - k3 := "one" - k3Len := fmt.Sprintf("%d", len(k3)) - m3 := map[interface{}]interface{}{k3: 1} - m3Len := fmt.Sprintf("%d", len(m3)) - nilMap3 := map[interface{}]interface{}(nil) - nm3 := (*map[interface{}]interface{})(nil) - pm3 := &m3 - m3Addr := fmt.Sprintf("%p", pm3) - pm3Addr := fmt.Sprintf("%p", &pm3) - m3t := "map[interface {}]interface {}" - m3t1 := "string" - m3t2 := "int" - m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + - "\"one\": (" + m3t2 + ") 1\n}" - addDumpTest(m3, "("+m3t+") "+m3s+"\n") - addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") - addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") - addDumpTest(nm3, "(*"+m3t+")()\n") - addDumpTest(nilMap3, "("+m3t+") \n") - - // Map with nil interface value. - k4 := "nil" - k4Len := fmt.Sprintf("%d", len(k4)) - m4 := map[string]interface{}{k4: nil} - m4Len := fmt.Sprintf("%d", len(m4)) - nilMap4 := map[string]interface{}(nil) - nm4 := (*map[string]interface{})(nil) - pm4 := &m4 - m4Addr := fmt.Sprintf("%p", pm4) - pm4Addr := fmt.Sprintf("%p", &pm4) - m4t := "map[string]interface {}" - m4t1 := "string" - m4t2 := "interface {}" - m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + - " \"nil\": (" + m4t2 + ") \n}" - addDumpTest(m4, "("+m4t+") "+m4s+"\n") - addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") - addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") - addDumpTest(nm4, "(*"+m4t+")()\n") - addDumpTest(nilMap4, "("+m4t+") \n") -} - -func addStructDumpTests() { - // Struct with primitives. - type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Struct that contains another struct. 
- type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + - v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. - type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + - ") (len=5) stringer test2\n}" - v3sp := v3s - if spew.UnsafeDisabled { - v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) \"test2\"\n}" - v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + - v3t2 + ") (len=5) stringer test2\n}" - } - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") - - // Struct that contains embedded struct and field to same struct. - e := embed{"embedstr"} - eLen := fmt.Sprintf("%d", len("embedstr")) - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + - ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + - ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + - " \"embedstr\"\n })\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") - addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") - addDumpTest(nv4, "(*"+v4t+")()\n") -} - -func addUintptrDumpTests() { - // Null pointer. - v := uintptr(0) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - nv2 := (*uintptr)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") -} - -func addUnsafePointerDumpTests() { - // Null pointer. 
- v := unsafe.Pointer(uintptr(0)) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Address of real variable. - i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addChanDumpTests() { - // Nil channel. - var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") -} - -func addFuncDumpTests() { - // Function with no params and no returns. - v := addIntDumpTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") - - // Function with param and no returns. - v2 := TestDump - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") - addDumpTest(nv2, "(*"+v2t+")()\n") - - // Function with multiple params and multiple returns. - var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") - addDumpTest(nv3, "(*"+v3t+")()\n") -} - -func addCircularDumpTests() { - // Struct that is circular through self referencing. 
- type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + - vAddr + ")()\n })\n}" - vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") - - // Structs that are circular through cross referencing. - v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + - ")()\n })\n })\n}" - v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + - ")(" + v2Addr + ")()\n })\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") - addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") - - // Structs that are indirectly circular. - v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + - ")()\n })\n })\n })\n}" - v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + - ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + - ")()\n })\n })\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n") - addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") - addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") -} - -func addPanicDumpTests() { - // Type that panics in its Stringer interface. - v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -func addErrorDumpTests() { - // Type that has a custom Error interface. - v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addDumpTest(v, "("+vt+") "+vs+"\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") - addDumpTest(nv, "(*"+vt+")()\n") -} - -// TestDump executes all of the tests described by dumpTests. -func TestDump(t *testing.T) { - // Setup tests. 
- addIntDumpTests() - addUintDumpTests() - addBoolDumpTests() - addFloatDumpTests() - addComplexDumpTests() - addArrayDumpTests() - addSliceDumpTests() - addStringDumpTests() - addInterfaceDumpTests() - addMapDumpTests() - addStructDumpTests() - addUintptrDumpTests() - addUnsafePointerDumpTests() - addChanDumpTests() - addFuncDumpTests() - addCircularDumpTests() - addPanicDumpTests() - addErrorDumpTests() - addCgoDumpTests() - - t.Logf("Running %d tests", len(dumpTests)) - for i, test := range dumpTests { - buf := new(bytes.Buffer) - spew.Fdump(buf, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) - continue - } - } -} - -func TestDumpSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + - "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + - "(len=1) \"3\"\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "(map[spew_test.stringer]int) (len=3) {\n" + - "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + - "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + - "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + - "}\n" - if spew.UnsafeDisabled { - expected = "(map[spew_test.pstringer]int) (len=3) {\n" + - "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + - "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + - "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + - "}\n" - } - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - - s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "(map[spew_test.customError]int) (len=3) {\n" + - "(spew_test.customError) error: 1: (int) 1,\n" + - "(spew_test.customError) error: 2: (int) 2,\n" + - "(spew_test.customError) error: 3: (int) 3\n" + - "}\n" - if s != expected { - t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) - } - -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go deleted file mode 100644 index ed3e3c3..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when both cgo is supported and "-tags testcgo" is added to the go test -// command line. This means the cgo tests are only added (and hence run) when -// specifially requested. This configuration is used because spew itself -// does not require cgo to run even though it does handle certain cgo types -// specially. Rather than forcing all clients to require cgo and an external -// C compiler just to run the tests, this scheme makes them optional. -// +build cgo,testcgo - -package spew_test - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew/testdata" -) - -func addCgoDumpTests() { - // C char pointer. - v := testdata.GetCgoCharPointer() - nv := testdata.GetCgoNullCharPointer() - pv := &v - vcAddr := fmt.Sprintf("%p", v) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "*testdata._Ctype_char" - vs := "116" - addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") - addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") - addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") - addDumpTest(nv, "("+vt+")()\n") - - // C char array. - v2, v2l, v2c := testdata.GetCgoCharArray() - v2Len := fmt.Sprintf("%d", v2l) - v2Cap := fmt.Sprintf("%d", v2c) - v2t := "[6]testdata._Ctype_char" - v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + - "{\n 00000000 74 65 73 74 32 00 " + - " |test2.|\n}" - addDumpTest(v2, "("+v2t+") "+v2s+"\n") - - // C unsigned char array. - v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() - v3Len := fmt.Sprintf("%d", v3l) - v3Cap := fmt.Sprintf("%d", v3c) - v3t := "[6]testdata._Ctype_unsignedchar" - v3t2 := "[6]testdata._Ctype_uchar" - v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + - "{\n 00000000 74 65 73 74 33 00 " + - " |test3.|\n}" - addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") - - // C signed char array. - v4, v4l, v4c := testdata.GetCgoSignedCharArray() - v4Len := fmt.Sprintf("%d", v4l) - v4Cap := fmt.Sprintf("%d", v4c) - v4t := "[6]testdata._Ctype_schar" - v4t2 := "testdata._Ctype_schar" - v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + - "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + - ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + - ") 0\n}" - addDumpTest(v4, "("+v4t+") "+v4s+"\n") - - // C uint8_t array. - v5, v5l, v5c := testdata.GetCgoUint8tArray() - v5Len := fmt.Sprintf("%d", v5l) - v5Cap := fmt.Sprintf("%d", v5c) - v5t := "[6]testdata._Ctype_uint8_t" - v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + - "{\n 00000000 74 65 73 74 35 00 " + - " |test5.|\n}" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") - - // C typedefed unsigned char array. 
- v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() - v6Len := fmt.Sprintf("%d", v6l) - v6Cap := fmt.Sprintf("%d", v6c) - v6t := "[6]testdata._Ctype_custom_uchar_t" - v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + - "{\n 00000000 74 65 73 74 36 00 " + - " |test6.|\n}" - addDumpTest(v6, "("+v6t+") "+v6s+"\n") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go deleted file mode 100644 index 52a0971..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when either cgo is not supported or "-tags testcgo" is not added to the go -// test command line. This file intentionally does not setup any cgo tests in -// this scenario. -// +build !cgo !testcgo - -package spew_test - -func addCgoDumpTests() { - // Don't add any tests for cgo since this file is only compiled when - // there should not be any cgo tests. -} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go deleted file mode 100644 index de6c4e3..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/example_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew" -) - -type Flag int - -const ( - flagOne Flag = iota - flagTwo -) - -var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", -} - -func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) -} - -type Bar struct { - data uintptr -} - -type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} -} - -// This example demonstrates how to use Dump to dump variables to stdout. 
-func ExampleDump() { - // The following package level declarations are assumed for this example: - /* - type Flag int - - const ( - flagOne Flag = iota - flagTwo - ) - - var flagStrings = map[Flag]string{ - flagOne: "flagOne", - flagTwo: "flagTwo", - } - - func (f Flag) String() string { - if s, ok := flagStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown flag (%d)", int(f)) - } - - type Bar struct { - data uintptr - } - - type Foo struct { - unexportedField Bar - ExportedField map[interface{}]interface{} - } - */ - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - f := Flag(5) - b := []byte{ - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, - } - - // Dump! - spew.Dump(s1, f, b) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Flag) Unknown flag (5) - // ([]uint8) (len=34 cap=34) { - // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - // 00000020 31 32 |12| - // } - // -} - -// This example demonstrates how to use Printf to display a variable with a -// format string and inline formatting. -func ExamplePrintf() { - // Create a double pointer to a uint 8. - ui8 := uint8(5) - pui8 := &ui8 - ppui8 := &pui8 - - // Create a circular data type. - type circular struct { - ui8 uint8 - c *circular - } - c := circular{ui8: 1} - c.c = &c - - // Print! - spew.Printf("ppui8: %v\n", ppui8) - spew.Printf("circular: %v\n", c) - - // Output: - // ppui8: <**>5 - // circular: {1 <*>{1 <*>}} -} - -// This example demonstrates how to use a ConfigState. -func ExampleConfigState() { - // Modify the indent level of the ConfigState only. The global - // configuration is not modified. - scs := spew.ConfigState{Indent: "\t"} - - // Output using the ConfigState instance. - v := map[string]int{"one": 1} - scs.Printf("v: %v\n", v) - scs.Dump(v) - - // Output: - // v: map[one:1] - // (map[string]int) (len=1) { - // (string) (len=3) "one": (int) 1 - // } -} - -// This example demonstrates how to use ConfigState.Dump to dump variables to -// stdout -func ExampleConfigState_Dump() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances with different indentation. - scs := spew.ConfigState{Indent: "\t"} - scs2 := spew.ConfigState{Indent: " "} - - // Setup some sample data structures for the example. - bar := Bar{uintptr(0)} - s1 := Foo{bar, map[interface{}]interface{}{"one": true}} - - // Dump using the ConfigState instances. 
- scs.Dump(s1) - scs2.Dump(s1) - - // Output: - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // (spew_test.Foo) { - // unexportedField: (spew_test.Bar) { - // data: (uintptr) - // }, - // ExportedField: (map[interface {}]interface {}) (len=1) { - // (string) (len=3) "one": (bool) true - // } - // } - // -} - -// This example demonstrates how to use ConfigState.Printf to display a variable -// with a format string and inline formatting. -func ExampleConfigState_Printf() { - // See the top-level Dump example for details on the types used in this - // example. - - // Create two ConfigState instances and modify the method handling of the - // first ConfigState only. - scs := spew.NewDefaultConfig() - scs2 := spew.NewDefaultConfig() - scs.DisableMethods = true - - // Alternatively - // scs := spew.ConfigState{Indent: " ", DisableMethods: true} - // scs2 := spew.ConfigState{Indent: " "} - - // This is of type Flag which implements a Stringer and has raw value 1. - f := flagTwo - - // Dump using the ConfigState instances. - scs.Printf("f: %v\n", f) - scs2.Printf("f: %v\n", f) - - // Output: - // f: 1 - // f: flagTwo -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index ecf3b80..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound == true: - f.fs.Write(nilAngleBytes) - - case cycleFound == true: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go deleted file mode 100644 index b664b3f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format_test.go +++ /dev/null @@ -1,1558 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Test Summary: -NOTE: For each test, a nil pointer, a single pointer and double pointer to the -base test element are also tested to ensure proper indirection across all types. - -- Max int8, int16, int32, int64, int -- Max uint8, uint16, uint32, uint64, uint -- Boolean true and false -- Standard complex64 and complex128 -- Array containing standard ints -- Array containing type with custom formatter on pointer receiver only -- Array containing interfaces -- Slice containing standard float32 values -- Slice containing type with custom formatter on pointer receiver only -- Slice containing interfaces -- Nil slice -- Standard string -- Nil interface -- Sub-interface -- Map with string keys and int vals -- Map with custom formatter type on pointer receiver only keys and vals -- Map with interface keys and values -- Map with nil interface value -- Struct with primitives -- Struct that contains another struct -- Struct that contains custom type with Stringer pointer interface via both - exported and unexported fields -- Struct that contains embedded struct and field to same struct -- Uintptr to 0 (null pointer) -- Uintptr address of real variable -- Unsafe.Pointer to 0 (null pointer) -- Unsafe.Pointer to address of real variable -- Nil channel -- Standard int channel -- Function with no params and no returns -- Function with param and no returns -- Function with multiple params and multiple returns -- Struct that is circular through self referencing -- Structs that are circular through cross referencing -- Structs that are indirectly circular -- Type that panics in its Stringer interface -- Type that has a custom Error interface -- %x passthrough with uint -- %#x passthrough with uint -- %f passthrough with precision -- %f passthrough with width and precision -- %d passthrough with width -- %q passthrough with string -*/ - -package spew_test - -import ( - "bytes" - "fmt" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -// formatterTest is used to describe a test to be perfomed against NewFormatter. -type formatterTest struct { - format string - in interface{} - wants []string -} - -// formatterTests houses all of the tests to be performed against NewFormatter. -var formatterTests = make([]formatterTest, 0) - -// addFormatterTest is a helper method to append the passed input and desired -// result to formatterTests. 
-func addFormatterTest(format string, in interface{}, wants ...string) { - test := formatterTest{format, in, wants} - formatterTests = append(formatterTests, test) -} - -func addIntFormatterTests() { - // Max int8. - v := int8(127) - nv := (*int8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "int8" - vs := "127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max int16. - v2 := int16(32767) - nv2 := (*int16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "int16" - v2s := "32767" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max int32. - v3 := int32(2147483647) - nv3 := (*int32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "int32" - v3s := "2147483647" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max int64. 
- v4 := int64(9223372036854775807) - nv4 := (*int64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "int64" - v4s := "9223372036854775807" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max int. - v5 := int(2147483647) - nv5 := (*int)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "int" - v5s := "2147483647" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") -} - -func addUintFormatterTests() { - // Max uint8. - v := uint8(255) - nv := (*uint8)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uint8" - vs := "255" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Max uint16. 
- v2 := uint16(65535) - nv2 := (*uint16)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Max uint32. - v3 := uint32(4294967295) - nv3 := (*uint32)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "uint32" - v3s := "4294967295" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - - // Max uint64. - v4 := uint64(18446744073709551615) - nv4 := (*uint64)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "uint64" - v4s := "18446744073709551615" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") - - // Max uint. 
- v5 := uint(4294967295) - nv5 := (*uint)(nil) - pv5 := &v5 - v5Addr := fmt.Sprintf("%p", pv5) - pv5Addr := fmt.Sprintf("%p", &pv5) - v5t := "uint" - v5s := "4294967295" - addFormatterTest("%v", v5, v5s) - addFormatterTest("%v", pv5, "<*>"+v5s) - addFormatterTest("%v", &pv5, "<**>"+v5s) - addFormatterTest("%v", nv5, "") - addFormatterTest("%+v", v5, v5s) - addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) - addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%+v", nv5, "") - addFormatterTest("%#v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) - addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") - addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) - addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) - addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) - addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") -} - -func addBoolFormatterTests() { - // Boolean true. - v := bool(true) - nv := (*bool)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "bool" - vs := "true" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Boolean false. - v2 := bool(false) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "bool" - v2s := "false" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFloatFormatterTests() { - // Standard float32. 
- v := float32(3.1415) - nv := (*float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "float32" - vs := "3.1415" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard float64. - v2 := float64(3.1415926) - nv2 := (*float64)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "float64" - v2s := "3.1415926" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addComplexFormatterTests() { - // Standard complex64. - v := complex(float32(6), -2) - nv := (*complex64)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "complex64" - vs := "(6-2i)" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Standard complex128. 
- v2 := complex(float64(-6), 2) - nv2 := (*complex128)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "complex128" - v2s := "(-6+2i)" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") -} - -func addArrayFormatterTests() { - // Array containing standard ints. - v := [3]int{1, 2, 3} - nv := (*[3]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[3]int" - vs := "[1 2 3]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Array containing type with custom formatter on pointer receiver only. - v2 := [3]pstringer{"1", "2", "3"} - nv2 := (*[3]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[3]spew_test.pstringer" - v2sp := "[stringer 1 stringer 2 stringer 3]" - v2s := v2sp - if spew.UnsafeDisabled { - v2s = "[1 2 3]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2sp) - addFormatterTest("%v", &pv2, "<**>"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Array containing interfaces. 
- v3 := [3]interface{}{"one", int(2), uint(3)} - nv3 := (*[3]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[3]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3s := "[one 2 3]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addSliceFormatterTests() { - // Slice containing standard float32 values. - v := []float32{3.14, 6.28, 12.56} - nv := (*[]float32)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "[]float32" - vs := "[3.14 6.28 12.56]" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Slice containing type with custom formatter on pointer receiver only. - v2 := []pstringer{"1", "2", "3"} - nv2 := (*[]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "[]spew_test.pstringer" - v2s := "[stringer 1 stringer 2 stringer 3]" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Slice containing interfaces. 
- v3 := []interface{}{"one", int(2), uint(3), nil} - nv3 := (*[]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "[]interface {}" - v3t2 := "string" - v3t3 := "int" - v3t4 := "uint" - v3t5 := "interface {}" - v3s := "[one 2 3 ]" - v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + - ")]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Nil slice. - var v4 []int - nv4 := (*[]int)(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "[]int" - v4s := "" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStringFormatterTests() { - // Standard string. - v := "test" - nv := (*string)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "string" - vs := "test" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addInterfaceFormatterTests() { - // Nil interface. 
- var v interface{} - nv := (*interface{})(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "interface {}" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Sub-interface. - v2 := interface{}(uint16(65535)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uint16" - v2s := "65535" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addMapFormatterTests() { - // Map with string keys and int vals. - v := map[string]int{"one": 1, "two": 2} - nilMap := map[string]int(nil) - nv := (*map[string]int)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "map[string]int" - vs := "map[one:1 two:2]" - vs2 := "map[two:2 one:1]" - addFormatterTest("%v", v, vs, vs2) - addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, - "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nilMap, "") - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) - addFormatterTest("%#v", nilMap, "("+vt+")"+"") - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, - "(*"+vt+")("+vAddr+")"+vs2) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, - "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%#+v", nilMap, "("+vt+")"+"") - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Map with custom formatter type on pointer receiver only keys and vals. 
- v2 := map[pstringer]pstringer{"one": "1"} - nv2 := (*map[pstringer]pstringer)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "map[spew_test.pstringer]spew_test.pstringer" - v2s := "map[stringer one:stringer 1]" - if spew.UnsafeDisabled { - v2s = "map[one:1]" - } - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Map with interface keys and values. - v3 := map[interface{}]interface{}{"one": 1} - nv3 := (*map[interface{}]interface{})(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "map[interface {}]interface {}" - v3t1 := "string" - v3t2 := "int" - v3s := "map[one:1]" - v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Map with nil interface value - v4 := map[string]interface{}{"nil": nil} - nv4 := (*map[string]interface{})(nil) - pv4 := &v4 - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "map[string]interface {}" - v4t1 := "interface {}" - v4s := "map[nil:]" - v4s2 := "map[nil:(" + v4t1 + ")]" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addStructFormatterTests() { - // Struct with primitives. 
- type s1 struct { - a int8 - b uint8 - } - v := s1{127, 255} - nv := (*s1)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.s1" - vt2 := "int8" - vt3 := "uint8" - vs := "{127 255}" - vs2 := "{a:127 b:255}" - vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs2) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs3) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs3) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Struct that contains another struct. - type s2 struct { - s1 s1 - b bool - } - v2 := s2{s1{127, 255}, true} - nv2 := (*s2)(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.s2" - v2t2 := "spew_test.s1" - v2t3 := "int8" - v2t4 := "uint8" - v2t5 := "bool" - v2s := "{{127 255} true}" - v2s2 := "{s1:{a:127 b:255} b:true}" - v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + - v2t5 + ")true}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s2) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Struct that contains custom type with Stringer pointer interface via both - // exported and unexported fields. 
- type s3 struct { - s pstringer - S pstringer - } - v3 := s3{"test", "test2"} - nv3 := (*s3)(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.s3" - v3t2 := "spew_test.pstringer" - v3s := "{stringer test stringer test2}" - v3sp := v3s - v3s2 := "{s:stringer test S:stringer test2}" - v3s2p := v3s2 - v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" - v3s3p := v3s3 - if spew.UnsafeDisabled { - v3s = "{test test2}" - v3sp = "{test stringer test2}" - v3s2 = "{s:test S:test2}" - v3s2p = "{s:test S:stringer test2}" - v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" - v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" - } - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3sp) - addFormatterTest("%v", &pv3, "<**>"+v3sp) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s2) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") - - // Struct that contains embedded struct and field to same struct. - e := embed{"embedstr"} - v4 := embedwrap{embed: &e, e: &e} - nv4 := (*embedwrap)(nil) - pv4 := &v4 - eAddr := fmt.Sprintf("%p", &e) - v4Addr := fmt.Sprintf("%p", pv4) - pv4Addr := fmt.Sprintf("%p", &pv4) - v4t := "spew_test.embedwrap" - v4t2 := "spew_test.embed" - v4t3 := "string" - v4s := "{<*>{embedstr} <*>{embedstr}}" - v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + - "){a:embedstr}}" - v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + - "){a:(" + v4t3 + ")embedstr}}" - v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + - ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" - addFormatterTest("%v", v4, v4s) - addFormatterTest("%v", pv4, "<*>"+v4s) - addFormatterTest("%v", &pv4, "<**>"+v4s) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%+v", v4, v4s2) - addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) - addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) - addFormatterTest("%+v", nv4, "") - addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) - addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) - addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) - addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") - addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) - addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) - addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) - addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") -} - -func addUintptrFormatterTests() { - // Null pointer. 
- v := uintptr(0) - nv := (*uintptr)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "uintptr" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. - i := 1 - v2 := uintptr(unsafe.Pointer(&i)) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "uintptr" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addUnsafePointerFormatterTests() { - // Null pointer. - v := unsafe.Pointer(uintptr(0)) - nv := (*unsafe.Pointer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "unsafe.Pointer" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Address of real variable. 
- i := 1 - v2 := unsafe.Pointer(&i) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "unsafe.Pointer" - v2s := fmt.Sprintf("%p", &i) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addChanFormatterTests() { - // Nil channel. - var v chan int - pv := &v - nv := (*chan int)(nil) - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "chan int" - vs := "" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Real channel. - v2 := make(chan int) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "chan int" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) -} - -func addFuncFormatterTests() { - // Function with no params and no returns. - v := addIntFormatterTests - nv := (*func())(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "func()" - vs := fmt.Sprintf("%p", v) - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") - - // Function with param and no returns. 
- v2 := TestFormatter - nv2 := (*func(*testing.T))(nil) - pv2 := &v2 - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "func(*testing.T)" - v2s := fmt.Sprintf("%p", v2) - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s) - addFormatterTest("%v", &pv2, "<**>"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%+v", v2, v2s) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%+v", nv2, "") - addFormatterTest("%#v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) - addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) - addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") - - // Function with multiple params and multiple returns. - var v3 = func(i int, s string) (b bool, err error) { - return true, nil - } - nv3 := (*func(int, string) (bool, error))(nil) - pv3 := &v3 - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "func(int, string) (bool, error)" - v3s := fmt.Sprintf("%p", v3) - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s) - addFormatterTest("%v", &pv3, "<**>"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%+v", v3, v3s) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%+v", nv3, "") - addFormatterTest("%#v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) - addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) - addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") -} - -func addCircularFormatterTests() { - // Struct that is circular through self referencing. - type circular struct { - c *circular - } - v := circular{nil} - v.c = &v - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.circular" - vs := "{<*>{<*>}}" - vs2 := "{<*>}" - vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" - vs4 := "{c:<*>(" + vAddr + ")}" - vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" - vs6 := "{c:(*" + vt + ")}" - vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + - ")}}" - vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs2) - addFormatterTest("%v", &pv, "<**>"+vs2) - addFormatterTest("%+v", v, vs3) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) - addFormatterTest("%#v", v, "("+vt+")"+vs5) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) - addFormatterTest("%#+v", v, "("+vt+")"+vs7) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) - - // Structs that are circular through cross referencing. 
- v2 := xref1{nil} - ts2 := xref2{&v2} - v2.ps2 = &ts2 - pv2 := &v2 - ts2Addr := fmt.Sprintf("%p", &ts2) - v2Addr := fmt.Sprintf("%p", pv2) - pv2Addr := fmt.Sprintf("%p", &pv2) - v2t := "spew_test.xref1" - v2t2 := "spew_test.xref2" - v2s := "{<*>{<*>{<*>}}}" - v2s2 := "{<*>{<*>}}" - v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + - ts2Addr + ")}}}" - v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" - v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + - ")}}}" - v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" - v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + - ")}}}" - v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + - ")(" + v2Addr + ")}}" - addFormatterTest("%v", v2, v2s) - addFormatterTest("%v", pv2, "<*>"+v2s2) - addFormatterTest("%v", &pv2, "<**>"+v2s2) - addFormatterTest("%+v", v2, v2s3) - addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) - addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) - addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) - addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) - addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) - addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) - addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) - addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) - - // Structs that are indirectly circular. - v3 := indirCir1{nil} - tic2 := indirCir2{nil} - tic3 := indirCir3{&v3} - tic2.ps3 = &tic3 - v3.ps2 = &tic2 - pv3 := &v3 - tic2Addr := fmt.Sprintf("%p", &tic2) - tic3Addr := fmt.Sprintf("%p", &tic3) - v3Addr := fmt.Sprintf("%p", pv3) - pv3Addr := fmt.Sprintf("%p", &pv3) - v3t := "spew_test.indirCir1" - v3t2 := "spew_test.indirCir2" - v3t3 := "spew_test.indirCir3" - v3s := "{<*>{<*>{<*>{<*>}}}}" - v3s2 := "{<*>{<*>{<*>}}}" - v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" - v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + - v3Addr + ")}}}" - v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - "){ps2:(*" + v3t2 + ")}}}}" - v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + - ")}}}" - v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + - ")(" + tic2Addr + ")}}}}" - v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + - tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" - addFormatterTest("%v", v3, v3s) - addFormatterTest("%v", pv3, "<*>"+v3s2) - addFormatterTest("%v", &pv3, "<**>"+v3s2) - addFormatterTest("%+v", v3, v3s3) - addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) - addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) - addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) - addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) - addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) - addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) - addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) - addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) -} - -func addPanicFormatterTests() { - // Type that panics in its Stringer interface. 
- v := panicer(127) - nv := (*panicer)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.panicer" - vs := "(PANIC=test panic)127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addErrorFormatterTests() { - // Type that has a custom Error interface. - v := customError(127) - nv := (*customError)(nil) - pv := &v - vAddr := fmt.Sprintf("%p", pv) - pvAddr := fmt.Sprintf("%p", &pv) - vt := "spew_test.customError" - vs := "error: 127" - addFormatterTest("%v", v, vs) - addFormatterTest("%v", pv, "<*>"+vs) - addFormatterTest("%v", &pv, "<**>"+vs) - addFormatterTest("%v", nv, "") - addFormatterTest("%+v", v, vs) - addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) - addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%+v", nv, "") - addFormatterTest("%#v", v, "("+vt+")"+vs) - addFormatterTest("%#v", pv, "(*"+vt+")"+vs) - addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) - addFormatterTest("%#v", nv, "(*"+vt+")"+"") - addFormatterTest("%#+v", v, "("+vt+")"+vs) - addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) - addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) - addFormatterTest("%#+v", nv, "(*"+vt+")"+"") -} - -func addPassthroughFormatterTests() { - // %x passthrough with uint. - v := uint(4294967295) - pv := &v - vAddr := fmt.Sprintf("%x", pv) - pvAddr := fmt.Sprintf("%x", &pv) - vs := "ffffffff" - addFormatterTest("%x", v, vs) - addFormatterTest("%x", pv, vAddr) - addFormatterTest("%x", &pv, pvAddr) - - // %#x passthrough with uint. - v2 := int(2147483647) - pv2 := &v2 - v2Addr := fmt.Sprintf("%#x", pv2) - pv2Addr := fmt.Sprintf("%#x", &pv2) - v2s := "0x7fffffff" - addFormatterTest("%#x", v2, v2s) - addFormatterTest("%#x", pv2, v2Addr) - addFormatterTest("%#x", &pv2, pv2Addr) - - // %f passthrough with precision. - addFormatterTest("%.2f", 3.1415, "3.14") - addFormatterTest("%.3f", 3.1415, "3.142") - addFormatterTest("%.4f", 3.1415, "3.1415") - - // %f passthrough with width and precision. - addFormatterTest("%5.2f", 3.1415, " 3.14") - addFormatterTest("%6.3f", 3.1415, " 3.142") - addFormatterTest("%7.4f", 3.1415, " 3.1415") - - // %d passthrough with width. - addFormatterTest("%3d", 127, "127") - addFormatterTest("%4d", 127, " 127") - addFormatterTest("%5d", 127, " 127") - - // %q passthrough with string. - addFormatterTest("%q", "test", "\"test\"") -} - -// TestFormatter executes all of the tests described by formatterTests. -func TestFormatter(t *testing.T) { - // Setup tests. 
- addIntFormatterTests() - addUintFormatterTests() - addBoolFormatterTests() - addFloatFormatterTests() - addComplexFormatterTests() - addArrayFormatterTests() - addSliceFormatterTests() - addStringFormatterTests() - addInterfaceFormatterTests() - addMapFormatterTests() - addStructFormatterTests() - addUintptrFormatterTests() - addUnsafePointerFormatterTests() - addChanFormatterTests() - addFuncFormatterTests() - addCircularFormatterTests() - addPanicFormatterTests() - addErrorFormatterTests() - addPassthroughFormatterTests() - - t.Logf("Running %d tests", len(formatterTests)) - for i, test := range formatterTests { - buf := new(bytes.Buffer) - spew.Fprintf(buf, test.format, test.in) - s := buf.String() - if testFailed(s, test.wants) { - t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, - stringizeWants(test.wants)) - continue - } - } -} - -type testStruct struct { - x int -} - -func (ts testStruct) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -type testStructP struct { - x int -} - -func (ts *testStructP) String() string { - return fmt.Sprintf("ts.%d", ts.x) -} - -func TestPrintSortedKeys(t *testing.T) { - cfg := spew.ConfigState{SortKeys: true} - s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) - expected := "map[1:1 2:2 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) - expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" - if spew.UnsafeDisabled { - expected = "map[1:1 2:2 3:3]" - } - if s != expected { - t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) - } - - s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) - } - - if !spew.UnsafeDisabled { - s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) - expected = "map[ts.1:1 ts.2:2 ts.3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) - } - } - - s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) - expected = "map[error: 1:1 error: 2:2 error: 3:3]" - if s != expected { - t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go deleted file mode 100644 index 1069ee2..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/internal_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -This test file is part of the spew package rather than than the spew_test -package because it needs access to internals to properly test certain cases -which are not possible via the public interface since they should never happen. -*/ - -package spew - -import ( - "bytes" - "reflect" - "testing" -) - -// dummyFmtState implements a fake fmt.State to use for testing invalid -// reflect.Value handling. This is necessary because the fmt package catches -// invalid values before invoking the formatter on them. -type dummyFmtState struct { - bytes.Buffer -} - -func (dfs *dummyFmtState) Flag(f int) bool { - if f == int('+') { - return true - } - return false -} - -func (dfs *dummyFmtState) Precision() (int, bool) { - return 0, false -} - -func (dfs *dummyFmtState) Width() (int, bool) { - return 0, false -} - -// TestInvalidReflectValue ensures the dump and formatter code handles an -// invalid reflect value properly. This needs access to internal state since it -// should never happen in real code and therefore can't be tested via the public -// API. -func TestInvalidReflectValue(t *testing.T) { - i := 1 - - // Dump invalid reflect value. - v := new(reflect.Value) - buf := new(bytes.Buffer) - d := dumpState{w: buf, cs: &Config} - d.dump(*v) - s := buf.String() - want := "" - if s != want { - t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) - } - i++ - - // Formatter invalid reflect value. - buf2 := new(dummyFmtState) - f := formatState{value: *v, cs: &Config, fs: buf2} - f.format(*v) - s = buf2.String() - want = "" - if s != want { - t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) - } -} - -// SortValues makes the internal sortValues function available to the test -// package. -func SortValues(values []reflect.Value, cs *ConfigState) { - sortValues(values, cs) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go deleted file mode 100644 index 863b62c..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2013-2015 Dave Collins - -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. - -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. 
-// +build !js,!appengine,!safe,!disableunsafe - -/* -This test file is part of the spew package rather than than the spew_test -package because it needs access to internals to properly test certain cases -which are not possible via the public interface since they should never happen. -*/ - -package spew - -import ( - "bytes" - "reflect" - "testing" - "unsafe" -) - -// changeKind uses unsafe to intentionally change the kind of a reflect.Value to -// the maximum kind value which does not exist. This is needed to test the -// fallback code which punts to the standard fmt library for new types that -// might get added to the language. -func changeKind(v *reflect.Value, readOnly bool) { - rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) - *rvf = *rvf | ((1< - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. 
-func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go deleted file mode 100644 index dbbc085..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew_test.go +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew_test - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/davecgh/go-spew/spew" -) - -// spewFunc is used to identify which public function of the spew package or -// ConfigState a test applies to. -type spewFunc int - -const ( - fCSFdump spewFunc = iota - fCSFprint - fCSFprintf - fCSFprintln - fCSPrint - fCSPrintln - fCSSdump - fCSSprint - fCSSprintf - fCSSprintln - fCSErrorf - fCSNewFormatter - fErrorf - fFprint - fFprintln - fPrint - fPrintln - fSdump - fSprint - fSprintf - fSprintln -) - -// Map of spewFunc values to names for pretty printing. -var spewFuncStrings = map[spewFunc]string{ - fCSFdump: "ConfigState.Fdump", - fCSFprint: "ConfigState.Fprint", - fCSFprintf: "ConfigState.Fprintf", - fCSFprintln: "ConfigState.Fprintln", - fCSSdump: "ConfigState.Sdump", - fCSPrint: "ConfigState.Print", - fCSPrintln: "ConfigState.Println", - fCSSprint: "ConfigState.Sprint", - fCSSprintf: "ConfigState.Sprintf", - fCSSprintln: "ConfigState.Sprintln", - fCSErrorf: "ConfigState.Errorf", - fCSNewFormatter: "ConfigState.NewFormatter", - fErrorf: "spew.Errorf", - fFprint: "spew.Fprint", - fFprintln: "spew.Fprintln", - fPrint: "spew.Print", - fPrintln: "spew.Println", - fSdump: "spew.Sdump", - fSprint: "spew.Sprint", - fSprintf: "spew.Sprintf", - fSprintln: "spew.Sprintln", -} - -func (f spewFunc) String() string { - if s, ok := spewFuncStrings[f]; ok { - return s - } - return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) -} - -// spewTest is used to describe a test to be performed against the public -// functions of the spew package or ConfigState. -type spewTest struct { - cs *spew.ConfigState - f spewFunc - format string - in interface{} - want string -} - -// spewTests houses the tests to be performed against the public functions of -// the spew package and ConfigState. -// -// These tests are only intended to ensure the public functions are exercised -// and are intentionally not exhaustive of types. The exhaustive type -// tests are handled in the dump and format tests. -var spewTests []spewTest - -// redirStdout is a helper function to return the standard output from f as a -// byte slice. 
-func redirStdout(f func()) ([]byte, error) { - tempFile, err := ioutil.TempFile("", "ss-test") - if err != nil { - return nil, err - } - fileName := tempFile.Name() - defer os.Remove(fileName) // Ignore error - - origStdout := os.Stdout - os.Stdout = tempFile - f() - os.Stdout = origStdout - tempFile.Close() - - return ioutil.ReadFile(fileName) -} - -func initSpewTests() { - // Config states with various settings. - scsDefault := spew.NewDefaultConfig() - scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} - scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} - scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} - scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} - - // Variables for tests on types which implement Stringer interface with and - // without a pointer receiver. - ts := stringer("test") - tps := pstringer("test") - - // depthTester is used to test max depth handling for structs, array, slices - // and maps. - type depthTester struct { - ic indirCir1 - arr [1]string - slice []string - m map[string]int - } - dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, - map[string]int{"one": 1}} - - // Variable for tests on types which implement error interface. - te := customError(10) - - spewTests = []spewTest{ - {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, - {scsDefault, fCSFprint, "", int16(32767), "32767"}, - {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, - {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, - {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, - {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, - {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, - {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, - {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, - {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, - {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, - {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, - {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, - {scsDefault, fFprint, "", float32(3.14), "3.14"}, - {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, - {scsDefault, fPrint, "", true, "true"}, - {scsDefault, fPrintln, "", false, "false\n"}, - {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, - {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, - {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, - {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, - {scsNoMethods, fCSFprint, "", ts, "test"}, - {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, - {scsNoMethods, fCSFprint, "", tps, "test"}, - {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, - {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, - {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, - {scsNoPmethods, fCSFprint, "", tps, "test"}, - {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, - {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, - {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + - " ic: (spew_test.indirCir1) {\n \n },\n" + - " arr: ([1]string) (len=1 cap=1) {\n \n },\n" + - " slice: ([]string) (len=1 cap=1) {\n \n },\n" + - " m: (map[string]int) (len=1) {\n \n }\n}\n"}, - {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, - {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + - "(len=4) (stringer test) \"test\"\n"}, - {scsContinue, 
fCSFprint, "", te, "(error: 10) 10"}, - {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + - "(error: 10) 10\n"}, - } -} - -// TestSpew executes all of the tests described by spewTests. -func TestSpew(t *testing.T) { - initSpewTests() - - t.Logf("Running %d tests", len(spewTests)) - for i, test := range spewTests { - buf := new(bytes.Buffer) - switch test.f { - case fCSFdump: - test.cs.Fdump(buf, test.in) - - case fCSFprint: - test.cs.Fprint(buf, test.in) - - case fCSFprintf: - test.cs.Fprintf(buf, test.format, test.in) - - case fCSFprintln: - test.cs.Fprintln(buf, test.in) - - case fCSPrint: - b, err := redirStdout(func() { test.cs.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSPrintln: - b, err := redirStdout(func() { test.cs.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fCSSdump: - str := test.cs.Sdump(test.in) - buf.WriteString(str) - - case fCSSprint: - str := test.cs.Sprint(test.in) - buf.WriteString(str) - - case fCSSprintf: - str := test.cs.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fCSSprintln: - str := test.cs.Sprintln(test.in) - buf.WriteString(str) - - case fCSErrorf: - err := test.cs.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fCSNewFormatter: - fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) - - case fErrorf: - err := spew.Errorf(test.format, test.in) - buf.WriteString(err.Error()) - - case fFprint: - spew.Fprint(buf, test.in) - - case fFprintln: - spew.Fprintln(buf, test.in) - - case fPrint: - b, err := redirStdout(func() { spew.Print(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fPrintln: - b, err := redirStdout(func() { spew.Println(test.in) }) - if err != nil { - t.Errorf("%v #%d %v", test.f, i, err) - continue - } - buf.Write(b) - - case fSdump: - str := spew.Sdump(test.in) - buf.WriteString(str) - - case fSprint: - str := spew.Sprint(test.in) - buf.WriteString(str) - - case fSprintf: - str := spew.Sprintf(test.format, test.in) - buf.WriteString(str) - - case fSprintln: - str := spew.Sprintln(test.in) - buf.WriteString(str) - - default: - t.Errorf("%v #%d unrecognized function", test.f, i) - continue - } - s := buf.String() - if test.want != s { - t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) - continue - } - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go deleted file mode 100644 index 5c87dd4..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2013 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -// NOTE: Due to the following build constraints, this file will only be compiled -// when both cgo is supported and "-tags testcgo" is added to the go test -// command line. This code should really only be in the dumpcgo_test.go file, -// but unfortunately Go will not allow cgo in test files, so this is a -// workaround to allow cgo types to be tested. This configuration is used -// because spew itself does not require cgo to run even though it does handle -// certain cgo types specially. Rather than forcing all clients to require cgo -// and an external C compiler just to run the tests, this scheme makes them -// optional. -// +build cgo,testcgo - -package testdata - -/* -#include -typedef unsigned char custom_uchar_t; - -char *ncp = 0; -char *cp = "test"; -char ca[6] = {'t', 'e', 's', 't', '2', '\0'}; -unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'}; -signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'}; -uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'}; -custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'}; -*/ -import "C" - -// GetCgoNullCharPointer returns a null char pointer via cgo. This is only -// used for tests. -func GetCgoNullCharPointer() interface{} { - return C.ncp -} - -// GetCgoCharPointer returns a char pointer via cgo. This is only used for -// tests. -func GetCgoCharPointer() interface{} { - return C.cp -} - -// GetCgoCharArray returns a char array via cgo and the array's len and cap. -// This is only used for tests. -func GetCgoCharArray() (interface{}, int, int) { - return C.ca, len(C.ca), cap(C.ca) -} - -// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the -// array's len and cap. This is only used for tests. -func GetCgoUnsignedCharArray() (interface{}, int, int) { - return C.uca, len(C.uca), cap(C.uca) -} - -// GetCgoSignedCharArray returns a signed char array via cgo and the array's len -// and cap. This is only used for tests. -func GetCgoSignedCharArray() (interface{}, int, int) { - return C.sca, len(C.sca), cap(C.sca) -} - -// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and -// cap. This is only used for tests. -func GetCgoUint8tArray() (interface{}, int, int) { - return C.ui8ta, len(C.ui8ta), cap(C.ui8ta) -} - -// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via -// cgo and the array's len and cap. This is only used for tests. 
-func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) { - return C.tuca, len(C.tuca), cap(C.tuca) -} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt deleted file mode 100644 index 2cd087a..0000000 --- a/vendor/github.com/davecgh/go-spew/test_coverage.txt +++ /dev/null @@ -1,61 +0,0 @@ - -github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) -github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) -github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) -github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) -github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) -github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) -github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) -github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) -github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) -github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) -github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) -github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) -github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) -github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) -github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) -github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) -github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) -github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) -github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) -github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) -github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) -github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) -github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) -github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) -github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) -github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Println 
100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) -github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) -github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) -github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) -github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) -github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) -github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) - diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb87..0000000 --- a/vendor/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index 8ee62b4..0000000 --- a/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,6 +0,0 @@ -context -======= - -gorilla/context is a general purpose registry for global request variables. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go deleted file mode 100644 index 35d6556..0000000 --- a/vendor/github.com/gorilla/context/context.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.Mutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - defer mutex.Unlock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.Lock() - defer mutex.Unlock() - if data[r] != nil { - return data[r][key] - } - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.Lock() - defer mutex.Unlock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - return value, ok - } - return nil, false -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - defer mutex.Unlock() - if data[r] != nil { - delete(data[r], key) - } -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - defer mutex.Unlock() - clear(r) -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - defer mutex.Unlock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r, _ := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/context/context_test.go b/vendor/github.com/gorilla/context/context_test.go deleted file mode 100644 index ff9e2ad..0000000 --- a/vendor/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
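For reference, a minimal sketch (separate from the patch itself) of how the gorilla/context API removed above is typically used; the key type, handler body, and listen address are invented for illustration:

    // Reference sketch, not part of this patch: typical use of the
    // gorilla/context API removed above. The key type, handler, and
    // listen address are invented for illustration.
    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/context"
    )

    type ctxKey int

    const userKey ctxKey = 0

    func handler(w http.ResponseWriter, r *http.Request) {
        // Set binds a value to this specific *http.Request; GetOk reads it
        // back together with a presence flag, as in context.go above.
        context.Set(r, userKey, "alice")
        if v, ok := context.GetOk(r, userKey); ok {
            fmt.Fprintln(w, v)
        }
    }

    func main() {
        // ClearHandler wraps the handler so the per-request map entries are
        // deleted once ServeHTTP returns, avoiding the leak that Purge()
        // exists to guard against.
        http.Handle("/", context.ClearHandler(http.HandlerFunc(handler)))
        _ = http.ListenAndServe(":8080", nil)
    }
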
- -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 2976064..0000000 --- a/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. 
Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d87d465..0000000 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb87..0000000 --- a/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index e60301b..0000000 --- a/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,7 +0,0 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -gorilla/mux is a powerful URL router and dispatcher. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/gorilla/mux/bench_test.go b/vendor/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index c5f97b2..0000000 --- a/vendor/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "net/http" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index b2deed3..0000000 --- a/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. 
To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. 
For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -*/ -package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index ca51a01..0000000 --- a/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "path" - - "github.com/gorilla/context" -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. - namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request - KeepContext bool -} - -// Match matches registered routes against the request. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - return true - } - } - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) - } - if handler == nil { - if r.NotFoundHandler == nil { - r.NotFoundHandler = http.NotFoundHandler() - } - handler = r.NotFoundHandler - } - if !r.KeepContext { - defer context.Clear(req) - } - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the slash behavior for new routes. -// -// When true, if the route path is "/path/", accessing "/path" will redirect -// to the former and vice versa. -// -// Special case: when a route sets a path prefix, strict slash is -// automatically set to false for that route because the redirect behavior -// can't be determined for prefixes. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} - r.routes = append(r.routes, route) - return route -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). 
-func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairs(pairs ...string) (map[string]string, error) { - length := len(pairs) - if length%2 != 0 { - return nil, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. 
-func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMap returns true if the given key/value pairs exist in a given map. -func matchMap(toCheck map[string]string, toMatch map[string][]string, - canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} diff --git a/vendor/github.com/gorilla/mux/mux_test.go b/vendor/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index 1a2a092..0000000 --- a/vendor/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "testing" - - "github.com/gorilla/context" -) - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all -} - - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG 
{new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPathPrefix(t *testing.T) 
{ - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: 
new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } - -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, match https", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "https://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", 
"ftp://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m *RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - var r *Router - var req *http.Request - var route *Route - var match *RouteMatch - var matched bool - - // StrictSlash should be ignored for path prefix. - // So we register a route ending in slash but it doesn't attempt to add - // the slash for a path not ending in slash. 
- r = NewRouter() - r.StrictSlash(true) - route = r.NewRoute().PathPrefix("/static/") - req, _ = http.NewRequest("GET", "http://localhost/static/logo.png", nil) - match = new(RouteMatch) - matched = r.Match(req, match) - if !matched { - t.Errorf("Should match request %q -- %v", req.URL.Path, getRouteTemplate(route)) - } - if match.Handler != nil { - t.Errorf("Should not redirect") - } -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if route.regexp.path != nil { - path = route.regexp.path.template - } - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - host := test.host - path := test.path - url := test.host + test.path - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if host != "" { - u, _ := test.route.URLHost(mapToPairs(match.Vars)...) - if host != u.Host { - t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) - return - } - } - if path != "" { - u, _ := route.URLPath(mapToPairs(match.Vars)...) - if path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) - return - } - } - if url != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) 
- if url != u.Host+u.Path { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) - return - } - } - } -} - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r:= NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} - - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write( b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader( code int ) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r:= NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status : 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true -} - -// newRequest is a helper function to create a new request with a method and url -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - 
panic(err) - } - return req -} diff --git a/vendor/github.com/gorilla/mux/old_test.go b/vendor/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 4253059..0000000 --- a/vendor/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,758 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). - PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. - scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. 
- scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - // Need better messages. :) - if matched { - t.Errorf("Should match.") - } else { - t.Errorf("Should not match.") - } - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. - reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. 
- reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type queryMatcherTest struct { - matcher queryMatcher - url string - result bool -} - -var queryMatcherTests = []queryMatcherTest{ - { - matcher: queryMatcher(map[string]string{"foo": "bar", "baz": "ding"}), - url: "http://localhost:8080/?foo=bar&baz=ding", - result: true, - }, - { - matcher: queryMatcher(map[string]string{"foo": "", "baz": ""}), - url: "http://localhost:8080/?foo=anything&baz=anything", - result: true, - }, - { - matcher: queryMatcher(map[string]string{"foo": "ding", "baz": "bar"}), - url: "http://localhost:8080/?foo=bar&baz=ding", - result: false, - }, - { - matcher: queryMatcher(map[string]string{"bar": "foo", "ding": "baz"}), - url: "http://localhost:8080/?foo=bar&baz=ding", - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - 
}, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = []urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value 
{ - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestQueryMatcher(t *testing.T) { - for _, v := range queryMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range queryMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtUrl := u.String() - // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. -func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 4c3482b..0000000 --- a/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
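Route templates pair a variable name with an optional regexp, e.g. `{id:[0-9]+}`, and the captured values are read back with mux.Vars, as the doc comments further down describe. A minimal application-level sketch (the path, port, and handler names here are invented for illustration and are not taken from the vendored sources):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// {id:[0-9]+} restricts the variable to digit runs, much like the
	// patterns exercised by TestNewRegexp above.
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req) // captured route variables
		fmt.Fprintf(w, "category=%s id=%s", vars["category"], vars["id"])
	})
	// The port is arbitrary for this sketch.
	log.Fatal(http.ListenAndServe(":8080", r))
}
```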
- -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host or path. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if matchHost { - defaultPattern = "[^.]+" - matchPrefix, strictSlash = false, false - } - if matchPrefix { - strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("^") - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if strictSlash { - pattern.WriteString("[/]?") - } - if !matchPrefix { - pattern.WriteByte('$') - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - // Done! - return &routeRegexp{ - template: template, - matchHost: matchHost, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // True for host match, false for path match. - matchHost bool - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - return r.regexp.MatchString(req.URL.Path) - } - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. 
-func (r *routeRegexp) url(pairs ...string) (string, error) { - values, err := mapFromPairs(pairs...) - if err != nil { - return "", err - } - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - idxs := make([]int, 0) - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } - } - } - // Store path variables. - if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } - // Check if we should redirect. - if r.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if !r.URL.IsAbs() { - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - } - return r.URL.Host -} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index 7766254..0000000 --- a/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -// Route stores information to match a request and build URLs. 
-type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - return false - } - } - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.getNamedRoutes()[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. 
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix bool) error { - if r.err != nil { - return r.err - } - r.regexp = r.getRegexpGroup() - if !matchHost { - if len(tpl) == 0 || tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, r.strictSlash) - if err != nil { - return err - } - if matchHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - r.regexp.path = rr - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairs(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.domain.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". 
-func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. -func (r *Route) PathPrefix(tpl string) *Route { - r.strictSlash = false - r.err = r.addRegexpMatcher(tpl, false, true) - return r -} - -// Query ---------------------------------------------------------------------- - -// queryMatcher matches the request against URL queries. -type queryMatcher map[string]string - -func (m queryMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.URL.Query(), false) -} - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "baz", "ding") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&baz=ding. -// -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) Queries(pairs ...string) *Route { - if r.err == nil { - var queries map[string]string - queries, r.err = mapFromPairs(pairs...) - return r.addMatcher(queryMatcher(queries)) - } - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. 
-func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } - var scheme, host, path string - var err error - if r.regexp.host != nil { - // Set a default scheme. - scheme = "http" - if host, err = r.regexp.host.url(pairs...); err != nil { - return nil, err - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(pairs...); err != nil { - return nil, err - } - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - host, err := r.regexp.host.url(pairs...) - if err != nil { - return nil, err - } - return &url.URL{ - Scheme: "http", - Host: host, - }, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - path, err := r.regexp.path.url(pairs...) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. 
-func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - } - } - } - return r.regexp -} diff --git a/vendor/github.com/pmezard/go-difflib/.travis.yml b/vendor/github.com/pmezard/go-difflib/.travis.yml deleted file mode 100644 index 90c9c6f..0000000 --- a/vendor/github.com/pmezard/go-difflib/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.5 - - tip - diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad6..0000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/README.md b/vendor/github.com/pmezard/go-difflib/README.md deleted file mode 100644 index e87f307..0000000 --- a/vendor/github.com/pmezard/go-difflib/README.md +++ /dev/null @@ -1,50 +0,0 @@ -go-difflib -========== - -[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib) -[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib) - -Go-difflib is a partial port of python 3 difflib package. Its main goal -was to make unified and context diff available in pure Go, mostly for -testing purposes. - -The following class and functions (and related tests) have be ported: - -* `SequenceMatcher` -* `unified_diff()` -* `context_diff()` - -## Installation - -```bash -$ go get github.com/pmezard/go-difflib/difflib -``` - -### Quick Start - -Diffs are configured with Unified (or ContextDiff) structures, and can -be output to an io.Writer or returned as a string. 
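The io.Writer path works the same way as the string helper shown next; a short sketch assuming the exported WriteUnifiedDiff function (the inputs and file labels are arbitrary):

```go
package main

import (
	"os"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("foo\nbar\n"),
		B:        difflib.SplitLines("foo\nbaz\n"),
		FromFile: "Original",
		ToFile:   "Current",
		Context:  3,
	}
	// Stream the unified diff straight to stdout instead of building a string.
	if err := difflib.WriteUnifiedDiff(os.Stdout, diff); err != nil {
		panic(err)
	}
}
```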
- -```Go -diff := UnifiedDiff{ - A: difflib.SplitLines("foo\nbar\n"), - B: difflib.SplitLines("foo\nbaz\n"), - FromFile: "Original", - ToFile: "Current", - Context: 3, -} -text, _ := GetUnifiedDiffString(diff) -fmt.Printf(text) -``` - -would output: - -``` ---- Original -+++ Current -@@ -1,3 +1,3 @@ - foo --bar -+baz -``` - diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 64cc40f..0000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,758 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. -func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - w := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := w(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := w("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := w("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. 
-func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. -func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - w := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - w("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - - first, last := g[0], g[len(g)-1] - w("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - w("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - w(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - w("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - w(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. 
-func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go deleted file mode 100644 index 94670be..0000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go +++ /dev/null @@ -1,352 +0,0 @@ -package difflib - -import ( - "bytes" - "fmt" - "math" - "reflect" - "strings" - "testing" -) - -func assertAlmostEqual(t *testing.T, a, b float64, places int) { - if math.Abs(a-b) > math.Pow10(-places) { - t.Errorf("%.7f != %.7f", a, b) - } -} - -func assertEqual(t *testing.T, a, b interface{}) { - if !reflect.DeepEqual(a, b) { - t.Errorf("%v != %v", a, b) - } -} - -func splitChars(s string) []string { - chars := make([]string, 0, len(s)) - // Assume ASCII inputs - for i := 0; i != len(s); i++ { - chars = append(chars, string(s[i])) - } - return chars -} - -func TestSequenceMatcherRatio(t *testing.T) { - s := NewMatcher(splitChars("abcd"), splitChars("bcde")) - assertEqual(t, s.Ratio(), 0.75) - assertEqual(t, s.QuickRatio(), 0.75) - assertEqual(t, s.RealQuickRatio(), 1.0) -} - -func TestGetOptCodes(t *testing.T) { - a := "qabxcd" - b := "abycdf" - s := NewMatcher(splitChars(a), splitChars(b)) - w := &bytes.Buffer{} - for _, op := range s.GetOpCodes() { - fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag), - op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2]) - } - result := string(w.Bytes()) - expected := `d a[0:1], (q) b[0:0] () -e a[1:3], (ab) b[0:2] (ab) -r a[3:4], (x) b[2:3] (y) -e a[4:6], (cd) b[3:5] (cd) -i a[6:6], () b[5:6] (f) -` - if expected != result { - t.Errorf("unexpected op codes: \n%s", result) - } -} - -func TestGroupedOpCodes(t *testing.T) { - a := []string{} - for i := 0; i != 39; i++ { - a = append(a, fmt.Sprintf("%02d", i)) - } - b := []string{} - b = append(b, a[:8]...) - b = append(b, " i") - b = append(b, a[8:19]...) - b = append(b, " x") - b = append(b, a[20:22]...) - b = append(b, a[27:34]...) - b = append(b, " y") - b = append(b, a[35:]...) 
- s := NewMatcher(a, b) - w := &bytes.Buffer{} - for _, g := range s.GetGroupedOpCodes(-1) { - fmt.Fprintf(w, "group\n") - for _, op := range g { - fmt.Fprintf(w, " %s, %d, %d, %d, %d\n", string(op.Tag), - op.I1, op.I2, op.J1, op.J2) - } - } - result := string(w.Bytes()) - expected := `group - e, 5, 8, 5, 8 - i, 8, 8, 8, 9 - e, 8, 11, 9, 12 -group - e, 16, 19, 17, 20 - r, 19, 20, 20, 21 - e, 20, 22, 21, 23 - d, 22, 27, 23, 23 - e, 27, 30, 23, 26 -group - e, 31, 34, 27, 30 - r, 34, 35, 30, 31 - e, 35, 38, 31, 34 -` - if expected != result { - t.Errorf("unexpected op codes: \n%s", result) - } -} - -func ExampleGetUnifiedDiffString() { - a := `one -two -three -four` - b := `zero -one -three -four` - diff := UnifiedDiff{ - A: SplitLines(a), - B: SplitLines(b), - FromFile: "Original", - FromDate: "2005-01-26 23:30:50", - ToFile: "Current", - ToDate: "2010-04-02 10:20:52", - Context: 3, - } - result, _ := GetUnifiedDiffString(diff) - fmt.Printf(strings.Replace(result, "\t", " ", -1)) - // Output: - // --- Original 2005-01-26 23:30:50 - // +++ Current 2010-04-02 10:20:52 - // @@ -1,4 +1,4 @@ - // +zero - // one - // -two - // three - // four -} - -func ExampleGetContextDiffString() { - a := `one -two -three -four` - b := `zero -one -tree -four` - diff := ContextDiff{ - A: SplitLines(a), - B: SplitLines(b), - FromFile: "Original", - ToFile: "Current", - Context: 3, - Eol: "\n", - } - result, _ := GetContextDiffString(diff) - fmt.Printf(strings.Replace(result, "\t", " ", -1)) - // Output: - // *** Original - // --- Current - // *************** - // *** 1,4 **** - // one - // ! two - // ! three - // four - // --- 1,4 ---- - // + zero - // one - // ! tree - // four -} - -func rep(s string, count int) string { - return strings.Repeat(s, count) -} - -func TestWithAsciiOneInsert(t *testing.T) { - sm := NewMatcher(splitChars(rep("b", 100)), - splitChars("a"+rep("b", 100))) - assertAlmostEqual(t, sm.Ratio(), 0.995, 3) - assertEqual(t, sm.GetOpCodes(), - []OpCode{{'i', 0, 0, 0, 1}, {'e', 0, 100, 1, 101}}) - assertEqual(t, len(sm.bPopular), 0) - - sm = NewMatcher(splitChars(rep("b", 100)), - splitChars(rep("b", 50)+"a"+rep("b", 50))) - assertAlmostEqual(t, sm.Ratio(), 0.995, 3) - assertEqual(t, sm.GetOpCodes(), - []OpCode{{'e', 0, 50, 0, 50}, {'i', 50, 50, 50, 51}, {'e', 50, 100, 51, 101}}) - assertEqual(t, len(sm.bPopular), 0) -} - -func TestWithAsciiOnDelete(t *testing.T) { - sm := NewMatcher(splitChars(rep("a", 40)+"c"+rep("b", 40)), - splitChars(rep("a", 40)+rep("b", 40))) - assertAlmostEqual(t, sm.Ratio(), 0.994, 3) - assertEqual(t, sm.GetOpCodes(), - []OpCode{{'e', 0, 40, 0, 40}, {'d', 40, 41, 40, 40}, {'e', 41, 81, 40, 80}}) -} - -func TestWithAsciiBJunk(t *testing.T) { - isJunk := func(s string) bool { - return s == " " - } - sm := NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), - splitChars(rep("a", 44)+rep("b", 40)), true, isJunk) - assertEqual(t, sm.bJunk, map[string]struct{}{}) - - sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), - splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) - assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}}) - - isJunk = func(s string) bool { - return s == " " || s == "b" - } - sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), - splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) - assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}}) -} - -func TestSFBugsRatioForNullSeqn(t *testing.T) { - sm := NewMatcher(nil, nil) - assertEqual(t, sm.Ratio(), 1.0) - assertEqual(t, 
sm.QuickRatio(), 1.0) - assertEqual(t, sm.RealQuickRatio(), 1.0) -} - -func TestSFBugsComparingEmptyLists(t *testing.T) { - groups := NewMatcher(nil, nil).GetGroupedOpCodes(-1) - assertEqual(t, len(groups), 0) - diff := UnifiedDiff{ - FromFile: "Original", - ToFile: "Current", - Context: 3, - } - result, err := GetUnifiedDiffString(diff) - assertEqual(t, err, nil) - assertEqual(t, result, "") -} - -func TestOutputFormatRangeFormatUnified(t *testing.T) { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - // - // Each field shall be of the form: - // %1d", if the range contains exactly one line, - // and: - // "%1d,%1d", , otherwise. - // If a range is empty, its beginning line number shall be the number of - // the line just before the range, or 0 if the empty range starts the file. - fm := formatRangeUnified - assertEqual(t, fm(3, 3), "3,0") - assertEqual(t, fm(3, 4), "4") - assertEqual(t, fm(3, 5), "4,2") - assertEqual(t, fm(3, 6), "4,3") - assertEqual(t, fm(0, 0), "0,0") -} - -func TestOutputFormatRangeFormatContext(t *testing.T) { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - // - // The range of lines in file1 shall be written in the following format - // if the range contains two or more lines: - // "*** %d,%d ****\n", , - // and the following format otherwise: - // "*** %d ****\n", - // The ending line number of an empty range shall be the number of the preceding line, - // or 0 if the range is at the start of the file. - // - // Next, the range of lines in file2 shall be written in the following format - // if the range contains two or more lines: - // "--- %d,%d ----\n", , - // and the following format otherwise: - // "--- %d ----\n", - fm := formatRangeContext - assertEqual(t, fm(3, 3), "3") - assertEqual(t, fm(3, 4), "4") - assertEqual(t, fm(3, 5), "4,5") - assertEqual(t, fm(3, 6), "4,6") - assertEqual(t, fm(0, 0), "0") -} - -func TestOutputFormatTabDelimiter(t *testing.T) { - diff := UnifiedDiff{ - A: splitChars("one"), - B: splitChars("two"), - FromFile: "Original", - FromDate: "2005-01-26 23:30:50", - ToFile: "Current", - ToDate: "2010-04-12 10:20:52", - Eol: "\n", - } - ud, err := GetUnifiedDiffString(diff) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(ud)[:2], []string{ - "--- Original\t2005-01-26 23:30:50\n", - "+++ Current\t2010-04-12 10:20:52\n", - }) - cd, err := GetContextDiffString(ContextDiff(diff)) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(cd)[:2], []string{ - "*** Original\t2005-01-26 23:30:50\n", - "--- Current\t2010-04-12 10:20:52\n", - }) -} - -func TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) { - diff := UnifiedDiff{ - A: splitChars("one"), - B: splitChars("two"), - FromFile: "Original", - ToFile: "Current", - Eol: "\n", - } - ud, err := GetUnifiedDiffString(diff) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(ud)[:2], []string{"--- Original\n", "+++ Current\n"}) - - cd, err := GetContextDiffString(ContextDiff(diff)) - assertEqual(t, err, nil) - assertEqual(t, SplitLines(cd)[:2], []string{"*** Original\n", "--- Current\n"}) -} - -func TestSplitLines(t *testing.T) { - allTests := []struct { - input string - want []string - }{ - {"foo", []string{"foo\n"}}, - {"foo\nbar", []string{"foo\n", "bar\n"}}, - {"foo\nbar\n", []string{"foo\n", "bar\n", "\n"}}, - } - for _, test := range allTests { - assertEqual(t, SplitLines(test.input), test.want) - } -} - -func benchmarkSplitLines(b *testing.B, count int) { - str := strings.Repeat("foo\n", count) - - b.ResetTimer() - - n := 0 
- for i := 0; i < b.N; i++ { - n += len(SplitLines(str)) - } -} - -func BenchmarkSplitLines100(b *testing.B) { - benchmarkSplitLines(b, 100) -} - -func BenchmarkSplitLines10000(b *testing.B) { - benchmarkSplitLines(b, 10000) -} diff --git a/vendor/github.com/stretchr/testify b/vendor/github.com/stretchr/testify deleted file mode 160000 index 4d4bfba..0000000 --- a/vendor/github.com/stretchr/testify +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 4d4bfba8f1d1027c4fdbe371823030df51419987 diff --git a/vendor/gopkg.in/tomb.v1/LICENSE b/vendor/gopkg.in/tomb.v1/LICENSE deleted file mode 100644 index a4249bb..0000000 --- a/vendor/gopkg.in/tomb.v1/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -tomb - support for clean goroutine termination in Go. - -Copyright (c) 2010-2011 - Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/tomb.v1/README.md b/vendor/gopkg.in/tomb.v1/README.md deleted file mode 100644 index 3ae8788..0000000 --- a/vendor/gopkg.in/tomb.v1/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Installation and usage ----------------------- - -See [gopkg.in/tomb.v1](https://gopkg.in/tomb.v1) for documentation and usage details. diff --git a/vendor/gopkg.in/tomb.v1/tomb.go b/vendor/gopkg.in/tomb.v1/tomb.go deleted file mode 100644 index 5bcd5f8..0000000 --- a/vendor/gopkg.in/tomb.v1/tomb.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) 2011 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// * Neither the name of the copyright holder nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The tomb package offers a conventional API for clean goroutine termination. -// -// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, -// and the reason for its death. -// -// The zero value of a Tomb assumes that a goroutine is about to be -// created or already alive. Once Kill or Killf is called with an -// argument that informs the reason for death, the goroutine is in -// a dying state and is expected to terminate soon. Right before the -// goroutine function or method returns, Done must be called to inform -// that the goroutine is indeed dead and about to stop running. -// -// A Tomb exposes Dying and Dead channels. These channels are closed -// when the Tomb state changes in the respective way. They enable -// explicit blocking until the state changes, and also to selectively -// unblock select statements accordingly. -// -// When the tomb state changes to dying and there's still logic going -// on within the goroutine, nested functions and methods may choose to -// return ErrDying as their error value, as this error won't alter the -// tomb state if provied to the Kill method. This is a convenient way to -// follow standard Go practices in the context of a dying tomb. -// -// For background and a detailed example, see the following blog post: -// -// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control -// -// For a more complex code snippet demonstrating the use of multiple -// goroutines with a single Tomb, see: -// -// http://play.golang.org/p/Xh7qWsDPZP -// -package tomb - -import ( - "errors" - "fmt" - "sync" -) - -// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, -// and the reason for its death. -// -// See the package documentation for details. -type Tomb struct { - m sync.Mutex - dying chan struct{} - dead chan struct{} - reason error -} - -var ( - ErrStillAlive = errors.New("tomb: still alive") - ErrDying = errors.New("tomb: dying") -) - -func (t *Tomb) init() { - t.m.Lock() - if t.dead == nil { - t.dead = make(chan struct{}) - t.dying = make(chan struct{}) - t.reason = ErrStillAlive - } - t.m.Unlock() -} - -// Dead returns the channel that can be used to wait -// until t.Done has been called. -func (t *Tomb) Dead() <-chan struct{} { - t.init() - return t.dead -} - -// Dying returns the channel that can be used to wait -// until t.Kill or t.Done has been called. 
-func (t *Tomb) Dying() <-chan struct{} { - t.init() - return t.dying -} - -// Wait blocks until the goroutine is in a dead state and returns the -// reason for its death. -func (t *Tomb) Wait() error { - t.init() - <-t.dead - t.m.Lock() - reason := t.reason - t.m.Unlock() - return reason -} - -// Done flags the goroutine as dead, and should be called a single time -// right before the goroutine function or method returns. -// If the goroutine was not already in a dying state before Done is -// called, it will be flagged as dying and dead at once with no -// error. -func (t *Tomb) Done() { - t.Kill(nil) - close(t.dead) -} - -// Kill flags the goroutine as dying for the given reason. -// Kill may be called multiple times, but only the first -// non-nil error is recorded as the reason for termination. -// -// If reason is ErrDying, the previous reason isn't replaced -// even if it is nil. It's a runtime error to call Kill with -// ErrDying if t is not in a dying state. -func (t *Tomb) Kill(reason error) { - t.init() - t.m.Lock() - defer t.m.Unlock() - if reason == ErrDying { - if t.reason == ErrStillAlive { - panic("tomb: Kill with ErrDying while still alive") - } - return - } - if t.reason == nil || t.reason == ErrStillAlive { - t.reason = reason - } - // If the receive on t.dying succeeds, then - // it can only be because we have already closed it. - // If it blocks, then we know that it needs to be closed. - select { - case <-t.dying: - default: - close(t.dying) - } -} - -// Killf works like Kill, but builds the reason providing the received -// arguments to fmt.Errorf. The generated error is also returned. -func (t *Tomb) Killf(f string, a ...interface{}) error { - err := fmt.Errorf(f, a...) - t.Kill(err) - return err -} - -// Err returns the reason for the goroutine death provided via Kill -// or Killf, or ErrStillAlive when the goroutine is still alive. -func (t *Tomb) Err() (reason error) { - t.init() - t.m.Lock() - reason = t.reason - t.m.Unlock() - return -} diff --git a/vendor/gopkg.in/tomb.v1/tomb_test.go b/vendor/gopkg.in/tomb.v1/tomb_test.go deleted file mode 100644 index 71f11fa..0000000 --- a/vendor/gopkg.in/tomb.v1/tomb_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package tomb_test - -import ( - "errors" - "gopkg.in/tomb.v1" - "reflect" - "testing" -) - -func TestNewTomb(t *testing.T) { - tb := &tomb.Tomb{} - testState(t, tb, false, false, tomb.ErrStillAlive) - - tb.Done() - testState(t, tb, true, true, nil) -} - -func TestKill(t *testing.T) { - // a nil reason flags the goroutine as dying - tb := &tomb.Tomb{} - tb.Kill(nil) - testState(t, tb, true, false, nil) - - // a non-nil reason now will override Kill - err := errors.New("some error") - tb.Kill(err) - testState(t, tb, true, false, err) - - // another non-nil reason won't replace the first one - tb.Kill(errors.New("ignore me")) - testState(t, tb, true, false, err) - - tb.Done() - testState(t, tb, true, true, err) -} - -func TestKillf(t *testing.T) { - tb := &tomb.Tomb{} - - err := tb.Killf("BO%s", "OM") - if s := err.Error(); s != "BOOM" { - t.Fatalf(`Killf("BO%s", "OM"): want "BOOM", got %q`, s) - } - testState(t, tb, true, false, err) - - // another non-nil reason won't replace the first one - tb.Killf("ignore me") - testState(t, tb, true, false, err) - - tb.Done() - testState(t, tb, true, true, err) -} - -func TestErrDying(t *testing.T) { - // ErrDying being used properly, after a clean death. 
- tb := &tomb.Tomb{} - tb.Kill(nil) - tb.Kill(tomb.ErrDying) - testState(t, tb, true, false, nil) - - // ErrDying being used properly, after an errorful death. - err := errors.New("some error") - tb.Kill(err) - tb.Kill(tomb.ErrDying) - testState(t, tb, true, false, err) - - // ErrDying being used badly, with an alive tomb. - tb = &tomb.Tomb{} - defer func() { - err := recover() - if err != "tomb: Kill with ErrDying while still alive" { - t.Fatalf("Wrong panic on Kill(ErrDying): %v", err) - } - testState(t, tb, false, false, tomb.ErrStillAlive) - }() - tb.Kill(tomb.ErrDying) -} - -func testState(t *testing.T, tb *tomb.Tomb, wantDying, wantDead bool, wantErr error) { - select { - case <-tb.Dying(): - if !wantDying { - t.Error("<-Dying: should block") - } - default: - if wantDying { - t.Error("<-Dying: should not block") - } - } - seemsDead := false - select { - case <-tb.Dead(): - if !wantDead { - t.Error("<-Dead: should block") - } - seemsDead = true - default: - if wantDead { - t.Error("<-Dead: should not block") - } - } - if err := tb.Err(); err != wantErr { - t.Errorf("Err: want %#v, got %#v", wantErr, err) - } - if wantDead && seemsDead { - waitErr := tb.Wait() - switch { - case waitErr == tomb.ErrStillAlive: - t.Errorf("Wait should not return ErrStillAlive") - case !reflect.DeepEqual(waitErr, wantErr): - t.Errorf("Wait: want %#v, got %#v", wantErr, waitErr) - } - } -}
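
For reference, a minimal sketch (not part of the patch itself) of driving the go-difflib API whose vendored copy is deleted above, assuming the dependency is now resolved through go.mod and keeps the import path github.com/pmezard/go-difflib/difflib; it is adapted from the ExampleGetUnifiedDiffString test removed in this diff.

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := "one\ntwo\nthree\nfour\n"
	b := "zero\none\nthree\nfour\n"

	// SplitLines keeps the trailing newlines, which is the form the
	// unified-diff writer expects for both inputs.
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines(a),
		B:        difflib.SplitLines(b),
		FromFile: "Original",
		ToFile:   "Current",
		Context:  3,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}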
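
Likewise, a small sketch of the gopkg.in/tomb.v1 lifecycle (zero-value Tomb, Kill, Done, Dying, Wait) that the removed tomb.go documents, again assuming the package is now fetched via go.mod rather than vendored.

package main

import (
	"fmt"
	"time"

	"gopkg.in/tomb.v1"
)

func main() {
	var t tomb.Tomb

	// Worker goroutine: run until the tomb starts dying, then call Done
	// exactly once right before returning.
	go func() {
		defer t.Done()
		for {
			select {
			case <-t.Dying():
				return
			case <-time.After(10 * time.Millisecond):
				// ... do one unit of work ...
			}
		}
	}()

	// Ask the goroutine to stop and wait for it; Wait returns the reason
	// passed to Kill, so nil here means a clean shutdown.
	t.Kill(nil)
	if err := t.Wait(); err != nil {
		fmt.Println("worker died with error:", err)
	} else {
		fmt.Println("worker stopped cleanly")
	}
}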