diff --git a/go.mod b/go.mod index 40ec0f26e2..5cecdd5bcf 100644 --- a/go.mod +++ b/go.mod @@ -60,12 +60,15 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 github.com/kubesphere/sonargo v0.0.2 + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/mattn/go-runewidth v0.0.4 // indirect github.com/mitchellh/mapstructure v1.2.2 github.com/onsi/ginkgo v1.16.4 - github.com/onsi/gomega v1.14.0 + github.com/onsi/gomega v1.15.0 github.com/open-policy-agent/opa v0.18.0 github.com/opencontainers/go-digest v1.0.0 + github.com/operator-framework/helm-operator-plugins v0.0.8-0.20210810182245-240cc447b3de github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pkg/errors v0.9.1 github.com/projectcalico/kube-controllers v3.8.8+incompatible @@ -95,11 +98,11 @@ require ( gopkg.in/square/go-jose.v1 v1.1.2 // indirect gopkg.in/square/go-jose.v2 v2.4.0 gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect - gopkg.in/src-d/go-git.v4 v4.11.0 + gopkg.in/src-d/go-git.v4 v4.13.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b gotest.tools v2.2.0+incompatible - helm.sh/helm/v3 v3.5.0 + helm.sh/helm/v3 v3.6.3 istio.io/api v0.0.0-20201113182140-d4b7e3fc2b44 istio.io/client-go v0.0.0-20201113183938-0734e976e785 istio.io/gogo-genproto v0.0.0-20201113182723-5b8563d8a012 // indirect @@ -113,7 +116,7 @@ require ( k8s.io/component-base v0.21.2 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.8.0 - k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d + k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e k8s.io/kubectl v0.21.2 k8s.io/metrics v0.21.2 k8s.io/utils v0.0.0-20210527160623-6fdb442a123b @@ -130,6 +133,7 @@ require ( ) replace ( + bitbucket.org/liamstask/goose => bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c cloud.google.com/go => cloud.google.com/go v0.56.0 cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.4.0 cloud.google.com/go/bigtable => cloud.google.com/go/bigtable v1.2.0 @@ -153,6 +157,8 @@ replace ( github.com/BurntSushi/toml => github.com/BurntSushi/toml v0.3.1 github.com/DATA-DOG/go-sqlmock => github.com/DATA-DOG/go-sqlmock v1.3.3 github.com/DataDog/datadog-go => github.com/DataDog/datadog-go v3.2.0+incompatible + github.com/GeertJohan/go.incremental => github.com/GeertJohan/go.incremental v1.0.0 + github.com/GeertJohan/go.rice => github.com/GeertJohan/go.rice v1.0.0 github.com/Knetic/govaluate => github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible github.com/MakeNowJust/heredoc => github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e github.com/Masterminds/goutils => github.com/Masterminds/goutils v1.1.0 @@ -175,6 +181,7 @@ replace ( github.com/afex/hystrix-go => github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5 github.com/agnivade/levenshtein => github.com/agnivade/levenshtein v1.0.1 github.com/ajstarks/svgo => github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af + github.com/akavel/rsrc => github.com/akavel/rsrc v0.8.0 github.com/alcortesm/tgz => github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 github.com/alecthomas/template => github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 github.com/alecthomas/units => github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d @@ -202,6 +209,7 @@ replace ( github.com/bgentry/speakeasy => github.com/bgentry/speakeasy v0.1.0 
github.com/bitly/go-hostpool => github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 github.com/blang/semver => github.com/blang/semver v3.5.0+incompatible + github.com/blang/semver/v4 => github.com/blang/semver/v4 v4.0.0 github.com/bmizerany/assert => github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/bmizerany/pat => github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 github.com/boltdb/bolt => github.com/boltdb/bolt v1.3.1 @@ -215,6 +223,7 @@ replace ( github.com/casbin/casbin/v2 => github.com/casbin/casbin/v2 v2.1.2 github.com/cenkalti/backoff => github.com/cenkalti/backoff v2.2.1+incompatible github.com/census-instrumentation/opencensus-proto => github.com/census-instrumentation/opencensus-proto v0.2.1 + github.com/certifi/gocertifi => github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261 github.com/cespare/xxhash => github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash/v2 => github.com/cespare/xxhash/v2 v2.1.1 github.com/chai2010/gettext-go => github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 @@ -226,6 +235,10 @@ replace ( github.com/circonus-labs/circonus-gometrics => github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible github.com/circonus-labs/circonusllhist => github.com/circonus-labs/circonusllhist v0.1.3 github.com/clbanning/x2j => github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec + github.com/cloudflare/backoff => github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a + github.com/cloudflare/cfssl => github.com/cloudflare/cfssl v1.5.0 + github.com/cloudflare/go-metrics => github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41 + github.com/cloudflare/redoctober => github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c github.com/cockroachdb/apd => github.com/cockroachdb/apd v1.1.0 github.com/cockroachdb/cockroach-go => github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c github.com/cockroachdb/datadriven => github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa @@ -254,6 +267,7 @@ replace ( github.com/cznic/sortutil => github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 github.com/cznic/strutil => github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 github.com/cznic/zappy => github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc + github.com/daaku/go.zipexe => github.com/daaku/go.zipexe v1.0.0 github.com/dave/jennifer => github.com/dave/jennifer v1.2.0 github.com/davecgh/go-spew => github.com/davecgh/go-spew v1.1.1 github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd @@ -311,10 +325,12 @@ replace ( github.com/fsouza/fake-gcs-server => github.com/fsouza/fake-gcs-server v1.7.0 github.com/fvbommel/sortorder => github.com/fvbommel/sortorder v1.0.1 github.com/garyburd/redigo => github.com/garyburd/redigo v1.6.0 + github.com/getsentry/raven-go => github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7 github.com/ghodss/yaml => github.com/ghodss/yaml v1.0.0 github.com/gliderlabs/ssh => github.com/gliderlabs/ssh v0.1.1 github.com/glycerine/go-unsnap-stream => github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd github.com/glycerine/goconvey => github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 + github.com/go-bindata/go-bindata/v3 => github.com/go-bindata/go-bindata/v3 v3.1.3 github.com/go-errors/errors => github.com/go-errors/errors v1.0.1 github.com/go-kit/kit => github.com/go-kit/kit v0.10.0 
github.com/go-ldap/ldap => github.com/go-ldap/ldap v3.0.3+incompatible @@ -377,6 +393,7 @@ replace ( github.com/gomodule/redigo => github.com/gomodule/redigo v2.0.0+incompatible github.com/google/addlicense => github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76 github.com/google/btree => github.com/google/btree v1.0.0 + github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.21 github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 github.com/google/go-cmp => github.com/google/go-cmp v0.4.0 github.com/google/go-containerregistry => github.com/google/go-containerregistry v0.6.0 @@ -426,6 +443,7 @@ replace ( github.com/hodgesds/perf-utils => github.com/hodgesds/perf-utils v0.0.8 github.com/huandu/xstrings => github.com/huandu/xstrings v1.2.0 github.com/hudl/fargo => github.com/hudl/fargo v1.3.0 + github.com/iancoleman/strcase => github.com/iancoleman/strcase v0.1.2 github.com/ianlancetaylor/demangle => github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 github.com/imdario/mergo => github.com/imdario/mergo v0.3.9 github.com/inconshreveable/mousetrap => github.com/inconshreveable/mousetrap v1.0.0 @@ -443,7 +461,9 @@ replace ( github.com/jbenet/go-context => github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 github.com/jessevdk/go-flags => github.com/jessevdk/go-flags v1.4.0 github.com/jmespath/go-jmespath => github.com/jmespath/go-jmespath v0.3.0 + github.com/jmhodges/clock => github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 github.com/jmoiron/sqlx => github.com/jmoiron/sqlx v1.2.0 + github.com/joelanford/go-apidiff => github.com/joelanford/go-apidiff v0.1.0 github.com/joeshaw/multierror => github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 github.com/joho/godotenv => github.com/joho/godotenv v1.3.0 github.com/jonboulle/clockwork => github.com/jonboulle/clockwork v0.1.0 @@ -463,6 +483,8 @@ replace ( github.com/kevinburke/ssh_config => github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e github.com/kisielk/errcheck => github.com/kisielk/errcheck v1.2.0 github.com/kisielk/gotool => github.com/kisielk/gotool v1.0.0 + github.com/kisielk/sqlstruct => github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49 + github.com/kisom/goutils => github.com/kisom/goutils v1.1.0 github.com/klauspost/compress => github.com/klauspost/compress v1.9.5 github.com/klauspost/cpuid => github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 github.com/klauspost/crc32 => github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 @@ -475,7 +497,10 @@ replace ( github.com/kshvakov/clickhouse => github.com/kshvakov/clickhouse v1.3.5 github.com/kubernetes-csi/external-snapshotter/client/v3 => github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 github.com/kubesphere/sonargo => github.com/kubesphere/sonargo v0.0.2 + github.com/kylelemons/go-gypsy => github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28 github.com/kylelemons/godebug => github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb + github.com/lann/builder => github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 + github.com/lann/ps => github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 github.com/leanovate/gopter => github.com/leanovate/gopter v0.2.4 github.com/leodido/go-urn => github.com/leodido/go-urn v0.0.0-20181204092800-a67a23e1c1af github.com/lib/pq => github.com/lib/pq v1.2.0 @@ -505,6 +530,8 @@ replace ( github.com/mdlayher/wifi => github.com/mdlayher/wifi 
v0.0.0-20190303161829-b1436901ddee github.com/mgutz/ansi => github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b github.com/miekg/dns => github.com/miekg/dns v1.1.29 + github.com/mikefarah/yaml/v2 => github.com/mikefarah/yaml/v2 v2.4.0 + github.com/mikefarah/yq/v2 => github.com/mikefarah/yq/v2 v2.4.1 github.com/minio/md5-simd => github.com/minio/md5-simd v1.1.0 github.com/minio/minio-go/v7 => github.com/minio/minio-go/v7 v7.0.2 github.com/minio/sha256-simd => github.com/minio/sha256-simd v0.1.1 @@ -526,6 +553,7 @@ replace ( github.com/morikuni/aec => github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c github.com/mozillazg/go-cos => github.com/mozillazg/go-cos v0.13.0 github.com/mozillazg/go-httpheader => github.com/mozillazg/go-httpheader v0.2.1 + github.com/mreiferson/go-httpclient => github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474 github.com/mschoch/smat => github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae github.com/munnerz/goautoneg => github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack => github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f @@ -538,6 +566,7 @@ replace ( github.com/nats-io/nuid => github.com/nats-io/nuid v1.0.1 github.com/ncw/swift => github.com/ncw/swift v1.0.50 github.com/niemeyer/pretty => github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e + github.com/nkovacs/streamquote => github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 github.com/nxadm/tail => github.com/nxadm/tail v1.4.4 github.com/oklog/oklog => github.com/oklog/oklog v0.3.2 github.com/oklog/run => github.com/oklog/run v1.1.0 @@ -557,6 +586,9 @@ replace ( github.com/opentracing/opentracing-go => github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing => github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 github.com/openzipkin/zipkin-go => github.com/openzipkin/zipkin-go v0.2.2 + github.com/operator-framework/api => github.com/operator-framework/api v0.4.0 + github.com/operator-framework/helm-operator-plugins => github.com/operator-framework/helm-operator-plugins v0.0.8-0.20210810182245-240cc447b3de + github.com/operator-framework/operator-lib => github.com/operator-framework/operator-lib v0.3.0 github.com/pact-foundation/pact-go => github.com/pact-foundation/pact-go v1.0.4 github.com/pascaldekloe/goe => github.com/pascaldekloe/goe v0.1.0 github.com/patrickmn/go-cache => github.com/patrickmn/go-cache v2.1.0+incompatible @@ -643,9 +675,12 @@ replace ( github.com/uber/jaeger-lib => github.com/uber/jaeger-lib v2.2.0+incompatible github.com/ugorji/go => github.com/ugorji/go v1.1.4 github.com/urfave/cli => github.com/urfave/cli v1.20.0 + github.com/valyala/bytebufferpool => github.com/valyala/bytebufferpool v1.0.0 + github.com/valyala/fasttemplate => github.com/valyala/fasttemplate v1.0.1 github.com/vektah/gqlparser => github.com/vektah/gqlparser v1.1.2 github.com/weaveworks/common => github.com/weaveworks/common v0.0.0-20200820123129-280614068c5e github.com/weaveworks/promrus => github.com/weaveworks/promrus v1.2.0 + github.com/weppos/publicsuffix-go => github.com/weppos/publicsuffix-go v0.13.0 github.com/willf/bitset => github.com/willf/bitset v1.1.3 github.com/xanzy/go-gitlab => github.com/xanzy/go-gitlab v0.15.0 github.com/xanzy/ssh-agent => github.com/xanzy/ssh-agent v0.2.1 @@ -663,6 +698,10 @@ replace ( github.com/yvasiyarov/gorelic => github.com/yvasiyarov/gorelic v0.0.6 github.com/yvasiyarov/newrelic_platform_go => 
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f github.com/ziutek/mymysql => github.com/ziutek/mymysql v1.5.4 + github.com/zmap/rc2 => github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521 + github.com/zmap/zcertificate => github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4 + github.com/zmap/zcrypto => github.com/zmap/zcrypto v0.0.0-20200911161511-43ff0ea04f21 + github.com/zmap/zlint/v2 => github.com/zmap/zlint/v2 v2.2.1 gitlab.com/nyarla/go-crypt => gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b go.elastic.co/apm => go.elastic.co/apm v1.5.0 go.elastic.co/apm/module/apmhttp => go.elastic.co/apm/module/apmhttp v1.5.0 @@ -716,9 +755,11 @@ replace ( gopkg.in/go-playground/assert.v1 => gopkg.in/go-playground/assert.v1 v1.2.1 gopkg.in/go-playground/validator.v9 => gopkg.in/go-playground/validator.v9 v9.27.0 gopkg.in/gorp.v1 => gopkg.in/gorp.v1 v1.7.2 + gopkg.in/imdario/mergo.v0 => gopkg.in/imdario/mergo.v0 v0.3.7 gopkg.in/inf.v0 => gopkg.in/inf.v0 v0.9.1 gopkg.in/ini.v1 => gopkg.in/ini.v1 v1.57.0 gopkg.in/natefinch/lumberjack.v2 => gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/op/go-logging.v1 => gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 gopkg.in/square/go-jose.v1 => gopkg.in/square/go-jose.v1 v1.1.2 gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.4.0 gopkg.in/src-d/go-billy.v4 => gopkg.in/src-d/go-billy.v4 v4.3.0 @@ -731,7 +772,7 @@ replace ( gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c gotest.tools => gotest.tools v2.2.0+incompatible gotest.tools/v3 => gotest.tools/v3 v3.0.3 - helm.sh/helm/v3 => helm.sh/helm/v3 v3.5.0 + helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.3 honnef.co/go/tools => honnef.co/go/tools v0.0.1-2020.1.3 howett.net/plist => howett.net/plist v0.0.0-20181124034731-591f970eefbb istio.io/api => istio.io/api v0.0.0-20201113182140-d4b7e3fc2b44 @@ -767,6 +808,7 @@ replace ( sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.3 sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.6.2 sigs.k8s.io/kind => sigs.k8s.io/kind v0.8.1 + sigs.k8s.io/kubebuilder/v3 => sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210716121009-fde793f20067 sigs.k8s.io/kubefed => sigs.k8s.io/kubefed v0.8.1 sigs.k8s.io/kustomize/api => sigs.k8s.io/kustomize/api v0.8.8 sigs.k8s.io/kustomize/cmd/config => sigs.k8s.io/kustomize/cmd/config v0.9.10 diff --git a/go.sum b/go.sum index e83a638ae3..60fcfa1d19 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= @@ -22,15 +23,21 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 
+github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e h1:eb0Pzkt15Bm7f2FFYv7sjY7NPFi3cPkS3tv1CcrFBWA= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.0.1 h1:2kKm5lb7dKVrt5TYUiAavE6oFc1cFT0057UVGT+JqLk= github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.0.0 h1:KSQz7Nb08/3VU9E4ns29dDxcczhOD1q7O1UfM4G3t3g= github.com/Masterminds/sprig/v3 v3.0.0/go.mod h1:NEUY/Qq8Gdm2xgYA+NwJM6wmfdRV9xkh8h/Rld20R0U= +github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 h1:PPfYWScYacO3Q6JMCLkyh6Ea2Q/REDTMgmiTAeiV8Jg= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= @@ -56,6 +63,7 @@ github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= @@ -93,6 +101,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= @@ -109,6 +118,7 @@ github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= @@ -122,6 +132,10 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/cfssl v1.5.0/go.mod h1:sPPkBS5L8l8sRc/IOO1jG51Xb34u+TYhL6P//JdODMQ= +github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= +github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= @@ -162,6 +176,7 @@ github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -247,12 +262,14 @@ github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7R github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc= github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-bindata/go-bindata/v3 
v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -297,17 +314,25 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/genny v0.1.1 h1:iQ0D6SpNXIxu52WESsD+KoQ7af2e3nCfnSBoSF/hKe0= github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.1.1 h1:dLg+zb+uOyd/mKeQUYIbwbNmfRsr9hd/WtYWepmayhI= github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2 h1:8thhT+kUJMTMy3HlX4+y9Da+BNJck+p109tqqKp7WDs= github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.2 h1:fq9WcL1BYrm36SzK6+aAnZ8hcp+SrmnDyAxhNx8dvJk= github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.1.0 h1:4sGKOD8yaYJ+dek1FDkwcxCHA40M4kfKgFHx8N2kwbU= github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.2.0 h1:Ir9W9XIm9j7bhhkKE9cokvtTl1vBm62A/fene/ZCj6A= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754 h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -346,6 +371,7 @@ github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -410,8 +436,10 @@ github.com/hashicorp/mdns 
v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= +github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.1.2/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -433,8 +461,12 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= +github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/joelanford/go-apidiff v0.1.0/go.mod h1:wgVWgVCwYYkjcYpJtBnWYkyUYZfVovO3Y5pX49mJsqs= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -453,6 +485,7 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro= github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/karrick/godirwalk v1.10.3 h1:lOpSw2vJP0y5eLBW906QwKsUK/fe/QDyoqM5rnnuPDY= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= @@ -460,6 +493,8 @@ github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kisom/goutils 
v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -478,7 +513,12 @@ github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0 h1:OYDCOjVcx/5wN github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0/go.mod h1:Q7VUue/CIrKbtpBdF04a1yjGGgsMaDws1HUxtjzgnEY= github.com/kubesphere/sonargo v0.0.2 h1:hsSRE3sv3mkPcUAeSABdp7rtfcNW2zzeHXzFa01CTkU= github.com/kubesphere/sonargo v0.0.2/go.mod h1:ww8n9ANlDXhX5PBZ18iaRnCgEkXN0GMml3/KZXOZ11w= +github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/leodido/go-urn v0.0.0-20181204092800-a67a23e1c1af/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= @@ -494,8 +534,10 @@ github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDe github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2 h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -518,6 +560,8 @@ github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46 github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.29 h1:xHBEhR+t5RzcFJjBLJlax2daXOrTYtr9z4WdKEfWFzg= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= +github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns= github.com/minio/sha256-simd v0.1.1/go.mod 
h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= @@ -550,6 +594,7 @@ github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8d github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -565,6 +610,7 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= @@ -593,6 +639,11 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/operator-framework/api v0.4.0/go.mod h1:xXYReW8+PpSBHMxsf0e7uhtfQTLqIM1iz4X6zUs20+c= +github.com/operator-framework/helm-operator-plugins v0.0.8-0.20210810182245-240cc447b3de h1:3Mwj+lME51OdXRLqfQbI1a05DuQ9qfHK/0wNd7Dphes= +github.com/operator-framework/helm-operator-plugins v0.0.8-0.20210810182245-240cc447b3de/go.mod h1:Y/nfp35BlIMvae+nre4qUWLRl2pHKKnonrfgZUOABqY= +github.com/operator-framework/operator-lib v0.3.0 h1:eGJv0k7dEHIT9vfArWPo6U4eVurAOqhYUzXiHEaZq9g= +github.com/operator-framework/operator-lib v0.3.0/go.mod h1:LTp5UQd8ivq4MXqm/W/XHulHQ0RRoZXsAj73sNMAQxc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -655,8 +706,10 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -722,9 +775,12 @@ github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/ github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/weaveworks/common v0.0.0-20200820123129-280614068c5e/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -752,7 +808,12 @@ github.com/yvasiyarov/gorelic v0.0.6 h1:qMJQYPNdtJ7UNYHjX38KXZtltKTqimMuoQjNnSVI github.com/yvasiyarov/gorelic v0.0.6/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20200911161511-43ff0ea04f21/go.mod h1:TxpejqcVKQjQaVVmMGfzx5HnmFMdIU+vLtaCyPBfGI4= +github.com/zmap/zlint/v2 v2.2.1/go.mod h1:ixPWsdq8qLxYRpNUTbcKig3R7WgmspsHGLhCCs6rFAM= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= @@ -834,12 +895,15 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.27.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/gorp.v1 v1.7.2 
h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/square/go-jose.v1 v1.1.2 h1:/5jmADZB+RiKtZGr4HxsEFOEfbfsjTKsVnqpThUpE30= gopkg.in/square/go-jose.v1 v1.1.2/go.mod h1:QpYS+a4WhS+DTlyQIi6Ka7MS3SuR9a055rgXNEe6EiA= gopkg.in/square/go-jose.v2 v2.4.0 h1:0kXPskUMGAXXWJlP05ktEMOV0vmzFQUWw6d+aZJQU8A= @@ -863,8 +927,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -helm.sh/helm/v3 v3.5.0 h1:uqIT3Bh4hVEyZRThyTPik8FkiABj3VJIY+POvDFT3a4= -helm.sh/helm/v3 v3.5.0/go.mod h1:bjwXfmGAF+SEuJZ2AtN1xmTuz4FqaNYOJrXP+vtj6Tw= +helm.sh/helm/v3 v3.6.3 h1:0nKDyXJr23nI3JrcP7HH7NcR+CYRvro/52Dvr1KhGO0= +helm.sh/helm/v3 v3.6.3/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= @@ -922,6 +986,7 @@ sigs.k8s.io/controller-runtime v0.9.3/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aK sigs.k8s.io/controller-tools v0.6.2 h1:+Y8L0UsAugDipGRw8lrkPoAi6XqlQVZuf1DQHME3PgU= sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8= sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4= +sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210716121009-fde793f20067/go.mod h1:pUhjQx9f/+cn1OtSa5zMohY1lgk9s/9Mbcvwj82lrNk= sigs.k8s.io/kubefed v0.8.1 h1:xPMQAPi0L7t4MFdSGQvUuNQ1PmmyOp2QP3NSY7nr7nI= sigs.k8s.io/kubefed v0.8.1/go.mod h1:t383bqir7psk7VpABvlhYRldGOoDwKoVo33xAmCGueI= sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 0000000000..4025e01ec4 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 0000000000..d700ec47f2 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## 
Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 0000000000..163ffe72a8 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some +string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: +* WordUtils +* RandomStringUtils +* StringUtils (partial implementation) + +## Installation +If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: + + go get github.com/Masterminds/goutils + +If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. + + +## Documentation +GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) + + +## Usage +The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + } +Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
+ + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + + } + +## License +GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. + +## Issue Reporting +Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues + +## Website +* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml new file mode 100644 index 0000000000..657564a847 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/appveyor.yml @@ -0,0 +1,21 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\goutils +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +build: off + +install: + - go version + - go env + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go new file mode 100644 index 0000000000..177dd86584 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go @@ -0,0 +1,251 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "regexp" + "unicode" +) + +/* +CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNonAlphaNumeric(count int) (string, error) { + return CryptoRandomAlphaNumericCustom(count, false, false) +} + +/* +CryptoRandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAscii(count int) (string, error) { + return CryptoRandom(count, 32, 127, false, false) +} + +/* +CryptoRandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. 
+ +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, false, true) +} + +/* +CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphabetic(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, false) +} + +/* +CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumeric(count int) (string, error) { + if count == 0 { + return "", nil + } + RandomString, err := CryptoRandom(count, 0, 0, true, true) + if err != nil { + return "", fmt.Errorf("Error: %s", err) + } + match, err := regexp.MatchString("([0-9]+)", RandomString) + if err != nil { + panic(err) + } + + if !match { + //Get the position between 0 and the length of the string-1 to insert a random number + position := getCryptoRandomInt(count) + //Insert a random number between [0-9] in the position + RandomString = RandomString[:position] + string('0' + getCryptoRandomInt(10)) + RandomString[position + 1:] + return RandomString, err + } + return RandomString, err + +} + +/* +CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return CryptoRandom(count, 0, 0, letters, numbers) +} + +/* +CryptoRandom creates a random string based on a variety of options, using golang's crypto/rand source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z' and the ASCII printable characters will be used, +unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen.
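
(Editor's aside, not part of the vendored file: a minimal usage sketch of the Crypto* helpers above, assuming the `github.com/Masterminds/goutils` import path from the README.)

```go
package main

import (
	"fmt"

	"github.com/Masterminds/goutils"
)

func main() {
	// 16 alphanumeric characters drawn from crypto/rand.
	token, err := goutils.CryptoRandomAlphaNumeric(16)
	if err != nil {
		panic(err)
	}
	fmt.Println(token)

	// Printable-ASCII string: CryptoRandomAscii draws from codes 32-126.
	pw, err := goutils.CryptoRandomAscii(20)
	if err != nil {
		panic(err)
	}
	fmt.Println(pw)

	// CryptoRandom can be restricted to an explicit character set.
	id, err := goutils.CryptoRandom(8, 0, 0, false, false, []rune("0123456789abcdef")...)
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // eight hex digits
}
```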
+ +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(getCryptoRandomInt(gap) + int64(start)) + } else { + ch = chars[getCryptoRandomInt(gap) + int64(start)] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + getCryptoRandomInt(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + getCryptoRandomInt(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} + +func getCryptoRandomInt(count int) int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) + if err != nil { + panic(err) + } + return nBig.Int64() +} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go new file mode 100644 index 0000000000..1364e0cafd --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go @@ -0,0 +1,268 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "regexp" + "time" + "unicode" +) + +// RANDOM provides the time-based seed used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
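
(Editor's aside, not part of the vendored file: the Random* family in this file draws from math/rand, so it is reproducible with a fixed seed and unsuitable for secrets; a small sketch under that assumption, using the crypto variants only where security matters.)

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/Masterminds/goutils"
)

func main() {
	// Convenience wrappers draw from the package-level, time-seeded RANDOM.
	s, err := goutils.RandomAlphaNumeric(12)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)

	// Supplying a fixed-seed *rand.Rand makes RandomSeed reproducible,
	// which is handy for tests (and exactly why it is not for secrets).
	r := rand.New(rand.NewSource(42))
	fixed, _ := goutils.RandomSeed(10, 0, 0, true, true, nil, r)
	fmt.Println(fixed) // the same ten characters on every run
}
```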
+*/ +func RandomAlphaNumeric(count int) (string, error) { + // Guard count == 0 up front (mirrors CryptoRandomAlphaNumeric); otherwise rand.Intn(count) below panics. + if count == 0 { + return "", nil + } + RandomString, err := Random(count, 0, 0, true, true) + if err != nil { + return "", fmt.Errorf("Error: %s", err) + } + match, err := regexp.MatchString("([0-9]+)", RandomString) + if err != nil { + panic(err) + } + + if !match { + //Get the position between 0 and the length of the string-1 to insert a random number + position := rand.Intn(count) + //Insert a random number between [0-9] in the position + RandomString = RandomString[:position] + string('0'+rand.Intn(10)) + RandomString[position+1:] + return RandomString, err + } + return RandomString, err + +} + +/* +RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return Random(count, 0, 0, letters, numbers) +} + +/* +Random creates a random string based on a variety of options, using the default source of randomness. +This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []rune, *rand.Rand), but +instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) +} + +/* +RandomSeed creates a random string based on a variety of options, using a supplied source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z' and the ASCII printable characters will be used, +unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. +This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance +with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode decimals) to start at + end - the position in set of chars (ASCII/Unicode decimals) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from.
If nil, then it will use the set of all chars. + random - a source of randomness. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 0000000000..5037c4516b --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,224 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." + +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. + +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
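
(Editor's aside, not part of the vendored file: a short sketch exercising the stringutils helpers above; the expected outputs in the comments are read off the implementations in this diff.)

```go
package main

import (
	"fmt"

	"github.com/Masterminds/goutils"
)

func main() {
	// Abbreviate keeps maxWidth characters total, ellipsis included.
	short, err := goutils.Abbreviate("Now is the time for all good men", 20)
	if err != nil {
		panic(err)
	}
	fmt.Println(short) // "Now is the time f..."

	// Strips everything unicode.IsSpace matches, including tabs.
	fmt.Println(goutils.DeleteWhiteSpace(" a b\tc ")) // "abc"

	// First index at which the two strings diverge.
	fmt.Println(goutils.IndexOfDifference("ab", "abxyz")) // 2
}
```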
+ +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 0000000000..034cad8e21 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. +The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. 
+ +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. +To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). 
+The delimiters represent a set of characters understood to separate words. The first string character +and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func Capitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + capitalizeNext := true + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + capitalizeNext = true + } else if capitalizeNext { + buffer[i] = unicode.ToTitle(ch) + capitalizeNext = false + } + } + return string(buffer) + +} + +/* +CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a +titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood +to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func CapitalizeFully(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + str = strings.ToLower(str) + return Capitalize(str, delimiters...) +} + +/* +Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. +The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter +character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to uncapitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + uncapitalized string +*/ +func Uncapitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + uncapitalizeNext = true + } else if uncapitalizeNext { + buffer[i] = unicode.ToLower(ch) + uncapitalizeNext = false + } + } + return string(buffer) +} + +/* +SwapCase swaps the case of a string using a word based algorithm. + +Conversion algorithm: + + Upper case character converts to Lower case + Title case character converts to Lower case + Lower case character after Whitespace or at start converts to Title case + Other Lower case character converts to Upper case + Whitespace is defined by unicode.IsSpace(char). 
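
(Editor's aside, not part of the vendored file: a sketch of the wordutils helpers defined in this file; expected outputs follow from the documented algorithms above.)

```go
package main

import (
	"fmt"

	"github.com/Masterminds/goutils"
)

func main() {
	// Wrap breaks on spaces; long words are left intact by default.
	fmt.Println(goutils.Wrap("The quick brown fox", 10))
	// The quick
	// brown fox

	fmt.Println(goutils.Capitalize("kubernetes helm charts")) // "Kubernetes Helm Charts"
	fmt.Println(goutils.CapitalizeFully("hELLO wORLD"))       // "Hello World"
	fmt.Println(goutils.Uncapitalize("Hello World"))          // "hello world"
	fmt.Println(goutils.SwapCase("Hello World"))              // "hELLO wORLD"
	fmt.Println(goutils.Initials("John Doe Foo"))             // "JDF"
}
```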
+ +Parameters: + str - the string to swap case + +Returns: + the changed string +*/ +func SwapCase(str string) string { + if str == "" { + return str + } + buffer := []rune(str) + + whitespace := true + + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if unicode.IsUpper(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsTitle(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsLower(ch) { + if whitespace { + buffer[i] = unicode.ToTitle(ch) + whitespace = false + } else { + buffer[i] = unicode.ToUpper(ch) + } + } else { + whitespace = unicode.IsSpace(ch) + } + } + return string(buffer) +} + +/* +Initials extracts the initial letters from each word in the string. The first letter of the string and all first +letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters +parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. + +Parameters: + str - the string to get initials from + delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter +Returns: + string of initial letters +*/ +func Initials(str string, delimiters ...rune) string { + if str == "" { + return str + } + if delimiters != nil && len(delimiters) == 0 { + return "" + } + strLen := len(str) + var buf bytes.Buffer + lastWasGap := true + for i := 0; i < strLen; i++ { + ch := rune(str[i]) + + if isDelimiter(ch, delimiters...) { + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore new file mode 100644 index 0000000000..5e3002f88f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/.travis.yml b/vendor/github.com/Masterminds/sprig/v3/.travis.yml new file mode 100644 index 0000000000..14524a7390 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.12.x + - 1.13.x + - tip + +script: + - make test-cover diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 0000000000..fde9e1355f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,298 @@ +# Changelog + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+ +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt new file mode 100644 index 0000000000..5c95accc2e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt @@ -0,0 +1,20 @@ +Sprig +Copyright (C) 2013 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile new file mode 100644 index 0000000000..78d409cde2 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . 
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md new file mode 100644 index 0000000000..454f295784 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/README.md @@ -0,0 +1,91 @@ +# Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/Masterminds/sprig?status.svg)](https://godoc.org/github.com/Masterminds/sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) + +[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) +[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig) + +The Go language comes with a [built-in template +language](http://golang.org/pkg/text/template/), but not +very many template functions. Sprig is a library that provides more than 100 commonly +used template functions. + +It is inspired by the template functions found in +[Twig](http://twig.sensiolabs.org/documentation) and in various +JavaScript libraries, such as [underscore.js](http://underscorejs.org/). + +## Package Versions + +There are two active major versions of the `sprig` package. + +* v3 is currently stable release series on the `master` branch. The Go API should + remain compatible with v2, the current stable version. Behavior change behind + some functions is the reason for the new major version. +* v2 is the previous stable release series. It has been more than three years since + the initial release of v2. You can read the documentation and see the code + on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. + Bug fixes to this major version will continue for some time. + +## Usage + +**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). + +For standard usage, read on. + +### Load the Sprig library + +To load the Sprig `FuncMap`: + +```go + +import ( + "github.com/Masterminds/sprig" + "html/template" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) + + +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. 
For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/appveyor.yml b/vendor/github.com/Masterminds/sprig/v3/appveyor.yml new file mode 100644 index 0000000000..7885f3d749 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/appveyor.yml @@ -0,0 +1,22 @@ + +version: build-{build}.{branch} + +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go version + - go env + +build_script: + - go install ./... + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 0000000000..380ad0a8e4 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,495 @@ +package sprig + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "github.com/google/uuid" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { 
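+	// Editor's annotation, not upstream code: derivePassword follows the
+	// Master Password scheme implied by masterPasswordSeed above. It
+	// scrypt-stretches the master password with a salt built from the seed
+	// plus the user name, keys an HMAC-SHA256 over the seed, site name, and
+	// counter with that result, then maps the digest bytes onto one of the
+	// character templates (e.g. "CvcvnoCvcv": consonant/vowel/number/other)
+	// to spell out the final password.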
+ var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey
+type DSAKeyFormat struct {
+	Version       int
+	P, Q, G, Y, X *big.Int
+}
+
+func pemBlockForKey(priv interface{}) *pem.Block {
+	switch k := priv.(type) {
+	case *rsa.PrivateKey:
+		return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
+	case *dsa.PrivateKey:
+		val := DSAKeyFormat{
+			P: k.P, Q: k.Q, G: k.G,
+			Y: k.Y, X: k.X,
+		}
+		bytes, _ := asn1.Marshal(val)
+		return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes}
+	case *ecdsa.PrivateKey:
+		b, _ := x509.MarshalECPrivateKey(k)
+		return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
+	default:
+		return nil
+	}
+}
+
+type certificate struct {
+	Cert string
+	Key  string
+}
+
+func buildCustomCertificate(b64cert string, b64key string) (certificate, error) {
+	crt := certificate{}
+
+	cert, err := base64.StdEncoding.DecodeString(b64cert)
+	if err != nil {
+		return crt, errors.New("unable to decode base64 certificate")
+	}
+
+	key, err := base64.StdEncoding.DecodeString(b64key)
+	if err != nil {
+		return crt, errors.New("unable to decode base64 private key")
+	}
+
+	decodedCert, _ := pem.Decode(cert)
+	if decodedCert == nil {
+		return crt, errors.New("unable to decode certificate")
+	}
+	_, err = x509.ParseCertificate(decodedCert.Bytes)
+	if err != nil {
+		return crt, fmt.Errorf(
+			"error parsing certificate: decodedCert.Bytes: %s",
+			err,
+		)
+	}
+
+	decodedKey, _ := pem.Decode(key)
+	if decodedKey == nil {
+		return crt, errors.New("unable to decode key")
+	}
+	_, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes)
+	if err != nil {
+		return crt, fmt.Errorf(
+			"error parsing private key: decodedKey.Bytes: %s",
+			err,
+		)
+	}
+
+	crt.Cert = string(cert)
+	crt.Key = string(key)
+
+	return crt, nil
+}
+
+func generateCertificateAuthority(
+	cn string,
+	daysValid int,
+) (certificate, error) {
+	ca := certificate{}
+
+	template, err := getBaseCertTemplate(cn, nil, nil, daysValid)
+	if err != nil {
+		return ca, err
+	}
+	// Override KeyUsage and IsCA
+	template.KeyUsage = x509.KeyUsageKeyEncipherment |
+		x509.KeyUsageDigitalSignature |
+		x509.KeyUsageCertSign
+	template.IsCA = true
+
+	priv, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return ca, fmt.Errorf("error generating rsa key: %s", err)
+	}
+
+	ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv)
+
+	return ca, err
+}
+
+func generateSelfSignedCertificate(
+	cn string,
+	ips []interface{},
+	alternateDNS []interface{},
+	daysValid int,
+) (certificate, error) {
+	cert := certificate{}
+
+	template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
+	if err != nil {
+		return cert, err
+	}
+
+	priv, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return cert, fmt.Errorf("error generating rsa key: %s", err)
+	}
+
+	cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv)
+
+	return cert, err
+}
+
+func generateSignedCertificate(
+	cn string,
+	ips []interface{},
+	alternateDNS []interface{},
+	daysValid int,
+	ca certificate,
+) (certificate, error) {
+	cert := certificate{}
+
+	decodedSignerCert, _ := pem.Decode([]byte(ca.Cert))
+	if decodedSignerCert == nil {
+		return cert, errors.New("unable to decode certificate")
+	}
+	signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes)
+	if err != nil {
+		return cert, fmt.Errorf(
+			"error parsing certificate: decodedSignerCert.Bytes: %s",
+			err,
+		)
+	}
+	decodedSignerKey, _ := pem.Decode([]byte(ca.Key))
+	if decodedSignerKey == nil {
+		return cert, errors.New("unable to decode key")
+	}
+	signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes)
+	if err != nil {
+		return cert, fmt.Errorf(
+			"error parsing private key: decodedSignerKey.Bytes: %s",
+			err,
+		)
+	}
+
+	template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
+	if err != nil {
+		return cert, err
+	}
+
+	priv, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return cert, fmt.Errorf("error generating rsa key: %s", err)
+	}
+
+	cert.Cert, cert.Key, err = getCertAndKey(
+		template,
+		priv,
+		signerCert,
+		signerKey,
+	)
+
+	return cert, err
+}
+
+func getCertAndKey(
+	template *x509.Certificate,
+	signeeKey *rsa.PrivateKey,
+	parent *x509.Certificate,
+	signingKey *rsa.PrivateKey,
+) (string, string, error) {
+	derBytes, err := x509.CreateCertificate(
+		rand.Reader,
+		template,
+		parent,
+		&signeeKey.PublicKey,
+		signingKey,
+	)
+	if err != nil {
+		return "", "", fmt.Errorf("error creating certificate: %s", err)
+	}
+
+	certBuffer := bytes.Buffer{}
+	if err := pem.Encode(
+		&certBuffer,
+		&pem.Block{Type: "CERTIFICATE", Bytes: derBytes},
+	); err != nil {
+		return "", "", fmt.Errorf("error pem-encoding certificate: %s", err)
+	}
+
+	keyBuffer := bytes.Buffer{}
+	if err := pem.Encode(
+		&keyBuffer,
+		&pem.Block{
+			Type:  "RSA PRIVATE KEY",
+			Bytes: x509.MarshalPKCS1PrivateKey(signeeKey),
+		},
+	); err != nil {
+		return "", "", fmt.Errorf("error pem-encoding key: %s", err)
+	}
+
+	return certBuffer.String(), keyBuffer.String(), nil
+}
+
+func getBaseCertTemplate(
+	cn string,
+	ips []interface{},
+	alternateDNS []interface{},
+	daysValid int,
+) (*x509.Certificate, error) {
+	ipAddresses, err := getNetIPs(ips)
+	if err != nil {
+		return nil, err
+	}
+	dnsNames, err := getAlternateDNSStrs(alternateDNS)
+	if err != nil {
+		return nil, err
+	}
+	serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound)
+	if err != nil {
+		return nil, err
+	}
+	return &x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject: pkix.Name{
+			CommonName: cn,
+		},
+		IPAddresses: ipAddresses,
+		DNSNames:    dnsNames,
+		NotBefore:   time.Now(),
+		NotAfter:    time.Now().Add(time.Hour * 24 * time.Duration(daysValid)),
+		KeyUsage:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage: []x509.ExtKeyUsage{
+			x509.ExtKeyUsageServerAuth,
+			x509.ExtKeyUsageClientAuth,
+		},
+		BasicConstraintsValid: true,
+	}, nil
+}
+
+func getNetIPs(ips []interface{}) ([]net.IP, error) {
+	if ips == nil {
+		return []net.IP{}, nil
+	}
+	var ipStr string
+	var ok bool
+	var netIP net.IP
+	netIPs := make([]net.IP, len(ips))
+	for i, ip := range ips {
+		ipStr, ok = ip.(string)
+		if !ok {
+			return nil, fmt.Errorf("error parsing ip: %v is not a string", ip)
+		}
+		netIP = net.ParseIP(ipStr)
+		if netIP == nil {
+			return nil, fmt.Errorf("error parsing ip: %s", ipStr)
+		}
+		netIPs[i] = netIP
+	}
+	return netIPs, nil
+}
+
+func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) {
+	if alternateDNS == nil {
+		return []string{}, nil
+	}
+	var dnsStr string
+	var ok bool
+	alternateDNSStrs := make([]string, len(alternateDNS))
+	for i, dns := range alternateDNS {
+		dnsStr, ok = dns.(string)
+		if !ok {
+			return nil, fmt.Errorf(
+				"error processing alternate dns name: %v is not a string",
+				dns,
+			)
+		}
+		alternateDNSStrs[i] = dnsStr
+	}
+	return alternateDNSStrs, nil
+}
+
+func encryptAES(password string, plaintext string) (string, error) {
+	if plaintext == "" {
+		return "", nil
+	}
+
+	key := make([]byte, 32)
+	copy(key, []byte(password))
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return "", err
+	}
+
+	content := []byte(plaintext)
+	blockSize := block.BlockSize()
+	padding := blockSize - len(content)%blockSize
+	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+	content = append(content, padtext...)
+
+	ciphertext := make([]byte, aes.BlockSize+len(content))
+
+	iv := ciphertext[:aes.BlockSize]
+	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+		return "", err
+	}
+
+	mode := cipher.NewCBCEncrypter(block, iv)
+	mode.CryptBlocks(ciphertext[aes.BlockSize:], content)
+
+	return base64.StdEncoding.EncodeToString(ciphertext), nil
+}
+
+func decryptAES(password string, crypt64 string) (string, error) {
+	if crypt64 == "" {
+		return "", nil
+	}
+
+	key := make([]byte, 32)
+	copy(key, []byte(password))
+
+	crypt, err := base64.StdEncoding.DecodeString(crypt64)
+	if err != nil {
+		return "", err
+	}
+
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return "", err
+	}
+
+	iv := crypt[:aes.BlockSize]
+	crypt = crypt[aes.BlockSize:]
+	decrypted := make([]byte, len(crypt))
+	mode := cipher.NewCBCDecrypter(block, iv)
+	mode.CryptBlocks(decrypted, crypt)
+
+	return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go
new file mode 100644
index 0000000000..027ca257f9
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/date.go
@@ -0,0 +1,139 @@
+package sprig
+
+import (
+	"strconv"
+	"time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the latter case, it is treated as seconds since UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+	return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+	return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+	return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+	var t time.Time
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case *time.Time:
+		t = *date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	case int32:
+		t = time.Unix(int64(date), 0)
+	}
+
+	loc, err := time.LoadLocation(zone)
+	if err != nil {
+		loc, _ = time.LoadLocation("UTC")
+	}
+
+	return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return date
+	}
+	return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+	var t time.Time
+
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	}
+	// Drop resolution to seconds
+	duration := time.Since(t).Round(time.Second)
+	return duration.String()
+}
+
+func durationRound(duration interface{}) string {
+	var d time.Duration
+	switch duration := duration.(type) {
+	default:
+		d = 0
+	case string:
+		d, _ = time.ParseDuration(duration)
+	case int64:
+		d = time.Duration(duration)
+	case time.Time:
+		d = time.Since(duration)
+	}
+
+	u := uint64(d)
+	neg := d < 0
+	if neg {
+		u = -u
+	}
+
+	var (
+		year   = uint64(time.Hour) * 24 *
365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 0000000000..97d7d6e645 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,122 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "reflect" + "strings" +) + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. 
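+// For example, a value of `"<b>"` renders as "<b>" here, whereas toJson would
+// render it as "\u003cb\u003e" because encoding/json escapes HTML by default.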
+func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 0000000000..11d943fc4b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,148 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
+ return "" + } + } + return dst +} + +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := mustDeepCopy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} + +func mustDeepCopy(i interface{}) (interface{}, error) { + return copystructure.Copy(i) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go new file mode 100644 index 0000000000..aabb9d4489 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go new file mode 100644 index 0000000000..180877a8b4 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -0,0 +1,336 @@ +package sprig + +import ( + "errors" + "html/template" + "os" + "path" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. 
+var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": func() time.Time { return time.Now() }, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
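+	// Operands are coerced with toInt64, so floats are truncated:
+	// {{ add 1 2 3 }} renders 6, and {{ add 1.9 1 }} renders 2.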
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "biggest": max, + "max": max, + "min": min, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "compact": compact, + "mustCompact": mustCompact, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": func(s string) string { return os.Getenv(s) }, + "expandenv": func(s string) string { return os.ExpandEnv(s) }, + + // Network: + "getHostByName": getHostByName, + + // File Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + + // Crypto: + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSignedCert": generateSignedCertificate, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/go.mod b/vendor/github.com/Masterminds/sprig/v3/go.mod new file mode 100644 index 0000000000..2ada222f2b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/go.mod @@ -0,0 +1,15 @@ +module github.com/Masterminds/sprig/v3 + +go 1.13 + +require ( + github.com/Masterminds/goutils v1.1.0 + github.com/Masterminds/semver/v3 v3.0.1 + github.com/google/uuid v1.1.1 + github.com/huandu/xstrings v1.2.0 + github.com/imdario/mergo v0.3.7 + github.com/mitchellh/copystructure v1.0.0 + github.com/spf13/cast v1.3.0 + github.com/stretchr/testify v1.4.0 + golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 +) diff --git a/vendor/github.com/Masterminds/sprig/v3/go.sum b/vendor/github.com/Masterminds/sprig/v3/go.sum new file mode 100644 index 0000000000..2b2a10e08b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/go.sum @@ -0,0 +1,36 @@ +github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.0.1 h1:2kKm5lb7dKVrt5TYUiAavE6oFc1cFT0057UVGT+JqLk= +github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 0000000000..063fe4f857 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,419 @@ +package sprig + +import ( + "fmt" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
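+// sortAlpha, for instance, accepts a []int and stringifies every element via
+// strval before sorting, and the must* variants below surface the reflection
+// errors instead of panicking.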
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return 
[]string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
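+	// Like the other non-must helpers in this file, without panics on the error
+	// that mustWithout reports for non-slice inputs.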
+	if err != nil {
+		panic(err)
+	}
+
+	return l
+}
+
+func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
+	tp := reflect.TypeOf(list).Kind()
+	switch tp {
+	case reflect.Slice, reflect.Array:
+		l2 := reflect.ValueOf(list)
+
+		l := l2.Len()
+		res := []interface{}{}
+		var item interface{}
+		for i := 0; i < l; i++ {
+			item = l2.Index(i).Interface()
+			if !inList(omit, item) {
+				res = append(res, item)
+			}
+		}
+
+		return res, nil
+	default:
+		return nil, fmt.Errorf("Cannot find without on type %s", tp)
+	}
+}
+
+func has(needle interface{}, haystack interface{}) bool {
+	l, err := mustHas(needle, haystack)
+	if err != nil {
+		panic(err)
+	}
+
+	return l
+}
+
+func mustHas(needle interface{}, haystack interface{}) (bool, error) {
+	if haystack == nil {
+		return false, nil
+	}
+	tp := reflect.TypeOf(haystack).Kind()
+	switch tp {
+	case reflect.Slice, reflect.Array:
+		l2 := reflect.ValueOf(haystack)
+		var item interface{}
+		l := l2.Len()
+		for i := 0; i < l; i++ {
+			item = l2.Index(i).Interface()
+			if reflect.DeepEqual(needle, item) {
+				return true, nil
+			}
+		}
+
+		return false, nil
+	default:
+		return false, fmt.Errorf("Cannot find has on type %s", tp)
+	}
+}
+
+// $list := [1, 2, 3, 4, 5]
+// slice $list     -> list[0:5] = list[:]
+// slice $list 0 3 -> list[0:3] = list[:3]
+// slice $list 3 5 -> list[3:5]
+// slice $list 3   -> list[3:5] = list[3:]
+func slice(list interface{}, indices ...interface{}) interface{} {
+	l, err := mustSlice(list, indices...)
+	if err != nil {
+		panic(err)
+	}
+
+	return l
+}
+
+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
+	tp := reflect.TypeOf(list).Kind()
+	switch tp {
+	case reflect.Slice, reflect.Array:
+		l2 := reflect.ValueOf(list)
+
+		l := l2.Len()
+		if l == 0 {
+			return nil, nil
+		}
+
+		var start, end int
+		if len(indices) > 0 {
+			start = toInt(indices[0])
+		}
+		if len(indices) < 2 {
+			end = l
+		} else {
+			end = toInt(indices[1])
+		}
+
+		return l2.Slice(start, end).Interface(), nil
+	default:
+		return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
+	}
+}
+
+func concat(lists ...interface{}) interface{} {
+	var res []interface{}
+	for _, list := range lists {
+		tp := reflect.TypeOf(list).Kind()
+		switch tp {
+		case reflect.Slice, reflect.Array:
+			l2 := reflect.ValueOf(list)
+			for i := 0; i < l2.Len(); i++ {
+				res = append(res, l2.Index(i).Interface())
+			}
+		default:
+			panic(fmt.Sprintf("Cannot concat type %s as list", tp))
+		}
+	}
+	return res
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go
new file mode 100644
index 0000000000..d786cc7363
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+	"math/rand"
+	"net"
+)
+
+func getHostByName(name string) string {
+	addrs, _ := net.LookupHost(name)
+	//TODO: add error handling when the v3 release comes out
+	return addrs[rand.Intn(len(addrs))]
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go
new file mode 100644
index 0000000000..49f8419b64
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go
@@ -0,0 +1,114 @@
+package sprig
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+
+	"github.com/spf13/cast"
+)
+
+// toFloat64 converts 64-bit floats
+func toFloat64(v interface{}) float64 {
+	return cast.ToFloat64(v)
+}
+
+func toInt(v interface{}) int {
+	return cast.ToInt(v)
+}
+
+// toInt64 converts integer types to 64-bit integers
+func
toInt64(v interface{}) int64 { + return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go new file mode 100644 index 0000000000..8a65c132f0 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
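+// For example, typeIs "int" 42 is true while typeIs "int" int64(42) is false,
+// since typeOf formats the value with %T and reports "int64".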
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 0000000000..2370878f16 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,79 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 0000000000..3fbe08aa63 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 0000000000..e0ae628c84 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + 
"encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+}
+
+func indent(spaces int, v string) string {
+	pad := strings.Repeat(" ", spaces)
+	return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+	return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+	return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+	if count == 1 {
+		return one
+	}
+	return many
+}
+
+func strslice(v interface{}) []string {
+	switch v := v.(type) {
+	case []string:
+		return v
+	case []interface{}:
+		b := make([]string, 0, len(v))
+		for _, s := range v {
+			if s != nil {
+				b = append(b, strval(s))
+			}
+		}
+		return b
+	default:
+		val := reflect.ValueOf(v)
+		switch val.Kind() {
+		case reflect.Array, reflect.Slice:
+			l := val.Len()
+			b := make([]string, 0, l)
+			for i := 0; i < l; i++ {
+				value := val.Index(i).Interface()
+				if value != nil {
+					b = append(b, strval(value))
+				}
+			}
+			return b
+		default:
+			if v == nil {
+				return []string{}
+			}
+
+			return []string{strval(v)}
+		}
+	}
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+	newSlice := make([]interface{}, 0, len(v))
+	for _, i := range v {
+		if i != nil {
+			newSlice = append(newSlice, i)
+		}
+	}
+	return newSlice
+}
+
+func strval(v interface{}) string {
+	switch v := v.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	case error:
+		return v.Error()
+	case fmt.Stringer:
+		return v.String()
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+func trunc(c int, s string) string {
+	if c < 0 && len(s)+c > 0 {
+		return s[len(s)+c:]
+	}
+	if c >= 0 && len(s) > c {
+		return s[:c]
+	}
+	return s
+}
+
+func join(sep string, v interface{}) string {
+	return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+	parts := strings.Split(orig, sep)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+	parts := strings.SplitN(orig, sep, n)
+	res := make(map[string]string, len(parts))
+	for i, v := range parts {
+		res["_"+strconv.Itoa(i)] = v
+	}
+	return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end < 0 or end is greater than the length of s, this calls string[start:].
+//
+// Otherwise, this calls string[start:end].
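+//
+// For example, substring(0, 5, "hello world") yields "hello" and
+// substring(6, -1, "hello world") yields "world".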
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go new file mode 100644 index 0000000000..b8e120e19b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/Masterminds/squirrel/.gitignore b/vendor/github.com/Masterminds/squirrel/.gitignore new file mode 100644 index 0000000000..4a0699f0b7 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.gitignore @@ -0,0 +1 @@ +squirrel.test \ No newline at end of file diff --git a/vendor/github.com/Masterminds/squirrel/.travis.yml b/vendor/github.com/Masterminds/squirrel/.travis.yml new file mode 100644 index 0000000000..bc6be0f814 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.travis.yml @@ -0,0 +1,22 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +install: + - go get + - go get github.com/stretchr/testify/assert + +notifications: + irc: "irc.freenode.net#masterminds" diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE.txt b/vendor/github.com/Masterminds/squirrel/LICENSE.txt new file mode 100644 index 0000000000..74c20a2b97 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/LICENSE.txt @@ -0,0 +1,23 @@ +Squirrel +The Masterminds +Copyright (C) 2014-2015, Lann Martin +Copyright (C) 2015-2016, Google +Copyright (C) 2015, Matt Farina and Matt Butcher + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/squirrel/README.md b/vendor/github.com/Masterminds/squirrel/README.md new file mode 100644 index 0000000000..e0c4394c96 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/README.md @@ -0,0 +1,118 @@ +# Squirrel - fluent SQL generator for Go + +```go +import "gopkg.in/Masterminds/squirrel.v1" +``` +or if you prefer using `master` (which may be arbitrarily ahead of or behind `v1`): + +**NOTE:** as of Go 1.6, `go get` correctly clones the Github default branch (which is `v1` in this repo). +```go +import "github.com/Masterminds/squirrel" +``` + +[![GoDoc](https://godoc.org/github.com/Masterminds/squirrel?status.png)](https://godoc.org/github.com/Masterminds/squirrel) +[![Build Status](https://travis-ci.org/Masterminds/squirrel.svg?branch=v1)](https://travis-ci.org/Masterminds/squirrel) + +_**Note:** This project has moved from `github.com/lann/squirrel` to +`github.com/Masterminds/squirrel`. Lann remains the architect of the +project, but we're helping him curate. + +**Squirrel is not an ORM.** For an application of Squirrel, check out +[structable, a table-struct mapper](https://github.com/technosophos/structable) + + +Squirrel helps you build SQL queries from composable parts: + +```go +import sq "github.com/Masterminds/squirrel" + +users := sq.Select("*").From("users").Join("emails USING (email_id)") + +active := users.Where(sq.Eq{"deleted_at": nil}) + +sql, args, err := active.ToSql() + +sql == "SELECT * FROM users JOIN emails USING (email_id) WHERE deleted_at IS NULL" +``` + +```go +sql, args, err := sq. + Insert("users").Columns("name", "age"). + Values("moe", 13).Values("larry", sq.Expr("? + 5", 12)). + ToSql() + +sql == "INSERT INTO users (name,age) VALUES (?,?),(?,? 
+ 5)" +``` + +Squirrel can also execute queries directly: + +```go +stooges := users.Where(sq.Eq{"username": []string{"moe", "larry", "curly", "shemp"}}) +three_stooges := stooges.Limit(3) +rows, err := three_stooges.RunWith(db).Query() + +// Behaves like: +rows, err := db.Query("SELECT * FROM users WHERE username IN (?,?,?,?) LIMIT 3", + "moe", "larry", "curly", "shemp") +``` + +Squirrel makes conditional query building a breeze: + +```go +if len(q) > 0 { + users = users.Where("name LIKE ?", fmt.Sprint("%", q, "%")) +} +``` + +Squirrel wants to make your life easier: + +```go +// StmtCache caches Prepared Stmts for you +dbCache := sq.NewStmtCacher(db) + +// StatementBuilder keeps your syntax neat +mydb := sq.StatementBuilder.RunWith(dbCache) +select_users := mydb.Select("*").From("users") +``` + +Squirrel loves PostgreSQL: + +```go +psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar) + +// You use question marks for placeholders... +sql, _, _ := psql.Select("*").From("elephants").Where("name IN (?,?)", "Dumbo", "Verna") + +/// ...squirrel replaces them using PlaceholderFormat. +sql == "SELECT * FROM elephants WHERE name IN ($1,$2)" + + +/// You can retrieve id ... +query := sq.Insert("nodes"). + Columns("uuid", "type", "data"). + Values(node.Uuid, node.Type, node.Data). + Suffix("RETURNING \"id\""). + RunWith(m.db). + PlaceholderFormat(sq.Dollar) + +query.QueryRow().Scan(&node.id) +``` + +You can escape question mask by inserting two question marks: + +```sql +SELECT * FROM nodes WHERE meta->'format' ??| array[?,?] +``` + +will generate with the Dollar Placeholder: + +```sql +SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2] +``` + + + +## License + +Squirrel is released under the +[MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/Masterminds/squirrel/case.go b/vendor/github.com/Masterminds/squirrel/case.go new file mode 100644 index 0000000000..2eb69dd5c9 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/case.go @@ -0,0 +1,118 @@ +package squirrel + +import ( + "bytes" + "errors" + + "github.com/lann/builder" +) + +func init() { + builder.Register(CaseBuilder{}, caseData{}) +} + +// sqlizerBuffer is a helper that allows to write many Sqlizers one by one +// without constant checks for errors that may come from Sqlizer +type sqlizerBuffer struct { + bytes.Buffer + args []interface{} + err error +} + +// WriteSql converts Sqlizer to SQL strings and writes it to buffer +func (b *sqlizerBuffer) WriteSql(item Sqlizer) { + if b.err != nil { + return + } + + var str string + var args []interface{} + str, args, b.err = item.ToSql() + + if b.err != nil { + return + } + + b.WriteString(str) + b.WriteByte(' ') + b.args = append(b.args, args...) +} + +func (b *sqlizerBuffer) ToSql() (string, []interface{}, error) { + return b.String(), b.args, b.err +} + +// whenPart is a helper structure to describe SQLs "WHEN ... THEN ..." 
expression
+type whenPart struct {
+	when Sqlizer
+	then Sqlizer
+}
+
+func newWhenPart(when interface{}, then interface{}) whenPart {
+	return whenPart{newPart(when), newPart(then)}
+}
+
+// caseData holds all the data required to build a CASE SQL construct
+type caseData struct {
+	What      Sqlizer
+	WhenParts []whenPart
+	Else      Sqlizer
+}
+
+// ToSql implements Sqlizer
+func (d *caseData) ToSql() (sqlStr string, args []interface{}, err error) {
+	if len(d.WhenParts) == 0 {
+		err = errors.New("case expression must contain at least one WHEN clause")
+
+		return
+	}
+
+	sql := sqlizerBuffer{}
+
+	sql.WriteString("CASE ")
+	if d.What != nil {
+		sql.WriteSql(d.What)
+	}
+
+	for _, p := range d.WhenParts {
+		sql.WriteString("WHEN ")
+		sql.WriteSql(p.when)
+		sql.WriteString("THEN ")
+		sql.WriteSql(p.then)
+	}
+
+	if d.Else != nil {
+		sql.WriteString("ELSE ")
+		sql.WriteSql(d.Else)
+	}
+
+	sql.WriteString("END")
+
+	return sql.ToSql()
+}
+
+// CaseBuilder builds a SQL CASE construct that can be used as part of a query.
+type CaseBuilder builder.Builder
+
+// ToSql builds the query into a SQL string and bound args.
+func (b CaseBuilder) ToSql() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(caseData)
+	return data.ToSql()
+}
+
+// what sets the optional value for the CASE construct: "CASE [value] ..."
+func (b CaseBuilder) what(expr interface{}) CaseBuilder {
+	return builder.Set(b, "What", newPart(expr)).(CaseBuilder)
+}
+
+// When adds a "WHEN ... THEN ..." part to the CASE construct
+func (b CaseBuilder) When(when interface{}, then interface{}) CaseBuilder {
+	// TODO: performance hint: replace slice of WhenPart with just slice of parts
+	// where even indices of the slice belong to "when"s and odd indices belong to "then"s
+	return builder.Append(b, "WhenParts", newWhenPart(when, then)).(CaseBuilder)
+}
+
+// Else sets the optional "ELSE ..."
part for CASE construct +func (b CaseBuilder) Else(expr interface{}) CaseBuilder { + return builder.Set(b, "Else", newPart(expr)).(CaseBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/delete.go b/vendor/github.com/Masterminds/squirrel/delete.go new file mode 100644 index 0000000000..8aa4f1e667 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/delete.go @@ -0,0 +1,152 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "github.com/lann/builder" + "strings" +) + +type deleteData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + From string + WhereParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes exprs +} + +func (d *deleteData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *deleteData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.From) == 0 { + err = fmt.Errorf("delete statements must specify a From table") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("DELETE FROM ") + sql.WriteString(d.From) + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + + +// Builder + +// DeleteBuilder builds SQL DELETE statements. +type DeleteBuilder builder.Builder + +func init() { + builder.Register(DeleteBuilder{}, deleteData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b DeleteBuilder) PlaceholderFormat(f PlaceholderFormat) DeleteBuilder { + return builder.Set(b, "PlaceholderFormat", f).(DeleteBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b DeleteBuilder) RunWith(runner BaseRunner) DeleteBuilder { + return setRunWith(b, runner).(DeleteBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b DeleteBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(deleteData) + return data.Exec() +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b DeleteBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(deleteData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b DeleteBuilder) Prefix(sql string, args ...interface{}) DeleteBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(DeleteBuilder) +} + +// From sets the table to be deleted from. +func (b DeleteBuilder) From(from string) DeleteBuilder { + return builder.Set(b, "From", from).(DeleteBuilder) +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. 
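+//
+// A minimal usage sketch; the "users" table is illustrative, not part of this
+// package:
+//
+//	sql, args, _ := Delete("users").Where(Eq{"id": 1}).ToSql()
+//	// sql == "DELETE FROM users WHERE id = ?", args == []interface{}{1}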
+func (b DeleteBuilder) Where(pred interface{}, args ...interface{}) DeleteBuilder { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(DeleteBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b DeleteBuilder) OrderBy(orderBys ...string) DeleteBuilder { + return builder.Extend(b, "OrderBys", orderBys).(DeleteBuilder) +} + +// Limit sets a LIMIT clause on the query. +func (b DeleteBuilder) Limit(limit uint64) DeleteBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(DeleteBuilder) +} + +// Offset sets a OFFSET clause on the query. +func (b DeleteBuilder) Offset(offset uint64) DeleteBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(DeleteBuilder) +} + +// Suffix adds an expression to the end of the query +func (b DeleteBuilder) Suffix(sql string, args ...interface{}) DeleteBuilder { + return builder.Append(b, "Suffixes", Expr(sql, args...)).(DeleteBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/expr.go b/vendor/github.com/Masterminds/squirrel/expr.go new file mode 100644 index 0000000000..a8749f10d5 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/expr.go @@ -0,0 +1,247 @@ +package squirrel + +import ( + "database/sql/driver" + "fmt" + "io" + "reflect" + "strings" +) + +type expr struct { + sql string + args []interface{} +} + +// Expr builds value expressions for InsertBuilder and UpdateBuilder. +// +// Ex: +// .Values(Expr("FROM_UNIXTIME(?)", t)) +func Expr(sql string, args ...interface{}) expr { + return expr{sql: sql, args: args} +} + +func (e expr) ToSql() (sql string, args []interface{}, err error) { + return e.sql, e.args, nil +} + +type exprs []expr + +func (es exprs) AppendToSql(w io.Writer, sep string, args []interface{}) ([]interface{}, error) { + for i, e := range es { + if i > 0 { + _, err := io.WriteString(w, sep) + if err != nil { + return nil, err + } + } + _, err := io.WriteString(w, e.sql) + if err != nil { + return nil, err + } + args = append(args, e.args...) + } + return args, nil +} + +// aliasExpr helps to alias part of SQL query generated with underlying "expr" +type aliasExpr struct { + expr Sqlizer + alias string +} + +// Alias allows to define alias for column in SelectBuilder. Useful when column is +// defined as complex expression like IF or CASE +// Ex: +// .Column(Alias(caseStmt, "case_column")) +func Alias(expr Sqlizer, alias string) aliasExpr { + return aliasExpr{expr, alias} +} + +func (e aliasExpr) ToSql() (sql string, args []interface{}, err error) { + sql, args, err = e.expr.ToSql() + if err == nil { + sql = fmt.Sprintf("(%s) AS %s", sql, e.alias) + } + return +} + +// Eq is syntactic sugar for use with Where/Having/Set methods. 
+// Ex: +// .Where(Eq{"id": 1}) +type Eq map[string]interface{} + +func (eq Eq) toSql(useNotOpr bool) (sql string, args []interface{}, err error) { + var ( + exprs []string + equalOpr string = "=" + inOpr string = "IN" + nullOpr string = "IS" + ) + + if useNotOpr { + equalOpr = "<>" + inOpr = "NOT IN" + nullOpr = "IS NOT" + } + + for key, val := range eq { + expr := "" + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + expr = fmt.Sprintf("%s %s NULL", key, nullOpr) + } else { + valVal := reflect.ValueOf(val) + if valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice { + if valVal.Len() == 0 { + expr = fmt.Sprintf("%s %s (NULL)", key, inOpr) + if args == nil { + args = []interface{}{} + } + } else { + for i := 0; i < valVal.Len(); i++ { + args = append(args, valVal.Index(i).Interface()) + } + expr = fmt.Sprintf("%s %s (%s)", key, inOpr, Placeholders(valVal.Len())) + } + } else { + expr = fmt.Sprintf("%s %s ?", key, equalOpr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (eq Eq) ToSql() (sql string, args []interface{}, err error) { + return eq.toSql(false) +} + +// NotEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(NotEq{"id": 1}) == "id <> 1" +type NotEq Eq + +func (neq NotEq) ToSql() (sql string, args []interface{}, err error) { + return Eq(neq).toSql(true) +} + +// Lt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Lt{"id": 1}) +type Lt map[string]interface{} + +func (lt Lt) toSql(opposite, orEq bool) (sql string, args []interface{}, err error) { + var ( + exprs []string + opr string = "<" + ) + + if opposite { + opr = ">" + } + + if orEq { + opr = fmt.Sprintf("%s%s", opr, "=") + } + + for key, val := range lt { + expr := "" + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with less than or greater than operators") + return + } else { + valVal := reflect.ValueOf(val) + if valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice { + err = fmt.Errorf("cannot use array or slice with less than or greater than operators") + return + } else { + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lt Lt) ToSql() (sql string, args []interface{}, err error) { + return lt.toSql(false, false) +} + +// LtOrEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(LtOrEq{"id": 1}) == "id <= 1" +type LtOrEq Lt + +func (ltOrEq LtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(ltOrEq).toSql(false, true) +} + +// Gt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Gt{"id": 1}) == "id > 1" +type Gt Lt + +func (gt Gt) ToSql() (sql string, args []interface{}, err error) { + return Lt(gt).toSql(true, false) +} + +// GtOrEq is syntactic sugar for use with Where/Having/Set methods. 
+// Ex: +// .Where(GtOrEq{"id": 1}) == "id >= 1" +type GtOrEq Lt + +func (gtOrEq GtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(gtOrEq).toSql(true, true) +} + +type conj []Sqlizer + +func (c conj) join(sep string) (sql string, args []interface{}, err error) { + var sqlParts []string + for _, sqlizer := range c { + partSql, partArgs, err := sqlizer.ToSql() + if err != nil { + return "", nil, err + } + if partSql != "" { + sqlParts = append(sqlParts, partSql) + args = append(args, partArgs...) + } + } + if len(sqlParts) > 0 { + sql = fmt.Sprintf("(%s)", strings.Join(sqlParts, sep)) + } + return +} + +type And conj + +func (a And) ToSql() (string, []interface{}, error) { + return conj(a).join(" AND ") +} + +type Or conj + +func (o Or) ToSql() (string, []interface{}, error) { + return conj(o).join(" OR ") +} diff --git a/vendor/github.com/Masterminds/squirrel/insert.go b/vendor/github.com/Masterminds/squirrel/insert.go new file mode 100644 index 0000000000..f08025f500 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/insert.go @@ -0,0 +1,207 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "github.com/lann/builder" + "strings" +) + +type insertData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + Options []string + Into string + Columns []string + Values [][]interface{} + Suffixes exprs +} + +func (d *insertData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *insertData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *insertData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *insertData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Into) == 0 { + err = fmt.Errorf("insert statements must specify a table") + return + } + if len(d.Values) == 0 { + err = fmt.Errorf("insert statements must have at least one set of values") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("INSERT ") + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + sql.WriteString("INTO ") + sql.WriteString(d.Into) + sql.WriteString(" ") + + if len(d.Columns) > 0 { + sql.WriteString("(") + sql.WriteString(strings.Join(d.Columns, ",")) + sql.WriteString(") ") + } + + sql.WriteString("VALUES ") + + valuesStrings := make([]string, len(d.Values)) + for r, row := range d.Values { + valueStrings := make([]string, len(row)) + for v, val := range row { + e, isExpr := val.(expr) + if isExpr { + valueStrings[v] = e.sql + args = append(args, e.args...) + } else { + valueStrings[v] = "?" + args = append(args, val) + } + } + valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ",")) + } + sql.WriteString(strings.Join(valuesStrings, ",")) + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// InsertBuilder builds SQL INSERT statements. 
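+//
+// A minimal sketch of typical usage (the "users" table is illustrative):
+//
+//	sql, args, _ := Insert("users").Columns("name", "age").Values("moe", 13).ToSql()
+//	// sql == "INSERT INTO users (name,age) VALUES (?,?)", args == []interface{}{"moe", 13}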
+type InsertBuilder builder.Builder + +func init() { + builder.Register(InsertBuilder{}, insertData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b InsertBuilder) PlaceholderFormat(f PlaceholderFormat) InsertBuilder { + return builder.Set(b, "PlaceholderFormat", f).(InsertBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b InsertBuilder) RunWith(runner BaseRunner) InsertBuilder { + return setRunWith(b, runner).(InsertBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b InsertBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(insertData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b InsertBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(insertData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b InsertBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(insertData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b InsertBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b InsertBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(insertData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b InsertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(InsertBuilder) +} + +// Options adds keyword options before the INTO clause of the query. +func (b InsertBuilder) Options(options ...string) InsertBuilder { + return builder.Extend(b, "Options", options).(InsertBuilder) +} + +// Into sets the INTO clause of the query. +func (b InsertBuilder) Into(from string) InsertBuilder { + return builder.Set(b, "Into", from).(InsertBuilder) +} + +// Columns adds insert columns to the query. +func (b InsertBuilder) Columns(columns ...string) InsertBuilder { + return builder.Extend(b, "Columns", columns).(InsertBuilder) +} + +// Values adds a single row's values to the query. 
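+//
+// Each call appends one row, so a multi-row INSERT is built by calling Values
+// repeatedly (sketch, with an illustrative table "t"):
+//
+//	Insert("t").Columns("a").Values(1).Values(2)
+//	// => "INSERT INTO t (a) VALUES (?),(?)"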
+func (b InsertBuilder) Values(values ...interface{}) InsertBuilder { + return builder.Append(b, "Values", values).(InsertBuilder) +} + +// Suffix adds an expression to the end of the query +func (b InsertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder { + return builder.Append(b, "Suffixes", Expr(sql, args...)).(InsertBuilder) +} + +// SetMap set columns and values for insert builder from a map of column name and value +// note that it will reset all previous columns and values was set if any +func (b InsertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder { + cols := make([]string, 0, len(clauses)) + vals := make([]interface{}, 0, len(clauses)) + for col, val := range clauses { + cols = append(cols, col) + vals = append(vals, val) + } + + b = builder.Set(b, "Columns", cols).(InsertBuilder) + b = builder.Set(b, "Values", [][]interface{}{vals}).(InsertBuilder) + return b +} diff --git a/vendor/github.com/Masterminds/squirrel/part.go b/vendor/github.com/Masterminds/squirrel/part.go new file mode 100644 index 0000000000..2926d03151 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/part.go @@ -0,0 +1,55 @@ +package squirrel + +import ( + "fmt" + "io" +) + +type part struct { + pred interface{} + args []interface{} +} + +func newPart(pred interface{}, args ...interface{}) Sqlizer { + return &part{pred, args} +} + +func (p part) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case Sqlizer: + sql, args, err = pred.ToSql() + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string or Sqlizer, not %T", pred) + } + return +} + +func appendToSql(parts []Sqlizer, w io.Writer, sep string, args []interface{}) ([]interface{}, error) { + for i, p := range parts { + partSql, partArgs, err := p.ToSql() + if err != nil { + return nil, err + } else if len(partSql) == 0 { + continue + } + + if i > 0 { + _, err := io.WriteString(w, sep) + if err != nil { + return nil, err + } + } + + _, err = io.WriteString(w, partSql) + if err != nil { + return nil, err + } + args = append(args, partArgs...) + } + return args, nil +} diff --git a/vendor/github.com/Masterminds/squirrel/placeholder.go b/vendor/github.com/Masterminds/squirrel/placeholder.go new file mode 100644 index 0000000000..d377788b95 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/placeholder.go @@ -0,0 +1,70 @@ +package squirrel + +import ( + "bytes" + "fmt" + "strings" +) + +// PlaceholderFormat is the interface that wraps the ReplacePlaceholders method. +// +// ReplacePlaceholders takes a SQL statement and replaces each question mark +// placeholder with a (possibly different) SQL placeholder. +type PlaceholderFormat interface { + ReplacePlaceholders(sql string) (string, error) +} + +var ( + // Question is a PlaceholderFormat instance that leaves placeholders as + // question marks. + Question = questionFormat{} + + // Dollar is a PlaceholderFormat instance that replaces placeholders with + // dollar-prefixed positional placeholders (e.g. $1, $2, $3). + Dollar = dollarFormat{} +) + +type questionFormat struct{} + +func (_ questionFormat) ReplacePlaceholders(sql string) (string, error) { + return sql, nil +} + +type dollarFormat struct{} + +func (_ dollarFormat) ReplacePlaceholders(sql string) (string, error) { + buf := &bytes.Buffer{} + i := 0 + for { + p := strings.Index(sql, "?") + if p == -1 { + break + } + + if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? 
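+			// An escaped "??" is emitted as a single literal "?" and is not
+			// counted as a positional placeholder; scanning resumes after the pair.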
+ buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + i++ + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "$%d", i) + sql = sql[p+1:] + } + } + + buf.WriteString(sql) + return buf.String(), nil +} + +// Placeholders returns a string with count ? placeholders joined with commas. +func Placeholders(count int) string { + if count < 1 { + return "" + } + + return strings.Repeat(",?", count)[1:] +} diff --git a/vendor/github.com/Masterminds/squirrel/row.go b/vendor/github.com/Masterminds/squirrel/row.go new file mode 100644 index 0000000000..74ffda92bd --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/row.go @@ -0,0 +1,22 @@ +package squirrel + +// RowScanner is the interface that wraps the Scan method. +// +// Scan behaves like database/sql.Row.Scan. +type RowScanner interface { + Scan(...interface{}) error +} + +// Row wraps database/sql.Row to let squirrel return new errors on Scan. +type Row struct { + RowScanner + err error +} + +// Scan returns Row.err or calls RowScanner.Scan. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + return r.RowScanner.Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/select.go b/vendor/github.com/Masterminds/squirrel/select.go new file mode 100644 index 0000000000..7dc09bc5a4 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/select.go @@ -0,0 +1,313 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +type selectData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + Options []string + Columns []Sqlizer + From Sqlizer + Joins []Sqlizer + WhereParts []Sqlizer + GroupBys []string + HavingParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes exprs +} + +func (d *selectData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *selectData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *selectData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *selectData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Columns) == 0 { + err = fmt.Errorf("select statements must have at least one result column") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("SELECT ") + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + if len(d.Columns) > 0 { + args, err = appendToSql(d.Columns, sql, ", ", args) + if err != nil { + return + } + } + + if d.From != nil { + sql.WriteString(" FROM ") + args, err = appendToSql([]Sqlizer{d.From}, sql, "", args) + if err != nil { + return + } + } + + if len(d.Joins) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Joins, sql, " ", args) + if err != nil { + return + } + } + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.GroupBys) > 0 { + sql.WriteString(" GROUP BY ") + sql.WriteString(strings.Join(d.GroupBys, ", ")) + } + + if 
len(d.HavingParts) > 0 { + sql.WriteString(" HAVING ") + args, err = appendToSql(d.HavingParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// SelectBuilder builds SQL SELECT statements. +type SelectBuilder builder.Builder + +func init() { + builder.Register(SelectBuilder{}, selectData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b SelectBuilder) PlaceholderFormat(f PlaceholderFormat) SelectBuilder { + return builder.Set(b, "PlaceholderFormat", f).(SelectBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b SelectBuilder) RunWith(runner BaseRunner) SelectBuilder { + return setRunWith(b, runner).(SelectBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b SelectBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(selectData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b SelectBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(selectData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b SelectBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(selectData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b SelectBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b SelectBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(selectData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(SelectBuilder) +} + +// Distinct adds a DISTINCT clause to the query. +func (b SelectBuilder) Distinct() SelectBuilder { + return b.Options("DISTINCT") +} + +// Options adds select option to the query +func (b SelectBuilder) Options(options ...string) SelectBuilder { + return builder.Extend(b, "Options", options).(SelectBuilder) +} + +// Columns adds result columns to the query. +func (b SelectBuilder) Columns(columns ...string) SelectBuilder { + var parts []interface{} + for _, str := range columns { + parts = append(parts, newPart(str)) + } + return builder.Extend(b, "Columns", parts).(SelectBuilder) +} + +// Column adds a result column to the query. +// Unlike Columns, Column accepts args which will be bound to placeholders in +// the columns string, for example: +// Column("IF(col IN ("+squirrel.Placeholders(3)+"), 1, 0) as col", 1, 2, 3) +func (b SelectBuilder) Column(column interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "Columns", newPart(column, args...)).(SelectBuilder) +} + +// From sets the FROM clause of the query. 
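+//
+// For example (illustrative table name):
+//
+//	Select("id").From("users")
+//	// => "SELECT id FROM users"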
+func (b SelectBuilder) From(from string) SelectBuilder { + return builder.Set(b, "From", newPart(from)).(SelectBuilder) +} + +// FromSelect sets a subquery into the FROM clause of the query. +func (b SelectBuilder) FromSelect(from SelectBuilder, alias string) SelectBuilder { + return builder.Set(b, "From", Alias(from, alias)).(SelectBuilder) +} + +// JoinClause adds a join clause to the query. +func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "Joins", newPart(pred, args...)).(SelectBuilder) +} + +// Join adds a JOIN clause to the query. +func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("JOIN "+join, rest...) +} + +// LeftJoin adds a LEFT JOIN clause to the query. +func (b SelectBuilder) LeftJoin(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("LEFT JOIN "+join, rest...) +} + +// RightJoin adds a RIGHT JOIN clause to the query. +func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder { + return b.JoinClause("RIGHT JOIN "+join, rest...) +} + +// Where adds an expression to the WHERE clause of the query. +// +// Expressions are ANDed together in the generated SQL. +// +// Where accepts several types for its pred argument: +// +// nil OR "" - ignored. +// +// string - SQL expression. +// If the expression has SQL placeholders then a set of arguments must be passed +// as well, one for each placeholder. +// +// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is +// transformed into an expression like " = ?", with the corresponding value +// bound to the placeholder. If the value is nil, the expression will be " +// IS NULL". If the value is an array or slice, the expression will be " IN +// (?,?,...)", with one placeholder for each item in the value. These expressions +// are ANDed together. +// +// Where will panic if pred isn't any of the above types. +func (b SelectBuilder) Where(pred interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(SelectBuilder) +} + +// GroupBy adds GROUP BY expressions to the query. +func (b SelectBuilder) GroupBy(groupBys ...string) SelectBuilder { + return builder.Extend(b, "GroupBys", groupBys).(SelectBuilder) +} + +// Having adds an expression to the HAVING clause of the query. +// +// See Where. +func (b SelectBuilder) Having(pred interface{}, rest ...interface{}) SelectBuilder { + return builder.Append(b, "HavingParts", newWherePart(pred, rest...)).(SelectBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b SelectBuilder) OrderBy(orderBys ...string) SelectBuilder { + return builder.Extend(b, "OrderBys", orderBys).(SelectBuilder) +} + +// Limit sets a LIMIT clause on the query. +func (b SelectBuilder) Limit(limit uint64) SelectBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(SelectBuilder) +} + +// Offset sets a OFFSET clause on the query. 
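+//
+// Combined with Limit this gives simple pagination (sketch):
+//
+//	Select("*").From("users").Limit(10).Offset(20)
+//	// => "SELECT * FROM users LIMIT 10 OFFSET 20"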
+func (b SelectBuilder) Offset(offset uint64) SelectBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(SelectBuilder) +} + +// Suffix adds an expression to the end of the query +func (b SelectBuilder) Suffix(sql string, args ...interface{}) SelectBuilder { + return builder.Append(b, "Suffixes", Expr(sql, args...)).(SelectBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/squirrel.go b/vendor/github.com/Masterminds/squirrel/squirrel.go new file mode 100644 index 0000000000..89aaf3dcfc --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/squirrel.go @@ -0,0 +1,166 @@ +// Package squirrel provides a fluent SQL generator. +// +// See https://github.com/lann/squirrel for examples. +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +// Sqlizer is the interface that wraps the ToSql method. +// +// ToSql returns a SQL representation of the Sqlizer, along with a slice of args +// as passed to e.g. database/sql.Exec. It can also return an error. +type Sqlizer interface { + ToSql() (string, []interface{}, error) +} + +// Execer is the interface that wraps the Exec method. +// +// Exec executes the given query as implemented by database/sql.Exec. +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Queryer is the interface that wraps the Query method. +// +// Query executes the given query as implemented by database/sql.Query. +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +// QueryRower is the interface that wraps the QueryRow method. +// +// QueryRow executes the given query as implemented by database/sql.QueryRow. +type QueryRower interface { + QueryRow(query string, args ...interface{}) RowScanner +} + +// BaseRunner groups the Execer and Queryer interfaces. +type BaseRunner interface { + Execer + Queryer +} + +// Runner groups the Execer, Queryer, and QueryRower interfaces. +type Runner interface { + Execer + Queryer + QueryRower +} + +// DBRunner wraps sql.DB to implement Runner. +type dbRunner struct { + *sql.DB +} + +func (r *dbRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.DB.QueryRow(query, args...) +} + +type txRunner struct { + *sql.Tx +} + +func (r *txRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.Tx.QueryRow(query, args...) +} + +func setRunWith(b interface{}, baseRunner BaseRunner) interface{} { + var runner Runner + switch r := baseRunner.(type) { + case Runner: + runner = r + case *sql.DB: + runner = &dbRunner{r} + case *sql.Tx: + runner = &txRunner{r} + } + return builder.Set(b, "RunWith", runner) +} + +// RunnerNotSet is returned by methods that need a Runner if it isn't set. +var RunnerNotSet = fmt.Errorf("cannot run; no Runner set (RunWith)") + +// RunnerNotQueryRunner is returned by QueryRow if the RunWith value doesn't implement QueryRower. +var RunnerNotQueryRunner = fmt.Errorf("cannot QueryRow; Runner is not a QueryRower") + +// ExecWith Execs the SQL returned by s with db. +func ExecWith(db Execer, s Sqlizer) (res sql.Result, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.Exec(query, args...) +} + +// QueryWith Querys the SQL returned by s with db. +func QueryWith(db Queryer, s Sqlizer) (rows *sql.Rows, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.Query(query, args...) +} + +// QueryRowWith QueryRows the SQL returned by s with db. 
+func QueryRowWith(db QueryRower, s Sqlizer) RowScanner { + query, args, err := s.ToSql() + return &Row{RowScanner: db.QueryRow(query, args...), err: err} +} + +// DebugSqlizer calls ToSql on s and shows the approximate SQL to be executed +// +// If ToSql returns an error, the result of this method will look like: +// "[ToSql error: %s]" or "[DebugSqlizer error: %s]" +// +// IMPORTANT: As its name suggests, this function should only be used for +// debugging. While the string result *might* be valid SQL, this function does +// not try very hard to ensure it. Additionally, executing the output of this +// function with any untrusted user input is certainly insecure. +func DebugSqlizer(s Sqlizer) string { + sql, args, err := s.ToSql() + if err != nil { + return fmt.Sprintf("[ToSql error: %s]", err) + } + + // TODO: dedupe this with placeholder.go + buf := &bytes.Buffer{} + i := 0 + for { + p := strings.Index(sql, "?") + if p == -1 { + break + } + if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? + buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + if i+1 > len(args) { + return fmt.Sprintf( + "[DebugSqlizer error: too many placeholders in %#v for %d args]", + sql, len(args)) + } + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "'%v'", args[i]) + sql = sql[p+1:] + i++ + } + } + if i < len(args) { + return fmt.Sprintf( + "[DebugSqlizer error: not enough placeholders in %#v for %d args]", + sql, len(args)) + } + buf.WriteString(sql) + return buf.String() +} diff --git a/vendor/github.com/Masterminds/squirrel/statement.go b/vendor/github.com/Masterminds/squirrel/statement.go new file mode 100644 index 0000000000..275388f630 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/statement.go @@ -0,0 +1,83 @@ +package squirrel + +import "github.com/lann/builder" + +// StatementBuilderType is the type of StatementBuilder. +type StatementBuilderType builder.Builder + +// Select returns a SelectBuilder for this StatementBuilderType. +func (b StatementBuilderType) Select(columns ...string) SelectBuilder { + return SelectBuilder(b).Columns(columns...) +} + +// Insert returns a InsertBuilder for this StatementBuilderType. +func (b StatementBuilderType) Insert(into string) InsertBuilder { + return InsertBuilder(b).Into(into) +} + +// Update returns a UpdateBuilder for this StatementBuilderType. +func (b StatementBuilderType) Update(table string) UpdateBuilder { + return UpdateBuilder(b).Table(table) +} + +// Delete returns a DeleteBuilder for this StatementBuilderType. +func (b StatementBuilderType) Delete(from string) DeleteBuilder { + return DeleteBuilder(b).From(from) +} + +// PlaceholderFormat sets the PlaceholderFormat field for any child builders. +func (b StatementBuilderType) PlaceholderFormat(f PlaceholderFormat) StatementBuilderType { + return builder.Set(b, "PlaceholderFormat", f).(StatementBuilderType) +} + +// RunWith sets the RunWith field for any child builders. +func (b StatementBuilderType) RunWith(runner BaseRunner) StatementBuilderType { + return setRunWith(b, runner).(StatementBuilderType) +} + +// StatementBuilder is a parent builder for other builders, e.g. SelectBuilder. +var StatementBuilder = StatementBuilderType(builder.EmptyBuilder).PlaceholderFormat(Question) + +// Select returns a new SelectBuilder, optionally setting some result columns. +// +// See SelectBuilder.Columns. +func Select(columns ...string) SelectBuilder { + return StatementBuilder.Select(columns...) 
+}
+
+// Insert returns a new InsertBuilder with the given table name.
+//
+// See InsertBuilder.Into.
+func Insert(into string) InsertBuilder {
+	return StatementBuilder.Insert(into)
+}
+
+// Update returns a new UpdateBuilder with the given table name.
+//
+// See UpdateBuilder.Table.
+func Update(table string) UpdateBuilder {
+	return StatementBuilder.Update(table)
+}
+
+// Delete returns a new DeleteBuilder with the given table name.
+//
+// See DeleteBuilder.From.
+func Delete(from string) DeleteBuilder {
+	return StatementBuilder.Delete(from)
+}
+
+// Case returns a new CaseBuilder.
+// The optional "what" argument sets the value of the CASE expression.
+func Case(what ...interface{}) CaseBuilder {
+	b := CaseBuilder(builder.EmptyBuilder)
+
+	switch len(what) {
+	case 0:
+	case 1:
+		b = b.what(what[0])
+	default:
+		b = b.what(newPart(what[0], what[1:]...))
+	}
+	return b
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher.go b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
new file mode 100644
index 0000000000..c2dc220883
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
@@ -0,0 +1,90 @@
+package squirrel
+
+import (
+	"database/sql"
+	"sync"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare executes the given query as implemented by database/sql.Prepare.
+type Preparer interface {
+	Prepare(query string) (*sql.Stmt, error)
+}
+
+// DBProxy groups the Execer, Queryer, QueryRower, and Preparer interfaces.
+type DBProxy interface {
+	Execer
+	Queryer
+	QueryRower
+	Preparer
+}
+
+type stmtCacher struct {
+	prep  Preparer
+	cache map[string]*sql.Stmt
+	mu    sync.Mutex
+}
+
+// NewStmtCacher returns a DBProxy wrapping prep that caches Prepared Stmts.
+//
+// Stmts are cached based on the string value of their queries.
+func NewStmtCacher(prep Preparer) DBProxy {
+	return &stmtCacher{prep: prep, cache: make(map[string]*sql.Stmt)}
+}
+
+func (sc *stmtCacher) Prepare(query string) (*sql.Stmt, error) {
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+	stmt, ok := sc.cache[query]
+	if ok {
+		return stmt, nil
+	}
+	stmt, err := sc.prep.Prepare(query)
+	if err == nil {
+		sc.cache[query] = stmt
+	}
+	return stmt, err
+}
+
+func (sc *stmtCacher) Exec(query string, args ...interface{}) (res sql.Result, err error) {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return
+	}
+	return stmt.Exec(args...)
+}
+
+func (sc *stmtCacher) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return
+	}
+	return stmt.Query(args...)
+}
+
+func (sc *stmtCacher) QueryRow(query string, args ...interface{}) RowScanner {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return stmt.QueryRow(args...)
+} + +type DBProxyBeginner interface { + DBProxy + Begin() (*sql.Tx, error) +} + +type stmtCacheProxy struct { + DBProxy + db *sql.DB +} + +func NewStmtCacheProxy(db *sql.DB) DBProxyBeginner { + return &stmtCacheProxy{DBProxy: NewStmtCacher(db), db: db} +} + +func (sp *stmtCacheProxy) Begin() (*sql.Tx, error) { + return sp.db.Begin() +} diff --git a/vendor/github.com/Masterminds/squirrel/update.go b/vendor/github.com/Masterminds/squirrel/update.go new file mode 100644 index 0000000000..682906bc05 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/update.go @@ -0,0 +1,232 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "sort" + "strings" + + "github.com/lann/builder" +) + +type updateData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + Table string + SetClauses []setClause + WhereParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes exprs +} + +type setClause struct { + column string + value interface{} +} + +func (d *updateData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *updateData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *updateData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Table) == 0 { + err = fmt.Errorf("update statements must specify a table") + return + } + if len(d.SetClauses) == 0 { + err = fmt.Errorf("update statements must have at least one Set clause") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("UPDATE ") + sql.WriteString(d.Table) + + sql.WriteString(" SET ") + setSqls := make([]string, len(d.SetClauses)) + for i, setClause := range d.SetClauses { + var valSql string + e, isExpr := setClause.value.(expr) + if isExpr { + valSql = e.sql + args = append(args, e.args...) + } else { + valSql = "?" + args = append(args, setClause.value) + } + setSqls[i] = fmt.Sprintf("%s = %s", setClause.column, valSql) + } + sql.WriteString(strings.Join(setSqls, ", ")) + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// UpdateBuilder builds SQL UPDATE statements. +type UpdateBuilder builder.Builder + +func init() { + builder.Register(UpdateBuilder{}, updateData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. 
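+//
+// For example, Dollar rewrites the "?" placeholders positionally (a sketch
+// with an illustrative "users" table):
+//
+//	Update("users").Set("name", "moe").Where("id = ?", 1).PlaceholderFormat(Dollar)
+//	// ToSql() => "UPDATE users SET name = $1 WHERE id = $2"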
+func (b UpdateBuilder) PlaceholderFormat(f PlaceholderFormat) UpdateBuilder { + return builder.Set(b, "PlaceholderFormat", f).(UpdateBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b UpdateBuilder) RunWith(runner BaseRunner) UpdateBuilder { + return setRunWith(b, runner).(UpdateBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b UpdateBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(updateData) + return data.Exec() +} + +func (b UpdateBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(updateData) + return data.Query() +} + +func (b UpdateBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(updateData) + return data.QueryRow() +} + +func (b UpdateBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b UpdateBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(updateData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(UpdateBuilder) +} + +// Table sets the table to be updated. +func (b UpdateBuilder) Table(table string) UpdateBuilder { + return builder.Set(b, "Table", table).(UpdateBuilder) +} + +// Set adds SET clauses to the query. +func (b UpdateBuilder) Set(column string, value interface{}) UpdateBuilder { + return builder.Append(b, "SetClauses", setClause{column: column, value: value}).(UpdateBuilder) +} + +// SetMap is a convenience method which calls .Set for each key/value pair in clauses. +func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder { + keys := make([]string, len(clauses)) + i := 0 + for key := range clauses { + keys[i] = key + i++ + } + sort.Strings(keys) + for _, key := range keys { + val, _ := clauses[key] + b = b.Set(key, val) + } + return b +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. +func (b UpdateBuilder) Where(pred interface{}, args ...interface{}) UpdateBuilder { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(UpdateBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b UpdateBuilder) OrderBy(orderBys ...string) UpdateBuilder { + return builder.Extend(b, "OrderBys", orderBys).(UpdateBuilder) +} + +// Limit sets a LIMIT clause on the query. +func (b UpdateBuilder) Limit(limit uint64) UpdateBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(UpdateBuilder) +} + +// Offset sets a OFFSET clause on the query. 
+func (b UpdateBuilder) Offset(offset uint64) UpdateBuilder {
+	return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(UpdateBuilder)
+}
+
+// Suffix adds an expression to the end of the query
+func (b UpdateBuilder) Suffix(sql string, args ...interface{}) UpdateBuilder {
+	return builder.Append(b, "Suffixes", Expr(sql, args...)).(UpdateBuilder)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/where.go b/vendor/github.com/Masterminds/squirrel/where.go
new file mode 100644
index 0000000000..3a2d7b709a
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/where.go
@@ -0,0 +1,28 @@
+package squirrel
+
+import (
+	"fmt"
+)
+
+type wherePart part
+
+func newWherePart(pred interface{}, args ...interface{}) Sqlizer {
+	return &wherePart{pred: pred, args: args}
+}
+
+func (p wherePart) ToSql() (sql string, args []interface{}, err error) {
+	switch pred := p.pred.(type) {
+	case nil:
+		// no-op
+	case Sqlizer:
+		return pred.ToSql()
+	case map[string]interface{}:
+		return Eq(pred).ToSql()
+	case string:
+		sql = pred
+		args = p.args
+	default:
+		err = fmt.Errorf("expected string-keyed map or string, not %T", pred)
+	}
+	return
+}
diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore
new file mode 100644
index 0000000000..daf913b1b3
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml
new file mode 100644
index 0000000000..4f2ee4d973
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md
new file mode 100644
index 0000000000..d7b4b8d584
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# Contributing #
+
+Thanks in advance for your contribution. Whatever you contribute to this project, whether a pull request, a bug report, or a feature discussion, it is always highly appreciated.
+
+## New API or feature ##
+
+I want to say a bit more about how to add new functions to this package.
+
+Package `xstrings` is a collection of useful string functions that should be implemented in Go. It's a bit subjective to say which functions should be included and which should not, so I set up the following rules to make the decision clear and as objective as possible.
+
+* Rule 1: Only string algorithms, which take a string as input, can be included.
+* Rule 2: If a function has already been implemented in package `strings`, it must not be included.
+* Rule 3: If a function is not language neutral, it must not be included.
+* Rule 4: If a function is part of the standard library in another language, it can be included.
+* Rule 5: If a function is quite useful in some famous framework or library, it can be included.
+
+New functions must be discussed in project issues before any code is submitted. A pull request that adds new functions without a referenced issue will be rejected.
+
+## Pull request ##
+
+Pull requests are always welcome. Just make sure you have run `go fmt` and that all test cases pass before submitting.
+
+If the pull request adds a new API or feature, don't forget to update README.md and add the new API to the function list.
diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE
new file mode 100644
index 0000000000..2701772593
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Huan Du
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md
new file mode 100644
index 0000000000..a4a36f4543
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/README.md
@@ -0,0 +1,115 @@
+# xstrings #
+
+[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings)
+[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings)
+
+Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions which are widely used in other languages but absent from Go package [strings](http://golang.org/pkg/strings).
+
+All functions are well tested and carefully tuned for performance.
+
+## Propose a new function ##
+
+Please review the [contributing guideline](CONTRIBUTING.md) and [create a new issue](https://github.com/huandu/xstrings/issues) stating why it should be included.
+
+## Install ##
+
+Use `go get` to install this library.
+
+    go get github.com/huandu/xstrings
+
+## API document ##
+
+See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for the full documentation.
+
+## Function list ##
+
+Go functions have a unique naming style. Someone who is experienced in another language but new to Go may have difficulty finding the right string function to use.
+
+Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how these functions map to their friends in other languages. Hopefully this list is helpful for fresh gophers.
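+
+As a quick, hedged taste before the tables (behavior per the GoDoc linked above):
+
+    package main
+
+    import (
+        "fmt"
+
+        "github.com/huandu/xstrings"
+    )
+
+    func main() {
+        fmt.Println(xstrings.Reverse("hello"))          // olleh
+        fmt.Println(xstrings.ToSnakeCase("HelloWorld")) // hello_world
+    }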
+ +### Package `xstrings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | # | +| -------- | ------- | --- | +| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | +| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | +| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | +| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | +| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | +| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | +| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | +| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | +| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | +| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | +| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | +| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | +| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | +| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | +| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | 
[#12](https://github.com/huandu/xstrings/issues/12) | +| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | +| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | +| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | +| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | +| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | + +### Package `strings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | +| -------- | ------- | +| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | +| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | +| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | +| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | +| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | +| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | +| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | +| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | +| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | +| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | +| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | +| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | +| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | +| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | +| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | +| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | +| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | +| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | +| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | +| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | +| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | +| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | +| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | +| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` 
in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | +| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | +| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | +| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | +| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | +| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | +| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | +| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | +| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | +| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | +| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | +| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | +| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | +| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | + +## License ## + +This library is licensed under the MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go new file mode 100644 index 0000000000..2aff57aab4 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/common.go @@ -0,0 +1,25 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" +) + +const bufferMaxInitGrowSize = 2048 + +// allocBuffer lazily initializes a buffer and pre-fills it with the part of +// orig that has already been consumed (everything before cur). +func allocBuffer(orig, cur string) *bytes.Buffer { + output := &bytes.Buffer{} + maxSize := len(orig) * 4 + + // Avoid reserving too much memory at once. + if maxSize > bufferMaxInitGrowSize { + maxSize = bufferMaxInitGrowSize + } + + output.Grow(maxSize) + output.WriteString(orig[:len(orig)-len(cur)]) + return output +} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go new file mode 100644 index 0000000000..8253fa9c63 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -0,0 +1,400 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "math/rand" + "unicode" + "unicode/utf8" +) + +// ToCamelCase converts the lower case character following each underscore +// to an upper case character. +// Underscores are removed from the result except in the following cases. +// * More than 1 underscore. +// "a__b" => "A_B" +// * At the beginning of string. +// "_a" => "_A" +// * At the end of string. +// "ab_" => "Ab_" +func ToCamelCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &bytes.Buffer{} + var r0, r1 rune + var size int + + // A leading '_' will appear in the output.
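+ // e.g. ToCamelCase("_my_field") => "_MyField" (a worked example, derived from the rules documented above).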
+ for len(str) > 0 { + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r0 != '_' { + break + } + + buf.WriteRune(r0) + } + + if len(str) == 0 { + return buf.String() + } + + r0 = unicode.ToUpper(r0) + + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r1 == '_' && r0 == '_' { + buf.WriteRune(r1) + continue + } + + if r1 == '_' { + r0 = unicode.ToUpper(r0) + } else { + r0 = unicode.ToLower(r0) + } + + if r1 != '_' { + buf.WriteRune(r1) + } + } + + buf.WriteRune(r0) + return buf.String() +} + +// ToSnakeCase can convert all upper case characters in a string to +// snake case format. +// +// Some samples. +// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +// "HTTP2XX" => "http_2xx" // insert an underscore before a number and after an alphabet. +// "http2xx" => "http_2xx" +// "HTTP20xOK" => "http_20x_ok" +func ToSnakeCase(str string) string { + return camelCaseToLowerCase(str, '_') +} + +// ToKebabCase can convert all upper case characters in a string to +// kebab case format. +// +// Some samples. +// "FirstName" => "first-name" +// "HTTPServer" => "http-server" +// "NoHTTPS" => "no-https" +// "GO_PATH" => "go-path" +// "GO PATH" => "go-path" // space is converted to '-'. +// "GO-PATH" => "go-path" // hyphen is converted to '-'. +// "HTTP2XX" => "http-2xx" // insert a '-' before a number and after an alphabet. +// "http2xx" => "http-2xx" +// "HTTP20xOK" => "http-20x-ok" +func ToKebabCase(str string) string { + return camelCaseToLowerCase(str, '-') +} + +func camelCaseToLowerCase(str string, connector rune) string { + if len(str) == 0 { + return "" + } + + buf := &bytes.Buffer{} + var prev, r0, r1 rune + var size int + + r0 = connector + + for len(str) > 0 { + prev = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + switch { + case r0 == utf8.RuneError: + buf.WriteRune(r0) + + case unicode.IsUpper(r0): + if prev != connector && !unicode.IsNumber(prev) { + buf.WriteRune(connector) + } + + buf.WriteRune(unicode.ToLower(r0)) + + if len(str) == 0 { + break + } + + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !unicode.IsUpper(r0) { + buf.WriteRune(r0) + break + } + + // find next non-upper-case character and insert connector properly. + // it's designed to convert `HTTPServer` to `http_server`. + // if there are more than 2 adjacent upper case characters in a word, + // treat them as an abbreviation plus a normal word. + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r0 == utf8.RuneError { + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteRune(r0) + break + } + + if !unicode.IsUpper(r0) { + if r0 == '_' || r0 == ' ' || r0 == '-' { + r0 = connector + + buf.WriteRune(unicode.ToLower(r1)) + } else if unicode.IsNumber(r0) { + // treat a number as an upper case rune + // so that both `http2xx` and `HTTP2XX` can be converted to `http_2xx`. 
+ buf.WriteRune(unicode.ToLower(r1)) + buf.WriteRune(connector) + buf.WriteRune(r0) + } else { + buf.WriteRune(connector) + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteRune(r0) + } + + break + } + + buf.WriteRune(unicode.ToLower(r1)) + } + + if len(str) == 0 || r0 == connector { + buf.WriteRune(unicode.ToLower(r0)) + } + + case unicode.IsNumber(r0): + if prev != connector && !unicode.IsNumber(prev) { + buf.WriteRune(connector) + } + + buf.WriteRune(r0) + + default: + if r0 == ' ' || r0 == '-' || r0 == '_' { + r0 = connector + } + + buf.WriteRune(r0) + } + } + + return buf.String() +} + +// SwapCase will swap characters case from upper to lower or lower to upper. +func SwapCase(str string) string { + var r rune + var size int + + buf := &bytes.Buffer{} + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case unicode.IsUpper(r): + buf.WriteRune(unicode.ToLower(r)) + + case unicode.IsLower(r): + buf.WriteRune(unicode.ToUpper(r)) + + default: + buf.WriteRune(r) + } + + str = str[size:] + } + + return buf.String() +} + +// FirstRuneToUpper converts first rune to upper case if necessary. +func FirstRuneToUpper(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsLower(r) { + return str + } + + buf := &bytes.Buffer{} + buf.WriteRune(unicode.ToUpper(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// FirstRuneToLower converts first rune to lower case if necessary. +func FirstRuneToLower(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsUpper(r) { + return str + } + + buf := &bytes.Buffer{} + buf.WriteRune(unicode.ToLower(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// Shuffle randomizes runes in a string and returns the result. +// It uses default random source in `math/rand`. +func Shuffle(str string) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + + for i := len(runes) - 1; i > 0; i-- { + index = rand.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// ShuffleSource randomizes runes in a string with given random source. +func ShuffleSource(str string, src rand.Source) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + r := rand.New(src) + + for i := len(runes) - 1; i > 0; i-- { + index = r.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// Successor returns the successor to string. +// +// If there is one alphanumeric rune is found in string, increase the rune by 1. +// If increment generates a "carry", the rune to the left of it is incremented. +// This process repeats until there is no carry, adding an additional rune if necessary. +// +// If there is no alphanumeric rune, the rightmost rune will be increased by 1 +// regardless whether the result is a valid rune or not. +// +// Only following characters are alphanumeric. 
+// * a - z +// * A - Z +// * 0 - 9 +// +// Samples (borrowed from ruby's String#succ document): +// "abcd" => "abce" +// "THX1138" => "THX1139" +// "<>" => "<>" +// "1999zzz" => "2000aaa" +// "ZZZ9999" => "AAAA0000" +// "***" => "**+" +func Successor(str string) string { + if str == "" { + return str + } + + var r rune + var i int + carry := ' ' + runes := []rune(str) + l := len(runes) + lastAlphanumeric := l + + for i = l - 1; i >= 0; i-- { + r = runes[i] + + if ('a' <= r && r <= 'y') || + ('A' <= r && r <= 'Y') || + ('0' <= r && r <= '8') { + runes[i]++ + carry = ' ' + lastAlphanumeric = i + break + } + + switch r { + case 'z': + runes[i] = 'a' + carry = 'a' + lastAlphanumeric = i + + case 'Z': + runes[i] = 'A' + carry = 'A' + lastAlphanumeric = i + + case '9': + runes[i] = '0' + carry = '0' + lastAlphanumeric = i + } + } + + // Needs to add one character for carry. + if i < 0 && carry != ' ' { + buf := &bytes.Buffer{} + buf.Grow(l + 4) // Reserve enough space for write. + + if lastAlphanumeric != 0 { + buf.WriteString(str[:lastAlphanumeric]) + } + + buf.WriteRune(carry) + + for _, r = range runes[lastAlphanumeric:] { + buf.WriteRune(r) + } + + return buf.String() + } + + // No alphanumeric character. Simply increase last rune's value. + if lastAlphanumeric == l { + runes[l-1]++ + } + + return string(runes) +} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go new file mode 100644 index 0000000000..f96e38703a --- /dev/null +++ b/vendor/github.com/huandu/xstrings/count.go @@ -0,0 +1,120 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +// Len returns str's utf8 rune length. +func Len(str string) int { + return utf8.RuneCountInString(str) +} + +// WordCount returns number of words in a string. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordCount(str string) int { + var r rune + var size, n int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + n++ + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + inWord = false + } + + str = str[size:] + } + + return n +} + +const minCJKCharacter = '\u3400' + +// Checks r is a letter but not CJK character. +func isAlphabet(r rune) bool { + if !unicode.IsLetter(r) { + return false + } + + switch { + // Quick check for non-CJK character. + case r < minCJKCharacter: + return true + + // Common CJK characters. + case r >= '\u4E00' && r <= '\u9FCC': + return false + + // Rare CJK characters. + case r >= '\u3400' && r <= '\u4D85': + return false + + // Rare and historic CJK characters. + case r >= '\U00020000' && r <= '\U0002B81D': + return false + } + + return true +} + +// Width returns string width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. +// +// Algorithm comes from `mb_strwidth` in PHP. +// http://php.net/manual/en/function.mb-strwidth.php +func Width(str string) int { + var r rune + var size, n int + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + n += RuneWidth(r) + str = str[size:] + } + + return n +} + +// RuneWidth returns character width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. 
+// +// Algorithm comes from `mb_strwidth` in PHP. +// http://php.net/manual/en/function.mb-strwidth.php +func RuneWidth(r rune) int { + switch { + case r == utf8.RuneError || r < '\x20': + return 0 + + case '\x20' <= r && r < '\u2000': + return 1 + + case '\u2000' <= r && r < '\uFF61': + return 2 + + case '\uFF61' <= r && r < '\uFFA0': + return 1 + + case '\uFFA0' <= r: + return 2 + } + + return 0 +} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go new file mode 100644 index 0000000000..1a6ef069f6 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/doc.go @@ -0,0 +1,8 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +// Package xstrings provides string algorithms which are useful but not included in the standard `strings` package. +// See the project home page for details: https://github.com/huandu/xstrings +// +// Package xstrings assumes all strings are encoded in utf8. +package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go new file mode 100644 index 0000000000..2d02df1c04 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/format.go @@ -0,0 +1,170 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "unicode/utf8" +) + +// ExpandTabs expands tab ('\t') runes in str to one or more spaces, depending on +// the current column and tabSize. +// The column number is reset to zero after each newline ('\n') occurring in str. +// +// ExpandTabs uses RuneWidth to decide each rune's width. +// For example, CJK characters will be treated as two characters. +// +// If tabSize <= 0, ExpandTabs panics. +// +// Samples: +// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" +// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" +// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" +func ExpandTabs(str string, tabSize int) string { + if tabSize <= 0 { + panic("tab size must be positive") + } + + var r rune + var i, size, column, expand int + var output *bytes.Buffer + + orig := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == '\t' { + expand = tabSize - column%tabSize + + if output == nil { + output = allocBuffer(orig, str) + } + + for i = 0; i < expand; i++ { + output.WriteByte(byte(' ')) + } + + column += expand + } else { + if r == '\n' { + column = 0 + } else { + column += RuneWidth(r) + } + + if output != nil { + output.WriteRune(r) + } + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} + +// LeftJustify returns a string padded with the pad string on the right side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned.
+// +// Samples: +// LeftJustify("hello", 4, " ") => "hello" +// LeftJustify("hello", 10, " ") => "hello " +// LeftJustify("hello", 10, "123") => "hello12312" +func LeftJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &bytes.Buffer{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + output.WriteString(str) + writePadString(output, pad, padLen, remains) + return output.String() +} + +// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// RightJustify("hello", 4, " ") => "hello" +// RightJustify("hello", 10, " ") => " hello" +// RightJustify("hello", 10, "123") => "12312hello" +func RightJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &bytes.Buffer{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains) + output.WriteString(str) + return output.String() +} + +// Center returns a string with pad string at both side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// Center("hello", 4, " ") => "hello" +// Center("hello", 10, " ") => " hello " +// Center("hello", 10, "123") => "12hello123" +func Center(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &bytes.Buffer{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains/2) + output.WriteString(str) + writePadString(output, pad, padLen, (remains+1)/2) + return output.String() +} + +func writePadString(output *bytes.Buffer, pad string, padLen, remains int) { + var r rune + var size int + + repeats := remains / padLen + + for i := 0; i < repeats; i++ { + output.WriteString(pad) + } + + remains = remains % padLen + + if remains != 0 { + for i := 0; i < remains; i++ { + r, size = utf8.DecodeRuneInString(pad) + output.WriteRune(r) + pad = pad[size:] + } + } +} diff --git a/vendor/github.com/huandu/xstrings/go.mod b/vendor/github.com/huandu/xstrings/go.mod new file mode 100644 index 0000000000..5866b3a8eb --- /dev/null +++ b/vendor/github.com/huandu/xstrings/go.mod @@ -0,0 +1 @@ +module github.com/huandu/xstrings \ No newline at end of file diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go new file mode 100644 index 0000000000..0eefb43ed7 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -0,0 +1,217 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// Reverse a utf8 encoded string. +func Reverse(str string) string { + var size int + + tail := len(str) + buf := make([]byte, tail) + s := buf + + for len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + tail -= size + s = append(s[:tail], []byte(str[:size])...) + str = str[size:] + } + + return string(buf) +} + +// Slice a string by rune. 
+// +// Start must satisfy 0 <= start <= rune length. +// +// End can be positive, zero or negative. +// If end >= 0, start and end must satisfy start <= end <= rune length. +// If end < 0, it means slice to the end of string. +// +// Otherwise, Slice will panic as out of range. +func Slice(str string, start, end int) string { + var size, startPos, endPos int + + origin := str + + if start < 0 || end > len(str) || (end >= 0 && start > end) { + panic("out of range") + } + + if end >= 0 { + end -= start + } + + for start > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + start-- + startPos += size + str = str[size:] + } + + if end < 0 { + return origin[startPos:] + } + + endPos = startPos + + for end > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + end-- + endPos += size + str = str[size:] + } + + if len(str) == 0 && (start > 0 || end > 0) { + panic("out of range") + } + + return origin[startPos:endPos] +} + +// Partition splits a string by sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", Partition returns +// "he", "l", "lo" +// +// If str doesn't contain sep, for example "hello" and "x", Partition returns +// "hello", "", "" +func Partition(str, sep string) (head, match, tail string) { + index := strings.Index(str, sep) + + if index == -1 { + head = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// LastPartition splits a string by last instance of sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", LastPartition returns +// "hel", "l", "o" +// +// If str doesn't contain sep, for example "hello" and "x", LastPartition returns +// "", "", "hello" +func LastPartition(str, sep string) (head, match, tail string) { + index := strings.LastIndex(str, sep) + + if index == -1 { + tail = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// Insert src into dst at given rune index. +// Index is counted by runes instead of bytes. +// +// If index is out of range of dst, panic with out of range. +func Insert(dst, src string, index int) string { + return Slice(dst, 0, index) + src + Slice(dst, index, -1) +} + +// Scrub scrubs invalid utf8 bytes with repl string. +// Adjacent invalid bytes are replaced only once. +func Scrub(str, repl string) string { + var buf *bytes.Buffer + var r rune + var size, pos int + var hasError bool + + origin := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == utf8.RuneError { + if !hasError { + if buf == nil { + buf = &bytes.Buffer{} + } + + buf.WriteString(origin[:pos]) + hasError = true + } + } else if hasError { + hasError = false + buf.WriteString(repl) + + origin = origin[pos:] + pos = 0 + } + + pos += size + str = str[size:] + } + + if buf != nil { + buf.WriteString(origin) + return buf.String() + } + + // No invalid byte. + return origin +} + +// WordSplit splits a string into words. Returns a slice of words. +// If there is no word in a string, return nil. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. 
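+// +// For example (derived from the definition above): +// WordSplit("don't stop-me now") => []string{"don't", "stop-me", "now"}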
+func WordSplit(str string) []string { + var word string + var words []string + var r rune + var size, pos int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + word = str + pos = 0 + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + if inWord { + inWord = false + words = append(words, word[:pos]) + } + } + + pos += size + str = str[size:] + } + + if inWord { + words = append(words, word[:pos]) + } + + return words +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go new file mode 100644 index 0000000000..d86a4cbbd3 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -0,0 +1,547 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "unicode" + "unicode/utf8" +) + +type runeRangeMap struct { + FromLo rune // Lower bound of range map. + FromHi rune // An inclusive higher bound of range map. + ToLo rune + ToHi rune +} + +type runeDict struct { + Dict [unicode.MaxASCII + 1]rune +} + +type runeMap map[rune]rune + +// Translator can translate string with pre-compiled from and to patterns. +// If a from/to pattern pair needs to be used more than once, it's recommended +// to create a Translator and reuse it. +type Translator struct { + quickDict *runeDict // A quick dictionary to look up rune by index. Only availabe for latin runes. + runeMap runeMap // Rune map for translation. + ranges []*runeRangeMap // Ranges of runes. + mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. + reverted bool // If to pattern is empty, all matched characters will be deleted. + hasPattern bool +} + +// NewTranslator creates new Translator through a from/to pattern pair. +func NewTranslator(from, to string) *Translator { + tr := &Translator{} + + if from == "" { + return tr + } + + reverted := from[0] == '^' + deletion := len(to) == 0 + + if reverted { + from = from[1:] + } + + var fromStart, fromEnd, fromRangeStep rune + var toStart, toEnd, toRangeStep rune + var fromRangeSize, toRangeSize rune + var singleRunes []rune + + // Update the to rune range. + updateRange := func() { + // No more rune to read in the to rune pattern. + if toEnd == utf8.RuneError { + return + } + + if toRangeStep == 0 { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) + return + } + + // Current range is not empty. Consume 1 rune from start. + if toStart != toEnd { + toStart += toRangeStep + return + } + + // No more rune. Repeat the last rune. + if to == "" { + toEnd = utf8.RuneError + return + } + + // Both start and end are used. Read two more runes from the to pattern. + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + + if deletion { + toStart = utf8.RuneError + toEnd = utf8.RuneError + } else { + // If from pattern is reverted, only the last rune in the to pattern will be used. + if reverted { + var size int + + for len(to) > 0 { + toStart, size = utf8.DecodeRuneInString(to) + to = to[size:] + } + + toEnd = utf8.RuneError + } else { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + } + + fromEnd = utf8.RuneError + + for len(from) > 0 { + from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) + + // fromStart is a single character. Just map it with a rune in the to pattern. 
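+ // (nextRuneRange returns rangeStep == 0 when it parsed a standalone rune rather than a range like "a-z", so there is no range to walk.)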
+ if fromRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + continue + } + + for toEnd != utf8.RuneError && fromStart != fromEnd { + // If mapped rune is a single character instead of a range, simply shift first + // rune in the range. + if toRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + fromStart += fromRangeStep + continue + } + + fromRangeSize = (fromEnd - fromStart) * fromRangeStep + toRangeSize = (toEnd - toStart) * toRangeStep + + // Not enough runes in the to pattern. Need to read more. + if fromRangeSize > toRangeSize { + fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) + fromStart += fromRangeStep + updateRange() + + // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered + // as a single rune. + if fromStart == fromEnd { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + } + + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) + updateRange() + break + } + + if fromStart == fromEnd { + fromEnd = utf8.RuneError + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) + fromEnd = utf8.RuneError + } + + if fromEnd != utf8.RuneError { + singleRunes = tr.addRune(fromEnd, toStart, singleRunes) + } + + tr.reverted = reverted + tr.mappedRune = -1 + tr.hasPattern = true + + // Translate RuneError only if in deletion or reverted mode. + if deletion || reverted { + tr.mappedRune = toStart + } + + return tr +} + +func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { + if from <= unicode.MaxASCII { + if tr.quickDict == nil { + tr.quickDict = &runeDict{} + } + + tr.quickDict.Dict[from] = to + } else { + if tr.runeMap == nil { + tr.runeMap = make(runeMap) + } + + tr.runeMap[from] = to + } + + singleRunes = append(singleRunes, from) + return singleRunes +} + +func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { + var r rune + var rrm *runeRangeMap + + if fromLo < fromHi { + rrm = &runeRangeMap{ + FromLo: fromLo, + FromHi: fromHi, + ToLo: toLo, + ToHi: toHi, + } + } else { + rrm = &runeRangeMap{ + FromLo: fromHi, + FromHi: fromLo, + ToLo: toHi, + ToHi: toLo, + } + } + + // If there is any single rune conflicts with this rune range, clear single rune record. + for _, r = range singleRunes { + if rrm.FromLo <= r && r <= rrm.FromHi { + if r <= unicode.MaxASCII { + tr.quickDict.Dict[r] = 0 + } else { + delete(tr.runeMap, r) + } + } + } + + tr.ranges = append(tr.ranges, rrm) + return fromHi, toHi +} + +func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { + var r rune + var size int + + remaining = str + escaping := false + isRange := false + + for len(remaining) > 0 { + r, size = utf8.DecodeRuneInString(remaining) + remaining = remaining[size:] + + // Parse special characters. + if !escaping { + if r == '\\' { + escaping = true + continue + } + + if r == '-' { + // Ignore slash at beginning of string. + if last == utf8.RuneError { + continue + } + + start = last + isRange = true + continue + } + } + + escaping = false + + if last != utf8.RuneError { + // This is a range which start and end are the same. + // Considier it as a normal character. 
+ if isRange && last == r { + isRange = false + continue + } + + start = last + end = r + + if isRange { + if start < end { + rangeStep = 1 + } else { + rangeStep = -1 + } + } + + return + } + + last = r + } + + start = last + end = utf8.RuneError + return +} + +// Translate str with a from/to pattern pair. +// +// See comment in Translate function for usage and samples. +func (tr *Translator) Translate(str string) string { + if !tr.hasPattern || str == "" { + return str + } + + var r rune + var size int + var needTr bool + + orig := str + + var output *bytes.Buffer + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + r, needTr = tr.TranslateRune(r) + + if needTr && output == nil { + output = allocBuffer(orig, str) + } + + if r != utf8.RuneError && output != nil { + output.WriteRune(r) + } + + str = str[size:] + } + + // No character is translated. + if output == nil { + return orig + } + + return output.String() +} + +// TranslateRune return translated rune and true if r matches the from pattern. +// If r doesn't match the pattern, original r is returned and translated is false. +func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { + switch { + case tr.quickDict != nil: + if r <= unicode.MaxASCII { + result = tr.quickDict.Dict[r] + + if result != 0 { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + } + + fallthrough + + case tr.runeMap != nil: + var ok bool + + if result, ok = tr.runeMap[r]; ok { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + + fallthrough + + default: + var rrm *runeRangeMap + ranges := tr.ranges + + for i := len(ranges) - 1; i >= 0; i-- { + rrm = ranges[i] + + if rrm.FromLo <= r && r <= rrm.FromHi { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + break + } + + if rrm.ToLo < rrm.ToHi { + result = rrm.ToLo + r - rrm.FromLo + } else if rrm.ToLo > rrm.ToHi { + // ToHi can be smaller than ToLo if range is from higher to lower. + result = rrm.ToLo - r + rrm.FromLo + } else { + result = rrm.ToLo + } + + break + } + } + } + + if tr.reverted { + if !translated { + result = tr.mappedRune + } + + translated = !translated + } + + if !translated { + result = r + } + + return +} + +// HasPattern returns true if Translator has one pattern at least. +func (tr *Translator) HasPattern() bool { + return tr.hasPattern +} + +// Translate str with the characters defined in from replaced by characters defined in to. +// +// From and to are patterns representing a set of characters. Pattern is defined as following. +// +// * Special characters +// * '-' means a range of runes, e.g. +// * "a-z" means all characters from 'a' to 'z' inclusive; +// * "z-a" means all characters from 'z' to 'a' inclusive. +// * '^' as first character means a set of all runes excepted listed, e.g. +// * "^a-z" means all characters except 'a' to 'z' inclusive. +// * '\' escapes special characters. +// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. +// +// Translate will try to find a 1:1 mapping from from to to. +// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. +// +// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. +// +// If the to pattern is an empty string, Translate works exactly the same as Delete. 
+// +// Samples: +// Translate("hello", "aeiou", "12345") => "h2ll4" +// Translate("hello", "a-z", "A-Z") => "HELLO" +// Translate("hello", "z-a", "a-z") => "svool" +// Translate("hello", "aeiou", "*") => "h*ll*" +// Translate("hello", "^l", "*") => "**ll*" +// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" +func Translate(str, from, to string) string { + tr := NewTranslator(from, to) + return tr.Translate(str) +} + +// Delete runes in str matching the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Delete("hello", "aeiou") => "hll" +// Delete("hello", "a-k") => "llo" +// Delete("hello", "^a-k") => "he" +func Delete(str, pattern string) string { + tr := NewTranslator(pattern, "") + return tr.Translate(str) +} + +// Count how many runes in str match the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Count("hello", "aeiou") => 3 +// Count("hello", "a-k") => 3 +// Count("hello", "^a-k") => 2 +func Count(str, pattern string) int { + if pattern == "" || str == "" { + return 0 + } + + var r rune + var size int + var matched bool + + tr := NewTranslator(pattern, "") + cnt := 0 + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if _, matched = tr.TranslateRune(r); matched { + cnt++ + } + } + + return cnt +} + +// Squeeze deletes adjacent repeated runes in str. +// If pattern is not empty, only runes matching the pattern will be squeezed. +// +// Samples: +// Squeeze("hello", "") => "helo" +// Squeeze("hello", "m-z") => "hello" +// Squeeze("hello world", " ") => "hello world" +func Squeeze(str, pattern string) string { + var last, r rune + var size int + var skipSqueeze, matched bool + var tr *Translator + var output *bytes.Buffer + + orig := str + last = -1 + + if len(pattern) > 0 { + tr = NewTranslator(pattern, "") + } + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + // Need to squeeze the str. 
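+ // last == r means the current rune repeats the previous one; it is dropped unless the pattern check below decides it must be kept.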
+ if last == r && !skipSqueeze { + if tr != nil { + if _, matched = tr.TranslateRune(r); !matched { + skipSqueeze = true + } + } + + if output == nil { + output = allocBuffer(orig, str) + } + + if skipSqueeze { + output.WriteRune(r) + } + } else { + if output != nil { + output.WriteRune(r) + } + + last = r + skipSqueeze = false + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore new file mode 100644 index 0000000000..529841cf17 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +tags +environ diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml new file mode 100644 index 0000000000..6bc68d67f2 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.travis.yml @@ -0,0 +1,27 @@ +# vim: ft=yaml sw=2 ts=2 + +language: go + +# enable database services +services: + - mysql + - postgresql + +# create test database +before_install: + - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;' + - psql -c 'create database sqlxtest;' -U postgres + - go get github.com/mattn/goveralls + - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true" + - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable" + - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db" + +# go versions to test +go: + - "1.8" + - "1.9" + - "1.10.x" + +# run tests w/ coverage +script: + - travis_retry $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE new file mode 100644 index 0000000000..0d31edfa73 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/LICENSE @@ -0,0 +1,23 @@ + Copyright (c) 2013, Jason Moiron + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
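A minimal usage sketch of the translate pattern language defined in xstrings/translate.go above; expected outputs follow the documented samples and semantics:

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	// Range-to-range mapping, as in tr(1).
	fmt.Println(xstrings.Translate("hello", "a-z", "A-Z")) // "HELLO"

	// '^' negates the from pattern: everything except 'l' maps to '*'.
	fmt.Println(xstrings.Translate("hello", "^l", "*")) // "**ll*"

	// An empty to pattern deletes matched runes; Delete is the shorthand.
	fmt.Println(xstrings.Delete("hello", "aeiou")) // "hll"

	// Squeeze collapses adjacent repeats that match the pattern.
	fmt.Println(xstrings.Squeeze("aaabbbccc", "a-b")) // "abccc"
}
```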
+ diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md new file mode 100644 index 0000000000..839034365f --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/README.md @@ -0,0 +1,187 @@ +# sqlx + +[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE) + +sqlx is a library which provides a set of extensions on Go's standard +`database/sql` library. The sqlx versions of `sql.DB`, `sql.Tx`, `sql.Stmt`, +et al. all leave the underlying interfaces untouched, so that their interfaces +are a superset of the standard ones. This makes it relatively painless to +integrate existing codebases using database/sql with sqlx. + +Major additional concepts are: + +* Marshal rows into structs (with embedded struct support), maps, and slices +* Named parameter support including prepared statements +* `Get` and `Select` to go quickly from query to struct/slice + +In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx), +there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that +explains how to use `database/sql` along with sqlx. + +## Recent Changes + +* The [introduction](https://github.com/jmoiron/sqlx/pull/387) of `sql.ColumnType` sets the required minimum Go version to 1.8. + +* `sqlx/types.JsonText` has been renamed to `JSONText` to follow Go naming conventions. + +This breaks backwards compatibility, but it's done in a way that is trivially fixable +(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in +active development currently. + +* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905). + +### Backwards Compatibility + +There is no Go1-like promise of absolute stability, but I take the issue seriously +and will maintain the library in a compatible state unless vital bugs prevent me +from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and +[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior, +a wider API cleanup was done at the time of fixing. It's possible this will happen +in the future; if it does, a git tag will be provided for users requiring the old +behavior to continue to use it until such a time as they can migrate. + +## install + + go get github.com/jmoiron/sqlx + +## issues + +Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of +`Columns()` does not fully qualify column names in queries like: + +```sql +SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id; +``` + +making a struct or map destination ambiguous. Use `AS` in your queries +to give columns distinct names, `rows.Scan` to scan them manually, or +`SliceScan` to get a slice of results. + +## usage + +Below is an example which shows some common use cases for sqlx. Check +[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more +usage examples.
+ + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + + _ "github.com/lib/pq" + "github.com/jmoiron/sqlx" +) + +var schema = ` +CREATE TABLE person ( + first_name text, + last_name text, + email text +); + +CREATE TABLE place ( + country text, + city text NULL, + telcode integer +)` + +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string +} + +type Place struct { + Country string + City sql.NullString + TelCode int +} + +func main() { + // this Pings the database trying to connect, panics on error + // use sqlx.Open() for sql.Open() semantics + db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") + if err != nil { + log.Fatalln(err) + } + + // exec the schema or fail; multi-statement Exec behavior varies between + // database drivers; pq will exec them all, sqlite3 won't, ymmv + db.MustExec(schema) + + tx := db.MustBegin() + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") + tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") + // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person + tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) + tx.Commit() + + // Query the database, storing results in a []Person (wrapped in []interface{}) + people := []Person{} + db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") + jason, john := people[0], people[1] + + fmt.Printf("%#v\n%#v", jason, john) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} + + // You can also get a single result, a la QueryRow + jason = Person{} + err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") + fmt.Printf("%#v\n", jason) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + places := []Place{} + err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + fmt.Println(err) + return + } + usa, singsing, honkers := places[0], places[1], places[2] + + fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + + // Loop through rows using only one struct + place := Place{} + rows, err := db.Queryx("SELECT * FROM place") + for rows.Next() { + err := rows.StructScan(&place) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%#v\n", place) + } + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + // 
Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + + // Named queries, using `:name` as the bindvar. Automatic bindvar support + // which takes into account the dbtype based on the driverName on sqlx.Open/Connect + _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, + map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + + // Selects Mr. Smith from the database + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) + + // Named queries can also use structs. Their bind names follow the same rules + // as the name -> db mapping, so struct fields are lowercased and the `db` tag + // is taken into consideration. + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) +} +``` + diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go new file mode 100644 index 0000000000..0a48252a03 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/bind.go @@ -0,0 +1,217 @@ +package sqlx + +import ( + "bytes" + "database/sql/driver" + "errors" + "reflect" + "strconv" + "strings" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Bindvar types supported by Rebind, BindMap and BindStruct. +const ( + UNKNOWN = iota + QUESTION + DOLLAR + NAMED + AT +) + +// BindType returns the bindtype for a given database given a drivername. +func BindType(driverName string) int { + switch driverName { + case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres": + return DOLLAR + case "mysql": + return QUESTION + case "sqlite3": + return QUESTION + case "oci8", "ora", "goracle": + return NAMED + case "sqlserver": + return AT + } + return UNKNOWN +} + +// FIXME: this should be able to be tolerant of escaped ?'s in queries without +// losing much speed, and should be to avoid confusion. + +// Rebind a query from the default bindtype (QUESTION) to the target bindtype. +func Rebind(bindType int, query string) string { + switch bindType { + case QUESTION, UNKNOWN: + return query + } + + // Add space enough for 10 params before we have to allocate + rqb := make([]byte, 0, len(query)+10) + + var i, j int + + for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { + rqb = append(rqb, query[:i]...) + + switch bindType { + case DOLLAR: + rqb = append(rqb, '$') + case NAMED: + rqb = append(rqb, ':', 'a', 'r', 'g') + case AT: + rqb = append(rqb, '@', 'p') + } + + j++ + rqb = strconv.AppendInt(rqb, int64(j), 10) + + query = query[i+1:] + } + + return string(append(rqb, query...)) +} + +// Experimental implementation of Rebind which uses a bytes.Buffer. The code is +// much simpler and should be more resistant to odd unicode, but it is twice as +// slow. Kept here for benchmarking purposes and to possibly replace Rebind if +// problems arise with its somewhat naive handling of unicode. +func rebindBuff(bindType int, query string) string { + if bindType != DOLLAR { + return query + } + + b := make([]byte, 0, len(query)) + rqb := bytes.NewBuffer(b) + j := 1 + for _, r := range query { + if r == '?' { + rqb.WriteRune('$') + rqb.WriteString(strconv.Itoa(j)) + j++ + } else { + rqb.WriteRune(r) + } + } + + return rqb.String() +} + +// In expands slice values in args, returning the modified query string +// and a new arg list that can be executed by a database. The `query` should +// use the `?` bindVar. The return value uses the `?` bindVar. 
+func In(query string, args ...interface{}) (string, []interface{}, error) { + // argMeta stores reflect.Value and length for slices and + // the value itself for non-slice arguments + type argMeta struct { + v reflect.Value + i interface{} + length int + } + + var flatArgsCount int + var anySlices bool + + meta := make([]argMeta, len(args)) + + for i, arg := range args { + if a, ok := arg.(driver.Valuer); ok { + arg, _ = a.Value() + } + v := reflect.ValueOf(arg) + t := reflectx.Deref(v.Type()) + + // []byte is a driver.Value type so it should not be expanded + if t.Kind() == reflect.Slice && t != reflect.TypeOf([]byte{}) { + meta[i].length = v.Len() + meta[i].v = v + + anySlices = true + flatArgsCount += meta[i].length + + if meta[i].length == 0 { + return "", nil, errors.New("empty slice passed to 'in' query") + } + } else { + meta[i].i = arg + flatArgsCount++ + } + } + + // don't do any parsing if there aren't any slices; note that this means + // some errors that we might have caught below will not be returned. + if !anySlices { + return query, args, nil + } + + newArgs := make([]interface{}, 0, flatArgsCount) + buf := make([]byte, 0, len(query)+len(", ?")*flatArgsCount) + + var arg, offset int + + for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { + if arg >= len(meta) { + // if an argument wasn't passed, lets return an error; this is + // not actually how database/sql Exec/Query works, but since we are + // creating an argument list programmatically, we want to be able + // to catch these programmer errors earlier. + return "", nil, errors.New("number of bindVars exceeds arguments") + } + + argMeta := meta[arg] + arg++ + + // not a slice, continue. + // our questionmark will either be written before the next expansion + // of a slice or after the loop when writing the rest of the query + if argMeta.length == 0 { + offset = offset + i + 1 + newArgs = append(newArgs, argMeta.i) + continue + } + + // write everything up to and including our ? character + buf = append(buf, query[:offset+i+1]...) + + for si := 1; si < argMeta.length; si++ { + buf = append(buf, ", ?"...) + } + + newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) + + // slice the query and reset the offset. this avoids some bookkeeping for + // the write after the loop + query = query[offset+i+1:] + offset = 0 + } + + buf = append(buf, query...) + + if arg < len(meta) { + return "", nil, errors.New("number of bindVars less than number arguments") + } + + return string(buf), newArgs, nil +} + +func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { + switch val := v.Interface().(type) { + case []interface{}: + args = append(args, val...) + case []int: + for i := range val { + args = append(args, val[i]) + } + case []string: + for i := range val { + args = append(args, val[i]) + } + default: + for si := 0; si < vlen; si++ { + args = append(args, v.Index(si).Interface()) + } + } + + return args +} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go new file mode 100644 index 0000000000..e2b4e60b2e --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/doc.go @@ -0,0 +1,12 @@ +// Package sqlx provides general purpose extensions to database/sql. +// +// It is intended to seamlessly wrap database/sql and provide convenience +// methods which are useful in the development of database driven applications. +// None of the underlying database/sql methods are changed. 
Instead all extended +// behavior is implemented through new methods defined on wrapper types. +// +// Additions include scanning into structs, named query support, rebinding +// queries for different drivers, convenient shorthands for common error handling +// and more. +// +package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/go.mod b/vendor/github.com/jmoiron/sqlx/go.mod new file mode 100644 index 0000000000..66c67561cc --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/go.mod @@ -0,0 +1,7 @@ +module github.com/jmoiron/sqlx + +require ( + github.com/go-sql-driver/mysql v1.4.0 + github.com/lib/pq v1.0.0 + github.com/mattn/go-sqlite3 v1.9.0 +) diff --git a/vendor/github.com/jmoiron/sqlx/go.sum b/vendor/github.com/jmoiron/sqlx/go.sum new file mode 100644 index 0000000000..a3239ada75 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/go.sum @@ -0,0 +1,6 @@ +github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go new file mode 100644 index 0000000000..fa82b5609f --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named.go @@ -0,0 +1,356 @@ +package sqlx + +// Named Query Support +// +// * BindMap - bind query bindvars to map/struct args +// * NamedExec, NamedQuery - named query w/ struct or map +// * NamedStmt - a pre-compiled named query which is a prepared statement +// +// Internal Interfaces: +// +// * compileNamedQuery - rebind a named query, returning a query and list of names +// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist +// +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strconv" + "unicode" + + "github.com/jmoiron/sqlx/reflectx" +) + +// NamedStmt is a prepared statement that executes named queries. Prepare it +// how you would execute a NamedQuery, but pass in a struct or map when executing. +type NamedStmt struct { + Params []string + QueryString string + Stmt *Stmt +} + +// Close closes the named statement. +func (n *NamedStmt) Close() error { + return n.Stmt.Close() +} + +// Exec executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.Exec(args...) +} + +// Query executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.Query(args...) +} + +// QueryRow executes a named statement against the database. Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. +// Any named placeholder parameters are replaced with fields from arg. 
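+// +// A hypothetical call (map argument; struct arguments work the same way): +// row := stmt.QueryRow(map[string]interface{}{"first_name": "Ada"})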
+func (n *NamedStmt) QueryRow(arg interface{}) *Row {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return n.Stmt.QueryRowx(args...)
+}
+
+// MustExec execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
+	res, err := n.Exec(arg)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// Queryx using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
+	r, err := n.Query(arg)
+	if err != nil {
+		return nil, err
+	}
+	return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
+	return n.QueryRow(arg)
+}
+
+// Select using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
+	rows, err := n.Queryx(arg)
+	if err != nil {
+		return err
+	}
+	// if something happens here, we want to make sure the rows are Closed
+	defer rows.Close()
+	return scanAll(rows, dest, false)
+}
+
+// Get using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
+	r := n.QueryRowx(arg)
+	return r.scanAny(dest, false)
+}
+
+// Unsafe creates an unsafe version of the NamedStmt
+func (n *NamedStmt) Unsafe() *NamedStmt {
+	r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
+	r.Stmt.unsafe = true
+	return r
+}
+
+// A union interface of preparer and binder, required to be able to prepare
+// named statements (as the bindtype must be determined).
+type namedPreparer interface {
+	Preparer
+	binder
+}
+
+func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
+	bindType := BindType(p.DriverName())
+	q, args, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return nil, err
+	}
+	stmt, err := Preparex(p, q)
+	if err != nil {
+		return nil, err
+	}
+	return &NamedStmt{
+		QueryString: q,
+		Params:      args,
+		Stmt:        stmt,
+	}, nil
+}
+
+func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+	if maparg, ok := arg.(map[string]interface{}); ok {
+		return bindMapArgs(names, maparg)
+	}
+	return bindArgs(names, arg, m)
+}
+
+// private interface to generate a list of interfaces from a given struct
+// type, given a list of names to pull out of the struct. Used by public
+// BindStruct interface.
+func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+	arglist := make([]interface{}, 0, len(names))
+
+	// grab the indirected value of arg
+	v := reflect.ValueOf(arg)
+	for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
+		v = v.Elem()
+	}
+
+	err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error {
+		if len(t) == 0 {
+			return fmt.Errorf("could not find name %s in %#v", names[i], arg)
+		}
+
+		val := reflectx.FieldByIndexesReadOnly(v, t)
+		arglist = append(arglist, val.Interface())
+
+		return nil
+	})
+
+	return arglist, err
+}
+
+// like bindArgs, but for maps.
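+// For example, names=["a", "b"] with arg {"a": 1, "b": 2} yields [1, 2],
+// in name order; any name missing from the map is an error (a sketch of
+// the expected behavior).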
+func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
+	arglist := make([]interface{}, 0, len(names))
+
+	for _, name := range names {
+		val, ok := arg[name]
+		if !ok {
+			return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
+		}
+		arglist = append(arglist, val)
+	}
+	return arglist, nil
+}
+
+// bindStruct binds a named parameter query with fields from a struct argument.
+// The rules for binding field names to parameter names follow the same
+// conventions as for StructScan, including obeying the `db` struct tags.
+func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+	bound, names, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	arglist, err := bindArgs(names, arg, m)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	return bound, arglist, nil
+}
+
+// bindMap binds a named parameter query with a map of arguments.
+func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
+	bound, names, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	arglist, err := bindMapArgs(names, args)
+	return bound, arglist, err
+}
+
+// -- Compilation of Named Queries
+
+// Allow digits and letters in bind params; additionally runes are
+// checked against underscores, meaning that bind params can be
+// alphanumeric with underscores. Mind the difference between unicode
+// digits and numbers, where '5' is a digit but '五' is not.
+var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
+
+// FIXME: this function isn't safe for unicode named params, as a failing test
+// can testify. This is not a regression but a failure of the original code
+// as well. It should be modified to range over runes in a string rather than
+// bytes, even though this is less convenient and slower. Hopefully the
+// addition of the prepared NamedStmt (which will only do this once) will make
+// up for the slightly slower ad-hoc NamedExec/NamedQuery.
+
+// compile a NamedQuery into an unbound query (using the '?' bindvar) and
+// a list of names.
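+//
+// For example (a sketch of the expected rewrite with the DOLLAR bindvar):
+//
+//	q, names, _ := compileNamedQuery([]byte(`SELECT * FROM t WHERE a=:a AND b=:b`), DOLLAR)
+//	// q == `SELECT * FROM t WHERE a=$1 AND b=$2`; names == []string{"a", "b"}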
+func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
+	names = make([]string, 0, 10)
+	rebound := make([]byte, 0, len(qs))
+
+	inName := false
+	last := len(qs) - 1
+	currentVar := 1
+	name := make([]byte, 0, 10)
+
+	for i, b := range qs {
+		// a ':' while we're in a name is an error
+		if b == ':' {
+			// if this is the second ':' in a '::' escape sequence, append a ':'
+			if inName && i > 0 && qs[i-1] == ':' {
+				rebound = append(rebound, ':')
+				inName = false
+				continue
+			} else if inName {
+				err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
+				return query, names, err
+			}
+			inName = true
+			name = []byte{}
+		} else if inName && i > 0 && b == '=' {
+			rebound = append(rebound, ':', '=')
+			inName = false
+			continue
+			// if we're in a name, and this is an allowed character, continue
+		} else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
+			// append the byte to the name if we are in a name and not on the last byte
+			name = append(name, b)
+			// if we're in a name and it's not an allowed character, the name is done
+		} else if inName {
+			inName = false
+			// if this is the final byte of the string and it is part of the name, then
+			// make sure to add it to the name
+			if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
+				name = append(name, b)
+			}
+			// add the string representation to the names list
+			names = append(names, string(name))
+			// add a proper bindvar for the bindType
+			switch bindType {
+			// oracle only supports named type bind vars even for positional
+			case NAMED:
+				rebound = append(rebound, ':')
+				rebound = append(rebound, name...)
+			case QUESTION, UNKNOWN:
+				rebound = append(rebound, '?')
+			case DOLLAR:
+				rebound = append(rebound, '$')
+				for _, b := range strconv.Itoa(currentVar) {
+					rebound = append(rebound, byte(b))
+				}
+				currentVar++
+			case AT:
+				rebound = append(rebound, '@', 'p')
+				for _, b := range strconv.Itoa(currentVar) {
+					rebound = append(rebound, byte(b))
+				}
+				currentVar++
+			}
+			// write the terminating byte back out, unless it was the final byte
+			// and was consumed as part of the name
+			if i != last {
+				rebound = append(rebound, b)
+			} else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
+				rebound = append(rebound, b)
+			}
+		} else {
+			// this is a normal byte and should just go onto the rebound query
+			rebound = append(rebound, b)
+		}
+	}
+
+	return string(rebound), names, err
+}
+
+// BindNamed binds a struct or a map to a query with named parameters.
+// Deprecated: use sqlx.Named instead of this; it may be removed in the future.
+func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
+	return bindNamedMapper(bindType, query, arg, mapper())
+}
+
+// Named takes a query using named parameters and an argument and
+// returns a new query with a list of args that can be executed by
+// a database. The return value uses the `?` bindvar.
+func Named(query string, arg interface{}) (string, []interface{}, error) {
+	return bindNamedMapper(QUESTION, query, arg, mapper())
+}
+
+func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+	if maparg, ok := arg.(map[string]interface{}); ok {
+		return bindMap(bindType, query, maparg)
+	}
+	return bindStruct(bindType, query, arg, m)
+}
+
+// NamedQuery binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.DB). It works with both structs and with
+// map[string]interface{} types.
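+//
+// A usage sketch (hypothetical `person` table; error handling elided):
+//
+//	rows, _ := NamedQuery(db, `SELECT * FROM person WHERE first_name=:fn`,
+//		map[string]interface{}{"fn": "Jane"})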
+func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.Queryx(q, args...)
+}
+
+// NamedExec uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.Exec(q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go
new file mode 100644
index 0000000000..9405007e23
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named_context.go
@@ -0,0 +1,132 @@
+// +build go1.8
+
+package sqlx
+
+import (
+	"context"
+	"database/sql"
+)
+
+// A union interface of contextPreparer and binder, required to be able to
+// prepare named statements with context (as the bindtype must be determined).
+type namedPreparerContext interface {
+	PreparerContext
+	binder
+}
+
+func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
+	bindType := BindType(p.DriverName())
+	q, args, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return nil, err
+	}
+	stmt, err := PreparexContext(ctx, p, q)
+	if err != nil {
+		return nil, err
+	}
+	return &NamedStmt{
+		QueryString: q,
+		Params:      args,
+		Stmt:        stmt,
+	}, nil
+}
+
+// ExecContext executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return *new(sql.Result), err
+	}
+	return n.Stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return nil, err
+	}
+	return n.Stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+	res, err := n.ExecContext(ctx, arg)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// QueryxContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
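+//
+// A sketch (assumes a prepared NamedStmt stmt and a context ctx; error
+// handling elided):
+//
+//	rows, _ := stmt.QueryxContext(ctx, map[string]interface{}{"fn": "Jane"})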
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+	r, err := n.QueryContext(ctx, arg)
+	if err != nil {
+		return nil, err
+	}
+	return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+	return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	rows, err := n.QueryxContext(ctx, arg)
+	if err != nil {
+		return err
+	}
+	// if something happens here, we want to make sure the rows are Closed
+	defer rows.Close()
+	return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	r := n.QueryRowxContext(ctx, arg)
+	return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.DB). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.ExecContext(ctx, q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
new file mode 100644
index 0000000000..f01d3d1f08
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
@@ -0,0 +1,17 @@
+# reflectx
+
+The sqlx package has special reflect needs. In particular, it needs to:
+
+* be able to map a name to a field
+* understand embedded structs
+* understand mapping names to fields by a particular tag
+* support user-specified name -> field mapping functions
+
+These behaviors mimic the behaviors of the standard library marshallers and also the
+behavior of standard Go accessors.
+
+The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
+addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
+tags in the ways that are vital to most marshallers, and they are slow.
+
+This reflectx package extends reflect to achieve these goals.
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
new file mode 100644
index 0000000000..73c21eb39d
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -0,0 +1,441 @@
+// Package reflectx implements extensions to the standard reflect lib suitable
+// for implementing marshalling and unmarshalling packages.
The main Mapper type +// allows for Go-compatible named attribute access, including accessing embedded +// struct attributes and the ability to use functions and struct tags to +// customize field names. +// +package reflectx + +import ( + "reflect" + "runtime" + "strings" + "sync" +) + +// A FieldInfo is metadata for a struct field. +type FieldInfo struct { + Index []int + Path string + Field reflect.StructField + Zero reflect.Value + Name string + Options map[string]string + Embedded bool + Children []*FieldInfo + Parent *FieldInfo +} + +// A StructMap is an index of field metadata for a struct. +type StructMap struct { + Tree *FieldInfo + Index []*FieldInfo + Paths map[string]*FieldInfo + Names map[string]*FieldInfo +} + +// GetByPath returns a *FieldInfo for a given string path. +func (f StructMap) GetByPath(path string) *FieldInfo { + return f.Paths[path] +} + +// GetByTraversal returns a *FieldInfo for a given integer path. It is +// analogous to reflect.FieldByIndex, but using the cached traversal +// rather than re-executing the reflect machinery each time. +func (f StructMap) GetByTraversal(index []int) *FieldInfo { + if len(index) == 0 { + return nil + } + + tree := f.Tree + for _, i := range index { + if i >= len(tree.Children) || tree.Children[i] == nil { + return nil + } + tree = tree.Children[i] + } + return tree +} + +// Mapper is a general purpose mapper of names to struct fields. A Mapper +// behaves like most marshallers in the standard library, obeying a field tag +// for name mapping but also providing a basic transform function. +type Mapper struct { + cache map[reflect.Type]*StructMap + tagName string + tagMapFunc func(string) string + mapFunc func(string) string + mutex sync.Mutex +} + +// NewMapper returns a new mapper using the tagName as its struct field tag. +// If tagName is the empty string, it is ignored. +func NewMapper(tagName string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + } +} + +// NewMapperTagFunc returns a new mapper which contains a mapper for field names +// AND a mapper for tag values. This is useful for tags like json which can +// have values like "name,omitempty". +func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: mapFunc, + tagMapFunc: tagMapFunc, + } +} + +// NewMapperFunc returns a new mapper which optionally obeys a field tag and +// a struct field name mapper func given by f. Tags will take precedence, but +// for any other field, the mapped name will be f(field.Name) +func NewMapperFunc(tagName string, f func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: f, + } +} + +// TypeMap returns a mapping of field strings to int slices representing +// the traversal down the struct to reach the field. +func (m *Mapper) TypeMap(t reflect.Type) *StructMap { + m.mutex.Lock() + mapping, ok := m.cache[t] + if !ok { + mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) + m.cache[t] = mapping + } + m.mutex.Unlock() + return mapping +} + +// FieldMap returns the mapper's mapping of field names to reflect values. Panics +// if v's Kind is not Struct, or v is not Indirectable to a struct kind. 
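+//
+// For example (a sketch using the default mapper conventions):
+//
+//	type T struct {
+//		A int `db:"a"`
+//	}
+//	fm := m.FieldMap(reflect.ValueOf(&T{A: 1}))
+//	// fm["a"] holds the reflect.Value of field A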
+func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + r := map[string]reflect.Value{} + tm := m.TypeMap(v.Type()) + for tagName, fi := range tm.Names { + r[tagName] = FieldByIndexes(v, fi.Index) + } + return r +} + +// FieldByName returns a field by its mapped name as a reflect.Value. +// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. +// Returns zero Value if the name is not found. +func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + fi, ok := tm.Names[name] + if !ok { + return v + } + return FieldByIndexes(v, fi.Index) +} + +// FieldsByName returns a slice of values corresponding to the slice of names +// for the value. Panics if v's Kind is not Struct or v is not Indirectable +// to a struct Kind. Returns zero Value for each name not found. +func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + vals := make([]reflect.Value, 0, len(names)) + for _, name := range names { + fi, ok := tm.Names[name] + if !ok { + vals = append(vals, *new(reflect.Value)) + } else { + vals = append(vals, FieldByIndexes(v, fi.Index)) + } + } + return vals +} + +// TraversalsByName returns a slice of int slices which represent the struct +// traversals for each mapped name. Panics if t is not a struct or Indirectable +// to a struct. Returns empty int slice for each name not found. +func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { + r := make([][]int, 0, len(names)) + m.TraversalsByNameFunc(t, names, func(_ int, i []int) error { + if i == nil { + r = append(r, []int{}) + } else { + r = append(r, i) + } + + return nil + }) + return r +} + +// TraversalsByNameFunc traverses the mapped names and calls fn with the index of +// each name and the struct traversal represented by that name. Panics if t is not +// a struct or Indirectable to a struct. Returns the first error returned by fn or nil. +func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error { + t = Deref(t) + mustBe(t, reflect.Struct) + tm := m.TypeMap(t) + for i, name := range names { + fi, ok := tm.Names[name] + if !ok { + if err := fn(i, nil); err != nil { + return err + } + } else { + if err := fn(i, fi.Index); err != nil { + return err + } + } + } + return nil +} + +// FieldByIndexes returns a value for the field given by the struct traversal +// for the given value. +func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value { + for _, i := range indexes { + v = reflect.Indirect(v).Field(i) + // if this is a pointer and it's nil, allocate a new value and set it + if v.Kind() == reflect.Ptr && v.IsNil() { + alloc := reflect.New(Deref(v.Type())) + v.Set(alloc) + } + if v.Kind() == reflect.Map && v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + } + return v +} + +// FieldByIndexesReadOnly returns a value for a particular struct traversal, +// but is not concerned with allocating nil pointers because the value is +// going to be used for reading and not setting. 
+func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
+	for _, i := range indexes {
+		v = reflect.Indirect(v).Field(i)
+	}
+	return v
+}
+
+// Deref is Indirect for reflect.Types
+func Deref(t reflect.Type) reflect.Type {
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t
+}
+
+// -- helpers & utilities --
+
+type kinder interface {
+	Kind() reflect.Kind
+}
+
+// mustBe checks a value against a kind, panicking with a reflect.ValueError
+// if the kind isn't that which is required.
+func mustBe(v kinder, expected reflect.Kind) {
+	if k := v.Kind(); k != expected {
+		panic(&reflect.ValueError{Method: methodName(), Kind: k})
+	}
+}
+
+// methodName returns the caller of the function calling methodName
+func methodName() string {
+	pc, _, _, _ := runtime.Caller(2)
+	f := runtime.FuncForPC(pc)
+	if f == nil {
+		return "unknown method"
+	}
+	return f.Name()
+}
+
+type typeQueue struct {
+	t  reflect.Type
+	fi *FieldInfo
+	pp string // Parent path
+}
+
+// A copying append that creates a new slice each time.
+func apnd(is []int, i int) []int {
+	x := make([]int, len(is)+1)
+	for p, n := range is {
+		x[p] = n
+	}
+	x[len(x)-1] = i
+	return x
+}
+
+type mapf func(string) string
+
+// parseName parses the tag and the target name for the given field using
+// the tagName (e.g. 'json' for `json:"foo"` tags), mapFunc for mapping the
+// field's name to a target name, and tagMapFunc for mapping the tag to
+// a target name.
+func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
+	// first, set the fieldName to the field's name
+	fieldName = field.Name
+	// if a mapFunc is set, use that to override the fieldName
+	if mapFunc != nil {
+		fieldName = mapFunc(fieldName)
+	}
+
+	// if there's no tag to look for, return the field name
+	if tagName == "" {
+		return "", fieldName
+	}
+
+	// if this tag is not set using the normal convention in the tag,
+	// then return the fieldName; this check is done because, according
+	// to the reflect documentation:
+	//    If the tag does not have the conventional format,
+	//    the value returned by Get is unspecified.
+	// which doesn't sound great.
+	if !strings.Contains(string(field.Tag), tagName+":") {
+		return "", fieldName
+	}
+
+	// at this point we're fairly sure that we have a tag, so let's pull it out
+	tag = field.Tag.Get(tagName)
+
+	// if we have a mapper function, call it on the whole tag
+	// XXX: this is a change from the old version, which pulled out the name
+	// before the tagMapFunc could be run, but I think this is the right way
+	if tagMapFunc != nil {
+		tag = tagMapFunc(tag)
+	}
+
+	// finally, split the options from the name
+	parts := strings.Split(tag, ",")
+	fieldName = parts[0]
+
+	return tag, fieldName
+}
+
+// parseOptions parses options out of a tag string, skipping the name
+func parseOptions(tag string) map[string]string {
+	parts := strings.Split(tag, ",")
+	options := make(map[string]string, len(parts))
+	if len(parts) > 1 {
+		for _, opt := range parts[1:] {
+			// short circuit potentially expensive split op
+			if strings.Contains(opt, "=") {
+				kv := strings.Split(opt, "=")
+				options[kv[0]] = kv[1]
+				continue
+			}
+			options[opt] = ""
+		}
+	}
+	return options
+}
+
+// getMapping returns a mapping for the t type, using the tagName, mapFunc and
+// tagMapFunc to determine the canonical names of fields.
+func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { + m := []*FieldInfo{} + + root := &FieldInfo{} + queue := []typeQueue{} + queue = append(queue, typeQueue{Deref(t), root, ""}) + +QueueLoop: + for len(queue) != 0 { + // pop the first item off of the queue + tq := queue[0] + queue = queue[1:] + + // ignore recursive field + for p := tq.fi.Parent; p != nil; p = p.Parent { + if tq.fi.Field.Type == p.Field.Type { + continue QueueLoop + } + } + + nChildren := 0 + if tq.t.Kind() == reflect.Struct { + nChildren = tq.t.NumField() + } + tq.fi.Children = make([]*FieldInfo, nChildren) + + // iterate through all of its fields + for fieldPos := 0; fieldPos < nChildren; fieldPos++ { + + f := tq.t.Field(fieldPos) + + // parse the tag and the target name using the mapping options for this field + tag, name := parseName(f, tagName, mapFunc, tagMapFunc) + + // if the name is "-", disabled via a tag, skip it + if name == "-" { + continue + } + + fi := FieldInfo{ + Field: f, + Name: name, + Zero: reflect.New(f.Type).Elem(), + Options: parseOptions(tag), + } + + // if the path is empty this path is just the name + if tq.pp == "" { + fi.Path = fi.Name + } else { + fi.Path = tq.pp + "." + fi.Name + } + + // skip unexported fields + if len(f.PkgPath) != 0 && !f.Anonymous { + continue + } + + // bfs search of anonymous embedded structs + if f.Anonymous { + pp := tq.pp + if tag != "" { + pp = fi.Path + } + + fi.Embedded = true + fi.Index = apnd(tq.fi.Index, fieldPos) + nChildren := 0 + ft := Deref(f.Type) + if ft.Kind() == reflect.Struct { + nChildren = ft.NumField() + } + fi.Children = make([]*FieldInfo, nChildren) + queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) + } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) + queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) + } + + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Parent = tq.fi + tq.fi.Children[fieldPos] = &fi + m = append(m, &fi) + } + } + + flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} + for _, fi := range flds.Index { + flds.Paths[fi.Path] = fi + if fi.Name != "" && !fi.Embedded { + flds.Names[fi.Path] = fi + } + } + + return flds +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go new file mode 100644 index 0000000000..3f000f47ce --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -0,0 +1,1045 @@ +package sqlx + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Although the NameMapper is convenient, in practice it should not +// be relied on except for application code. If you are writing a library +// that uses sqlx, you should be aware that the name mappings you expect +// can be overridden by your user's application. + +// NameMapper is used to map column names to struct field names. By default, +// it uses strings.ToLower to lowercase struct field names. It can be set +// to whatever you want, but it is encouraged to be set before sqlx is used +// as name-to-field mappings are cached after first use on a type. 
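+//
+// For example (a hypothetical mapper; set it before any queries run, since
+// mappings are cached per type):
+//
+//	sqlx.NameMapper = func(s string) string {
+//		return strings.ToLower(strings.TrimPrefix(s, "DB"))
+//	}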
+var NameMapper = strings.ToLower +var origMapper = reflect.ValueOf(NameMapper) + +// Rather than creating on init, this is created when necessary so that +// importers have time to customize the NameMapper. +var mpr *reflectx.Mapper + +// mprMu protects mpr. +var mprMu sync.Mutex + +// mapper returns a valid mapper using the configured NameMapper func. +func mapper() *reflectx.Mapper { + mprMu.Lock() + defer mprMu.Unlock() + + if mpr == nil { + mpr = reflectx.NewMapperFunc("db", NameMapper) + } else if origMapper != reflect.ValueOf(NameMapper) { + // if NameMapper has changed, create a new mapper + mpr = reflectx.NewMapperFunc("db", NameMapper) + origMapper = reflect.ValueOf(NameMapper) + } + return mpr +} + +// isScannable takes the reflect.Type and the actual dest value and returns +// whether or not it's Scannable. Something is scannable if: +// * it is not a struct +// * it implements sql.Scanner +// * it has no exported fields +func isScannable(t reflect.Type) bool { + if reflect.PtrTo(t).Implements(_scannerInterface) { + return true + } + if t.Kind() != reflect.Struct { + return true + } + + // it's not important that we use the right mapper for this particular object, + // we're only concerned on how many exported fields this struct has + m := mapper() + if len(m.TypeMap(t).Index) == 0 { + return true + } + return false +} + +// ColScanner is an interface used by MapScan and SliceScan +type ColScanner interface { + Columns() ([]string, error) + Scan(dest ...interface{}) error + Err() error +} + +// Queryer is an interface used by Get and Select +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + Queryx(query string, args ...interface{}) (*Rows, error) + QueryRowx(query string, args ...interface{}) *Row +} + +// Execer is an interface used by MustExec and LoadFile +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Binder is an interface for something which can bind queries (Tx, DB) +type binder interface { + DriverName() string + Rebind(string) string + BindNamed(string, interface{}) (string, []interface{}, error) +} + +// Ext is a union interface which can bind, query, and exec, used by +// NamedQuery and NamedExec. +type Ext interface { + binder + Queryer + Execer +} + +// Preparer is an interface used by Preparex. 
+type Preparer interface { + Prepare(query string) (*sql.Stmt, error) +} + +// determine if any of our extensions are unsafe +func isUnsafe(i interface{}) bool { + switch v := i.(type) { + case Row: + return v.unsafe + case *Row: + return v.unsafe + case Rows: + return v.unsafe + case *Rows: + return v.unsafe + case NamedStmt: + return v.Stmt.unsafe + case *NamedStmt: + return v.Stmt.unsafe + case Stmt: + return v.unsafe + case *Stmt: + return v.unsafe + case qStmt: + return v.unsafe + case *qStmt: + return v.unsafe + case DB: + return v.unsafe + case *DB: + return v.unsafe + case Tx: + return v.unsafe + case *Tx: + return v.unsafe + case sql.Rows, *sql.Rows: + return false + default: + return false + } +} + +func mapperFor(i interface{}) *reflectx.Mapper { + switch i.(type) { + case DB: + return i.(DB).Mapper + case *DB: + return i.(*DB).Mapper + case Tx: + return i.(Tx).Mapper + case *Tx: + return i.(*Tx).Mapper + default: + return mapper() + } +} + +var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() +var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// Row is a reimplementation of sql.Row in order to gain access to the underlying +// sql.Rows.Columns() data, necessary for StructScan. +type Row struct { + err error + unsafe bool + rows *sql.Rows + Mapper *reflectx.Mapper +} + +// Scan is a fixed implementation of sql.Row.Scan, which does not discard the +// underlying error from the internal rows object if it exists. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + + // TODO(bradfitz): for now we need to defensively clone all + // []byte that the driver returned (not permitting + // *RawBytes in Rows.Scan), since we're about to close + // the Rows in our defer, when we return from this function. + // the contract with the driver.Next(...) interface is that it + // can return slices into read-only temporary memory that's + // only valid until the next Scan/Close. But the TODO is that + // for a lot of drivers, this copy will be unnecessary. We + // should provide an optional interface for drivers to + // implement to say, "don't worry, the []bytes that I return + // from Next will not be modified again." (for instance, if + // they were obtained from the network anyway) But for now we + // don't care. + defer r.rows.Close() + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !r.rows.Next() { + if err := r.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := r.rows.Scan(dest...) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + if err := r.rows.Close(); err != nil { + return err + } + return nil +} + +// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually +// returned by Row.Scan() +func (r *Row) Columns() ([]string, error) { + if r.err != nil { + return []string{}, r.err + } + return r.rows.Columns() +} + +// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error +func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { + if r.err != nil { + return []*sql.ColumnType{}, r.err + } + return r.rows.ColumnTypes() +} + +// Err returns the error encountered while scanning. +func (r *Row) Err() error { + return r.err +} + +// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, +// used mostly to automatically bind named queries using the right bindvars. 
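+//
+// A usage sketch (hypothetical driver and DSN; error handling elided):
+//
+//	db, _ := sqlx.Open("postgres", "user=foo dbname=bar sslmode=disable")
+//	var names []string
+//	_ = db.Select(&names, "SELECT name FROM person")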
+type DB struct { + *sql.DB + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The +// driverName of the original database is required for named query support. +func NewDb(db *sql.DB, driverName string) *DB { + return &DB{DB: db, driverName: driverName, Mapper: mapper()} +} + +// DriverName returns the driverName passed to the Open function for this DB. +func (db *DB) DriverName() string { + return db.driverName +} + +// Open is the same as sql.Open, but returns an *sqlx.DB instead. +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err +} + +// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. +func MustOpen(driverName, dataSourceName string) *DB { + db, err := Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// MapperFunc sets a new mapper for this db using the default sqlx struct tag +// and the provided mapper function. +func (db *DB) MapperFunc(mf func(string) string) { + db.Mapper = reflectx.NewMapperFunc("db", mf) +} + +// Rebind transforms a query from QUESTION to the DB driver's bindvar type. +func (db *DB) Rebind(query string) string { + return Rebind(BindType(db.driverName), query) +} + +// Unsafe returns a version of DB which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its +// safety behavior. +func (db *DB) Unsafe() *DB { + return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} +} + +// BindNamed binds a query using the DB driver's bindvar type. +func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) +} + +// NamedQuery using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(db, query, arg) +} + +// NamedExec using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(db, query, arg) +} + +// Select using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { + return Select(db, dest, query, args...) +} + +// Get using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { + return Get(db, dest, query, args...) +} + +// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +func (db *DB) MustBegin() *Tx { + tx, err := db.Beginx() + if err != nil { + panic(err) + } + return tx +} + +// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. +func (db *DB) Beginx() (*Tx, error) { + tx, err := db.DB.Begin() + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Queryx queries the database and returns an *sqlx.Rows. 
+// Any placeholder parameters are replaced with supplied args. +func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowx queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowx(query string, args ...interface{}) *Row { + rows, err := db.DB.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustExec (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(db, query, args...) +} + +// Preparex returns an sqlx.Stmt instead of a sql.Stmt +func (db *DB) Preparex(query string) (*Stmt, error) { + return Preparex(db, query) +} + +// PrepareNamed returns an sqlx.NamedStmt +func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(db, query) +} + +// Tx is an sqlx wrapper around sql.Tx with extra functionality +type Tx struct { + *sql.Tx + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// DriverName returns the driverName used by the DB which began this transaction. +func (tx *Tx) DriverName() string { + return tx.driverName +} + +// Rebind a query within a transaction's bindvar type. +func (tx *Tx) Rebind(query string) string { + return Rebind(BindType(tx.driverName), query) +} + +// Unsafe returns a version of Tx which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (tx *Tx) Unsafe() *Tx { + return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} +} + +// BindNamed binds a query within a transaction's bindvar type. +func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) +} + +// NamedQuery within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(tx, query, arg) +} + +// NamedExec a named query within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(tx, query, arg) +} + +// Select within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { + return Select(tx, dest, query, args...) +} + +// Queryx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// QueryRowx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { + rows, err := tx.Tx.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// Get within a transaction. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. 
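+//
+// A sketch (hypothetical Person struct and table; bindvar style depends on
+// the driver):
+//
+//	var p Person
+//	err := tx.Get(&p, "SELECT * FROM person WHERE id=?", 1)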
+func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { + return Get(tx, dest, query, args...) +} + +// MustExec runs MustExec within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(tx, query, args...) +} + +// Preparex a statement within a transaction. +func (tx *Tx) Preparex(query string) (*Stmt, error) { + return Preparex(tx, query) +} + +// Stmtx returns a version of the prepared statement which runs within a transaction. Provided +// stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) Stmtx(stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} +} + +// NamedStmt returns a version of the prepared statement which runs within a transaction. +func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.Stmtx(stmt.Stmt), + } +} + +// PrepareNamed returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(tx, query) +} + +// Stmt is an sqlx wrapper around sql.Stmt with extra functionality +type Stmt struct { + *sql.Stmt + unsafe bool + Mapper *reflectx.Mapper +} + +// Unsafe returns a version of Stmt which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (s *Stmt) Unsafe() *Stmt { + return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} +} + +// Select using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Select(dest interface{}, args ...interface{}) error { + return Select(&qStmt{s}, dest, "", args...) +} + +// Get using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) Get(dest interface{}, args ...interface{}) error { + return Get(&qStmt{s}, dest, "", args...) +} + +// MustExec (panic) using this statement. Note that the query portion of the error +// output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExec(args ...interface{}) sql.Result { + return MustExec(&qStmt{s}, "", args...) +} + +// QueryRowx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowx(args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowx("", args...) +} + +// Queryx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.Queryx("", args...) +} + +// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by +// implementing those interfaces and ignoring the `query` argument. +type qStmt struct{ *Stmt } + +func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.Query(args...) +} + +func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.Query(args...) 
+ if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { + rows, err := q.Stmt.Query(args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.Exec(args...) +} + +// Rows is a wrapper around sql.Rows which caches costly reflect operations +// during a looped StructScan +type Rows struct { + *sql.Rows + unsafe bool + Mapper *reflectx.Mapper + // these fields cache memory use for a rows during iteration w/ structScan + started bool + fields [][]int + values []interface{} +} + +// SliceScan using this Rows. +func (r *Rows) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Rows) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. +// Use this and iterate over Rows manually when the memory load of Select() might be +// prohibitive. *Rows.StructScan caches the reflect work of matching up column +// positions to fields to avoid that overhead per scan, which means it is not safe +// to run StructScan on the same Rows instance with different struct types. +func (r *Rows) StructScan(dest interface{}) error { + v := reflect.ValueOf(dest) + + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + + v = v.Elem() + + if !r.started { + columns, err := r.Columns() + if err != nil { + return err + } + m := r.Mapper + + r.fields = m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(r.fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + r.values = make([]interface{}, len(columns)) + r.started = true + } + + err := fieldsByTraversal(v, r.fields, r.values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + err = r.Scan(r.values...) + if err != nil { + return err + } + return r.Err() +} + +// Connect to a database and verify with a ping. +func Connect(driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + err = db.Ping() + if err != nil { + db.Close() + return nil, err + } + return db, nil +} + +// MustConnect connects to a database and panics on error. +func MustConnect(driverName, dataSourceName string) *DB { + db, err := Connect(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// Preparex prepares a statement. +func Preparex(p Preparer, query string) (*Stmt, error) { + s, err := p.Prepare(query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// Select executes a query using the provided Queryer, and StructScans each row +// into dest, which must be a slice. If the slice elements are scannable, then +// the result set must have only one column. Otherwise, StructScan is used. +// The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { + rows, err := q.Queryx(query, args...) 
+	if err != nil {
+		return err
+	}
+	// if something happens here, we want to make sure the rows are Closed
+	defer rows.Close()
+	return scanAll(rows, dest, false)
+}
+
+// Get does a QueryRow using the provided Queryer, and scans the resulting row
+// to dest. If dest is scannable, the result must only have one column. Otherwise,
+// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
+	r := q.QueryRowx(query, args...)
+	return r.scanAny(dest, false)
+}
+
+// LoadFile exec's every statement in a file (as a single call to Exec).
+// LoadFile may return a nil *sql.Result if errors are encountered locating or
+// reading the file at path. LoadFile reads the entire file into memory, so it
+// is not suitable for loading large data dumps, but can be useful for initializing
+// schemas or loading indexes.
+//
+// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
+// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
+// this by requiring something with DriverName() and then attempting to split the
+// queries will be difficult to get right, and its current driver-specific behavior
+// is deemed at least not complex in its incorrectness.
+func LoadFile(e Execer, path string) (*sql.Result, error) {
+	realpath, err := filepath.Abs(path)
+	if err != nil {
+		return nil, err
+	}
+	contents, err := ioutil.ReadFile(realpath)
+	if err != nil {
+		return nil, err
+	}
+	res, err := e.Exec(string(contents))
+	return &res, err
+}
+
+// MustExec execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExec(e Execer, query string, args ...interface{}) sql.Result {
+	res, err := e.Exec(query, args...)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// SliceScan using this Row.
+func (r *Row) SliceScan() ([]interface{}, error) {
+	return SliceScan(r)
+}
+
+// MapScan using this Row.
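+//
+// A sketch (column names become map keys; value types are driver-dependent):
+//
+//	m := map[string]interface{}{}
+//	err := db.QueryRowx("SELECT 1 AS one").MapScan(m)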
+func (r *Row) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +func (r *Row) scanAny(dest interface{}, structOnly bool) error { + if r.err != nil { + return r.err + } + if r.rows == nil { + r.err = sql.ErrNoRows + return r.err + } + defer r.rows.Close() + + v := reflect.ValueOf(dest) + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if v.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + + base := reflectx.Deref(v.Type()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := r.Columns() + if err != nil { + return err + } + + if scannable && len(columns) > 1 { + return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) + } + + if scannable { + return r.Scan(dest) + } + + m := r.Mapper + + fields := m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values := make([]interface{}, len(columns)) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + return r.Scan(values...) +} + +// StructScan a single Row into dest. +func (r *Row) StructScan(dest interface{}) error { + return r.scanAny(dest, true) +} + +// SliceScan a row, returning a []interface{} with values similar to MapScan. +// This function is primarily intended for use where the number of columns +// is not known. Because you can pass an []interface{} directly to Scan, +// it's recommended that you do that as it will not have to allocate new +// slices per row. +func SliceScan(r ColScanner) ([]interface{}, error) { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return []interface{}{}, err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + + if err != nil { + return values, err + } + + for i := range columns { + values[i] = *(values[i].(*interface{})) + } + + return values, r.Err() +} + +// MapScan scans a single Row into the dest map[string]interface{}. +// Use this to get results for SQL that might not be under your control +// (for instance, if you're building an interface for an SQL server that +// executes SQL from input). Please do not use this as a primary interface! +// This will modify the map sent to it in place, so reuse the same map with +// care. Columns which occur more than once in the result will overwrite +// each other! +func MapScan(r ColScanner, dest map[string]interface{}) error { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) 
+ if err != nil { + return err + } + + for i, column := range columns { + dest[column] = *(values[i].(*interface{})) + } + + return r.Err() +} + +type rowsi interface { + Close() error + Columns() ([]string, error) + Err() error + Next() bool + Scan(...interface{}) error +} + +// structOnlyError returns an error appropriate for type when a non-scannable +// struct is expected but something else is given +func structOnlyError(t reflect.Type) error { + isStruct := t.Kind() == reflect.Struct + isScanner := reflect.PtrTo(t).Implements(_scannerInterface) + if !isStruct { + return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) + } + if isScanner { + return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) + } + return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) +} + +// scanAll scans all rows into a destination, which must be a slice of any +// type. If the destination slice type is a Struct, then StructScan will be +// used on each row. If the destination is some other kind of base type, then +// each row must only have one column which can scan into that type. This +// allows you to do something like: +// +// rows, _ := db.Query("select id from people;") +// var ids []int +// scanAll(rows, &ids, false) +// +// and ids will be a list of the id results. I realize that this is a desirable +// interface to expose to users, but for now it will only be exposed via changes +// to `Get` and `Select`. The reason that this has been implemented like this is +// this is the only way to not duplicate reflect work in the new API while +// maintaining backwards compatibility. +func scanAll(rows rowsi, dest interface{}, structOnly bool) error { + var v, vp reflect.Value + + value := reflect.ValueOf(dest) + + // json.Unmarshal returns errors for these + if value.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if value.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + direct := reflect.Indirect(value) + + slice, err := baseType(value.Type(), reflect.Slice) + if err != nil { + return err + } + + isPtr := slice.Elem().Kind() == reflect.Ptr + base := reflectx.Deref(slice.Elem()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := rows.Columns() + if err != nil { + return err + } + + // if it's a base type make sure it only has 1 column; if not return an error + if scannable && len(columns) > 1 { + return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) + } + + if !scannable { + var values []interface{} + var m *reflectx.Mapper + + switch rows.(type) { + case *Rows: + m = rows.(*Rows).Mapper + default: + m = mapper() + } + + fields := m.TraversalsByName(base, columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values = make([]interface{}, len(columns)) + + for rows.Next() { + // create a new struct type (which returns PtrTo) and indirect it + vp = reflect.New(base) + v = reflect.Indirect(vp) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + + // scan into the struct field pointers and append to our results + err = rows.Scan(values...) 
+ if err != nil { + return err + } + + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, v)) + } + } + } else { + for rows.Next() { + vp = reflect.New(base) + err = rows.Scan(vp.Interface()) + if err != nil { + return err + } + // append + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, reflect.Indirect(vp))) + } + } + } + + return rows.Err() +} + +// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately +// it doesn't really feel like it's named properly. There is an incongruency +// between this and the way that StructScan (which might better be ScanStruct +// anyway) works on a rows object. + +// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. +// StructScan will scan in the entire rows result, so if you do not want to +// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. +// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. +func StructScan(rows rowsi, dest interface{}) error { + return scanAll(rows, dest, true) + +} + +// reflect helpers + +func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { + t = reflectx.Deref(t) + if t.Kind() != expected { + return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) + } + return t, nil +} + +// fieldsByName fills a values interface with fields from the passed value based +// on the traversals in int. If ptrs is true, return addresses instead of values. +// We write this instead of using FieldsByName to save allocations and map lookups +// when iterating over many rows. Empty traversals will get an interface pointer. +// Because of the necessity of requesting ptrs or values, it's considered a bit too +// specialized for inclusion in reflectx itself. +func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return errors.New("argument not a struct") + } + + for i, traversal := range traversals { + if len(traversal) == 0 { + values[i] = new(interface{}) + continue + } + f := reflectx.FieldByIndexes(v, traversal) + if ptrs { + values[i] = f.Addr().Interface() + } else { + values[i] = f.Interface() + } + } + return nil +} + +func missingFields(transversals [][]int) (field int, err error) { + for i, t := range transversals { + if len(t) == 0 { + return i, errors.New("missing field") + } + } + return 0, nil +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go new file mode 100644 index 0000000000..06033111a5 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -0,0 +1,346 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" +) + +// ConnectContext to a database and verify with a ping. 
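+//
+// A hypothetical call (the driver name and DSN are illustrative only):
+//
+//	db, err := ConnectContext(ctx, "postgres", "dbname=test sslmode=disable")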
+func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return db, err + } + err = db.PingContext(ctx) + return db, err +} + +// QueryerContext is an interface used by GetContext and SelectContext +type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) + QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row +} + +// PreparerContext is an interface used by PreparexContext. +type PreparerContext interface { + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) +} + +// ExecerContext is an interface used by MustExecContext and LoadFileContext +type ExecerContext interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// ExtContext is a union interface which can bind, query, and exec, with Context +// used by NamedQueryContext and NamedExecContext. +type ExtContext interface { + binder + QueryerContext + ExecerContext +} + +// SelectContext executes a query using the provided Queryer, and StructScans +// each row into dest, which must be a slice. If the slice elements are +// scannable, then the result set must have only one column. Otherwise, +// StructScan is used. The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + rows, err := q.QueryxContext(ctx, query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// PreparexContext prepares a statement. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { + s, err := p.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// GetContext does a QueryRow using the provided Queryer, and scans the +// resulting row to dest. If dest is scannable, the result must only have one +// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like +// row.Scan would. Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowxContext(ctx, query, args...) + return r.scanAny(dest, false) +} + +// LoadFileContext exec's every statement in a file (as a single call to Exec). +// LoadFileContext may return a nil *sql.Result if errors are encountered +// locating or reading the file at path. LoadFile reads the entire file into +// memory, so it is not suitable for loading large data dumps, but can be useful +// for initializing schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. 
Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. +func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.ExecContext(ctx, string(contents)) + return &res, err +} + +// MustExecContext execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { + res, err := e.ExecContext(ctx, query, args...) + if err != nil { + panic(err) + } + return res +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, db, query) +} + +// NamedQueryContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { + return NamedQueryContext(ctx, db, query, arg) +} + +// NamedExecContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, db, query, arg) +} + +// SelectContext using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, db, dest, query, args...) +} + +// GetContext using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, db, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, db, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.DB.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. 
If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// MustBeginContext is canceled. +func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { + tx, err := db.BeginTxx(ctx, opts) + if err != nil { + panic(err) + } + return tx +} + +// MustExecContext (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, db, query, args...) +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. +func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := db.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// StmtxContext returns a version of the prepared statement which runs within a +// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} +} + +// NamedStmtContext returns a version of the prepared statement which runs +// within a transaction. +func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.StmtxContext(ctx, stmt.Stmt), + } +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, tx, query) +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, tx, query) +} + +// MustExecContext runs MustExecContext within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, tx, query, args...) +} + +// QueryxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// SelectContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, tx, dest, query, args...) 
+} + +// GetContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, tx, dest, query, args...) +} + +// QueryRowxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.Tx.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// NamedExecContext using this Tx. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, tx, query, arg) +} + +// SelectContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return SelectContext(ctx, &qStmt{s}, dest, "", args...) +} + +// GetContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return GetContext(ctx, &qStmt{s}, dest, "", args...) +} + +// MustExecContext (panic) using this statement. Note that the query portion of +// the error output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { + return MustExecContext(ctx, &qStmt{s}, "", args...) +} + +// QueryRowxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowxContext(ctx, "", args...) +} + +// QueryxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.QueryxContext(ctx, "", args...) +} + +func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.QueryContext(ctx, args...) +} + +func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.QueryContext(ctx, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := q.Stmt.QueryContext(ctx, args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.ExecContext(ctx, args...) 
+} diff --git a/vendor/github.com/lann/builder/.gitignore b/vendor/github.com/lann/builder/.gitignore new file mode 100644 index 0000000000..f54eb28f6e --- /dev/null +++ b/vendor/github.com/lann/builder/.gitignore @@ -0,0 +1,2 @@ +*~ +\#*# diff --git a/vendor/github.com/lann/builder/.travis.yml b/vendor/github.com/lann/builder/.travis.yml new file mode 100644 index 0000000000..c8860f69bc --- /dev/null +++ b/vendor/github.com/lann/builder/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - '1.8' + - '1.9' + - '1.10' + - tip diff --git a/vendor/github.com/lann/builder/LICENSE b/vendor/github.com/lann/builder/LICENSE new file mode 100644 index 0000000000..a109e8051c --- /dev/null +++ b/vendor/github.com/lann/builder/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014-2015 Lann Martin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lann/builder/README.md b/vendor/github.com/lann/builder/README.md new file mode 100644 index 0000000000..3b18550bb9 --- /dev/null +++ b/vendor/github.com/lann/builder/README.md @@ -0,0 +1,68 @@ +# Builder - fluent immutable builders for Go + +[![GoDoc](https://godoc.org/github.com/lann/builder?status.png)](https://godoc.org/github.com/lann/builder) +[![Build Status](https://travis-ci.org/lann/builder.png?branch=master)](https://travis-ci.org/lann/builder) + +Builder was originally written for +[Squirrel](https://github.com/lann/squirrel), a fluent SQL generator. It +is probably the best example of Builder in action. + +Builder helps you write **fluent** DSLs for your libraries with method chaining: + +```go +resp := ReqBuilder. + Url("http://golang.org"). + Header("User-Agent", "Builder"). 
+    Get()
+```
+
+Builder uses **immutable** persistent data structures
+([these](https://github.com/mndrix/ps), specifically)
+so that each step in your method chain can be reused:
+
+```go
+build := WordBuilder.AddLetters("Build")
+builder := build.AddLetters("er")
+building := build.AddLetters("ing")
+```
+
+Builder makes it easy to **build** structs using the **builder** pattern
+(*surprise!*):
+
+```go
+import "github.com/lann/builder"
+
+type Muppet struct {
+    Name string
+    Friends []string
+}
+
+type muppetBuilder builder.Builder
+
+func (b muppetBuilder) Name(name string) muppetBuilder {
+    return builder.Set(b, "Name", name).(muppetBuilder)
+}
+
+func (b muppetBuilder) AddFriend(friend string) muppetBuilder {
+    return builder.Append(b, "Friends", friend).(muppetBuilder)
+}
+
+func (b muppetBuilder) Build() Muppet {
+    return builder.GetStruct(b).(Muppet)
+}
+
+var MuppetBuilder = builder.Register(muppetBuilder{}, Muppet{}).(muppetBuilder)
+```
+```go
+MuppetBuilder.
+    Name("Beaker").
+    AddFriend("Dr. Honeydew").
+    Build()
+
+=> Muppet{Name:"Beaker", Friends:[]string{"Dr. Honeydew"}}
+```
+
+## License
+
+Builder is released under the
+[MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/lann/builder/builder.go b/vendor/github.com/lann/builder/builder.go
new file mode 100644
index 0000000000..ff621406a4
--- /dev/null
+++ b/vendor/github.com/lann/builder/builder.go
@@ -0,0 +1,225 @@
+// Package builder provides a method for writing fluent immutable builders.
+package builder
+
+import (
+	"github.com/lann/ps"
+	"go/ast"
+	"reflect"
+)
+
+// Builder stores a set of named values.
+//
+// New types can be declared with underlying type Builder and used with the
+// functions in this package. See example.
+//
+// Instances of Builder should be treated as immutable. It is up to the
+// implementor to ensure mutable values set on a Builder are not mutated while
+// the Builder is in use.
+type Builder struct {
+	builderMap ps.Map
+}
+
+var (
+	EmptyBuilder      = Builder{ps.NewMap()}
+	emptyBuilderValue = reflect.ValueOf(EmptyBuilder)
+)
+
+func getBuilderMap(builder interface{}) ps.Map {
+	b := convert(builder, Builder{}).(Builder)
+
+	if b.builderMap == nil {
+		return ps.NewMap()
+	}
+
+	return b.builderMap
+}
+
+// Set returns a copy of the given builder with a new value set for the given
+// name.
+//
+// Set (and all other functions taking a builder in this package) will panic if
+// the given builder's underlying type is not Builder.
+func Set(builder interface{}, name string, v interface{}) interface{} {
+	b := Builder{getBuilderMap(builder).Set(name, v)}
+	return convert(b, builder)
+}
+
+// Delete returns a copy of the given builder with the given named value unset.
+func Delete(builder interface{}, name string) interface{} {
+	b := Builder{getBuilderMap(builder).Delete(name)}
+	return convert(b, builder)
+}
+
+// Append returns a copy of the given builder with new value(s) appended to the
+// named list. If the value was previously unset or set with Set (even to,
+// e.g., a slice value), the new value(s) will be appended to an empty list.
+func Append(builder interface{}, name string, vs ...interface{}) interface{} {
+	return Extend(builder, name, vs)
+}
+
+// Extend behaves like Append, except it takes a single slice or array value
+// which will be concatenated to the named list.
+//
+// Unlike a variadic call to Append - which requires a []interface{} value -
+// Extend accepts slices or arrays of any type.
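+//
+// For example, the following calls produce the same list (b is an
+// illustrative builder value):
+//
+//	Append(b, "Items", 1, 2, 3)
+//	Extend(b, "Items", []int{1, 2, 3})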
+// +// Extend will panic if the given value is not a slice, array, or nil. +func Extend(builder interface{}, name string, vs interface{}) interface{} { + if vs == nil { + return builder + } + + maybeList, ok := getBuilderMap(builder).Lookup(name) + + var list ps.List + if ok { + list, ok = maybeList.(ps.List) + } + if !ok { + list = ps.NewList() + } + + forEach(vs, func(v interface{}) { + list = list.Cons(v) + }) + + return Set(builder, name, list) +} + +func listToSlice(list ps.List, arrayType reflect.Type) reflect.Value { + size := list.Size() + slice := reflect.MakeSlice(arrayType, size, size) + for i := size - 1; i >= 0; i-- { + val := reflect.ValueOf(list.Head()) + slice.Index(i).Set(val) + list = list.Tail() + } + return slice +} + +var anyArrayType = reflect.TypeOf([]interface{}{}) + +// Get retrieves a single named value from the given builder. +// If the value has not been set, it returns (nil, false). Otherwise, it will +// return (value, true). +// +// If the named value was last set with Append or Extend, the returned value +// will be a slice. If the given Builder has been registered with Register or +// RegisterType and the given name is an exported field of the registered +// struct, the returned slice will have the same type as that field. Otherwise +// the slice will have type []interface{}. It will panic if the given name is a +// registered struct's exported field and the value set on the Builder is not +// assignable to the field. +func Get(builder interface{}, name string) (interface{}, bool) { + val, ok := getBuilderMap(builder).Lookup(name) + if !ok { + return nil, false + } + + list, isList := val.(ps.List) + if isList { + arrayType := anyArrayType + + if ast.IsExported(name) { + structType := getBuilderStructType(reflect.TypeOf(builder)) + if structType != nil { + field, ok := (*structType).FieldByName(name) + if ok { + arrayType = field.Type + } + } + } + + val = listToSlice(list, arrayType).Interface() + } + + return val, true +} + +// GetMap returns a map[string]interface{} of the values set in the given +// builder. +// +// See notes on Get regarding returned slices. +func GetMap(builder interface{}) map[string]interface{} { + m := getBuilderMap(builder) + structType := getBuilderStructType(reflect.TypeOf(builder)) + + ret := make(map[string]interface{}, m.Size()) + + m.ForEach(func(name string, val ps.Any) { + list, isList := val.(ps.List) + if isList { + arrayType := anyArrayType + + if structType != nil { + field, ok := (*structType).FieldByName(name) + if ok { + arrayType = field.Type + } + } + + val = listToSlice(list, arrayType).Interface() + } + + ret[name] = val + }) + + return ret +} + +// GetStruct builds a new struct from the given registered builder. +// It will return nil if the given builder's type has not been registered with +// Register or RegisterValue. +// +// All values set on the builder with names that start with an uppercase letter +// (i.e. which would be exported if they were identifiers) are assigned to the +// corresponding exported fields of the struct. +// +// GetStruct will panic if any of these "exported" values are not assignable to +// their corresponding struct fields. +func GetStruct(builder interface{}) interface{} { + structVal := newBuilderStruct(reflect.TypeOf(builder)) + if structVal == nil { + return nil + } + return scanStruct(builder, structVal) +} + +// GetStructLike builds a new struct from the given builder with the same type +// as the given struct. 
+// +// All values set on the builder with names that start with an uppercase letter +// (i.e. which would be exported if they were identifiers) are assigned to the +// corresponding exported fields of the struct. +// +// ScanStruct will panic if any of these "exported" values are not assignable to +// their corresponding struct fields. +func GetStructLike(builder interface{}, strct interface{}) interface{} { + structVal := reflect.New(reflect.TypeOf(strct)).Elem() + return scanStruct(builder, &structVal) +} + +func scanStruct(builder interface{}, structVal *reflect.Value) interface{} { + getBuilderMap(builder).ForEach(func(name string, val ps.Any) { + if ast.IsExported(name) { + field := structVal.FieldByName(name) + + var value reflect.Value + switch v := val.(type) { + case nil: + switch field.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + value = reflect.Zero(field.Type()) + } + // nil is not valid for this Type; Set will panic + case ps.List: + value = listToSlice(v, field.Type()) + default: + value = reflect.ValueOf(val) + } + field.Set(value) + } + }) + + return structVal.Interface() +} diff --git a/vendor/github.com/lann/builder/reflect.go b/vendor/github.com/lann/builder/reflect.go new file mode 100644 index 0000000000..3b236e7918 --- /dev/null +++ b/vendor/github.com/lann/builder/reflect.go @@ -0,0 +1,24 @@ +package builder + +import "reflect" + +func convert(from interface{}, to interface{}) interface{} { + return reflect. + ValueOf(from). + Convert(reflect.TypeOf(to)). + Interface() +} + +func forEach(s interface{}, f func(interface{})) { + val := reflect.ValueOf(s) + + kind := val.Kind() + if kind != reflect.Slice && kind != reflect.Array { + panic(&reflect.ValueError{Method: "builder.forEach", Kind: kind}) + } + + l := val.Len() + for i := 0; i < l; i++ { + f(val.Index(i).Interface()) + } +} diff --git a/vendor/github.com/lann/builder/registry.go b/vendor/github.com/lann/builder/registry.go new file mode 100644 index 0000000000..612845418e --- /dev/null +++ b/vendor/github.com/lann/builder/registry.go @@ -0,0 +1,59 @@ +package builder + +import ( + "reflect" + "sync" +) + +var ( + registry = make(map[reflect.Type]reflect.Type) + registryMux sync.RWMutex +) + +// RegisterType maps the given builderType to a structType. +// This mapping affects the type of slices returned by Get and is required for +// GetStruct to work. +// +// Returns a Value containing an empty instance of the registered builderType. +// +// RegisterType will panic if builderType's underlying type is not Builder or +// if structType's Kind is not Struct. +func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Value { + registryMux.Lock() + defer registryMux.Unlock() + structType.NumField() // Panic if structType is not a struct + registry[builderType] = structType + emptyValue := emptyBuilderValue.Convert(builderType) + return &emptyValue +} + +// Register wraps RegisterType, taking instances instead of Types. +// +// Returns an empty instance of the registered builder type which can be used +// as the initial value for builder expressions. See example. 
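+//
+// A sketch of typical use (the builder and struct types are illustrative):
+//
+//	var FooBuilder = Register(fooBuilder{}, Foo{}).(fooBuilder)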
+func Register(builderProto, structProto interface{}) interface{} { + empty := RegisterType( + reflect.TypeOf(builderProto), + reflect.TypeOf(structProto), + ).Interface() + return empty +} + +func getBuilderStructType(builderType reflect.Type) *reflect.Type { + registryMux.RLock() + defer registryMux.RUnlock() + structType, ok := registry[builderType] + if !ok { + return nil + } + return &structType +} + +func newBuilderStruct(builderType reflect.Type) *reflect.Value { + structType := getBuilderStructType(builderType) + if structType == nil { + return nil + } + newStruct := reflect.New(*structType).Elem() + return &newStruct +} diff --git a/vendor/github.com/lann/ps/LICENSE b/vendor/github.com/lann/ps/LICENSE new file mode 100644 index 0000000000..69f9ae8d5a --- /dev/null +++ b/vendor/github.com/lann/ps/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2013 Michael Hendricks + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lann/ps/README.md b/vendor/github.com/lann/ps/README.md new file mode 100644 index 0000000000..325a5b3540 --- /dev/null +++ b/vendor/github.com/lann/ps/README.md @@ -0,0 +1,10 @@ +**This is a stable fork of https://github.com/mndrix/ps; it will not introduce breaking changes.** + +ps +== + +Persistent data structures for Go. See the [full package documentation](http://godoc.org/github.com/lann/ps) + +Install with + + go get github.com/lann/ps diff --git a/vendor/github.com/lann/ps/list.go b/vendor/github.com/lann/ps/list.go new file mode 100644 index 0000000000..48a1cebacf --- /dev/null +++ b/vendor/github.com/lann/ps/list.go @@ -0,0 +1,93 @@ +package ps + +// List is a persistent list of possibly heterogenous values. +type List interface { + // IsNil returns true if the list is empty + IsNil() bool + + // Cons returns a new list with val as the head + Cons(val Any) List + + // Head returns the first element of the list; + // panics if the list is empty + Head() Any + + // Tail returns a list with all elements except the head; + // panics if the list is empty + Tail() List + + // Size returns the list's length. This takes O(1) time. + Size() int + + // ForEach executes a callback for each value in the list. + ForEach(f func(Any)) + + // Reverse returns a list whose elements are in the opposite order as + // the original list. + Reverse() List +} + +// Immutable (i.e. 
persistent) list +type list struct { + depth int // the number of nodes after, and including, this one + value Any + tail *list +} + +// An empty list shared by all lists +var nilList = &list{} + +// NewList returns a new, empty list. The result is a singly linked +// list implementation. All lists share an empty tail, so allocating +// empty lists is efficient in time and memory. +func NewList() List { + return nilList +} + +func (self *list) IsNil() bool { + return self == nilList +} + +func (self *list) Size() int { + return self.depth +} + +func (tail *list) Cons(val Any) List { + var xs list + xs.depth = tail.depth + 1 + xs.value = val + xs.tail = tail + return &xs +} + +func (self *list) Head() Any { + if self.IsNil() { + panic("Called Head() on an empty list") + } + + return self.value +} + +func (self *list) Tail() List { + if self.IsNil() { + panic("Called Tail() on an empty list") + } + + return self.tail +} + +// ForEach executes a callback for each value in the list +func (self *list) ForEach(f func(Any)) { + if self.IsNil() { + return + } + f(self.Head()) + self.Tail().ForEach(f) +} + +// Reverse returns a list with elements in opposite order as this list +func (self *list) Reverse() List { + reversed := NewList() + self.ForEach(func(v Any) { reversed = reversed.Cons(v) }) + return reversed +} diff --git a/vendor/github.com/lann/ps/map.go b/vendor/github.com/lann/ps/map.go new file mode 100644 index 0000000000..192daec8f3 --- /dev/null +++ b/vendor/github.com/lann/ps/map.go @@ -0,0 +1,311 @@ +// Fully persistent data structures. A persistent data structure is a data +// structure that always preserves the previous version of itself when +// it is modified. Such data structures are effectively immutable, +// as their operations do not update the structure in-place, but instead +// always yield a new structure. +// +// Persistent +// data structures typically share structure among themselves. This allows +// operations to avoid copying the entire data structure. +package ps + +import ( + "bytes" + "fmt" +) + +// Any is a shorthand for Go's verbose interface{} type. +type Any interface{} + +// A Map associates unique keys (type string) with values (type Any). +type Map interface { + // IsNil returns true if the Map is empty + IsNil() bool + + // Set returns a new map in which key and value are associated. + // If the key didn't exist before, it's created; otherwise, the + // associated value is changed. + // This operation is O(log N) in the number of keys. + Set(key string, value Any) Map + + // Delete returns a new map with the association for key, if any, removed. + // This operation is O(log N) in the number of keys. + Delete(key string) Map + + // Lookup returns the value associated with a key, if any. If the key + // exists, the second return value is true; otherwise, false. + // This operation is O(log N) in the number of keys. + Lookup(key string) (Any, bool) + + // Size returns the number of key value pairs in the map. + // This takes O(1) time. + Size() int + + // ForEach executes a callback on each key value pair in the map. + ForEach(f func(key string, val Any)) + + // Keys returns a slice with all keys in this map. + // This operation is O(N) in the number of keys. + Keys() []string + + String() string +} + +// Immutable (i.e. 
persistent) associative array +const childCount = 8 +const shiftSize = 3 + +type tree struct { + count int + hash uint64 // hash of the key (used for tree balancing) + key string + value Any + children [childCount]*tree +} + +var nilMap = &tree{} + +// Recursively set nilMap's subtrees to point at itself. +// This eliminates all nil pointers in the map structure. +// All map nodes are created by cloning this structure so +// they avoid the problem too. +func init() { + for i := range nilMap.children { + nilMap.children[i] = nilMap + } +} + +// NewMap allocates a new, persistent map from strings to values of +// any type. +// This is currently implemented as a path-copying binary tree. +func NewMap() Map { + return nilMap +} + +func (self *tree) IsNil() bool { + return self == nilMap +} + +// clone returns an exact duplicate of a tree node +func (self *tree) clone() *tree { + var m tree + m = *self + return &m +} + +// constants for FNV-1a hash algorithm +const ( + offset64 uint64 = 14695981039346656037 + prime64 uint64 = 1099511628211 +) + +// hashKey returns a hash code for a given string +func hashKey(key string) uint64 { + hash := offset64 + for _, codepoint := range key { + hash ^= uint64(codepoint) + hash *= prime64 + } + return hash +} + +// Set returns a new map similar to this one but with key and value +// associated. If the key didn't exist, it's created; otherwise, the +// associated value is changed. +func (self *tree) Set(key string, value Any) Map { + hash := hashKey(key) + return setLowLevel(self, hash, hash, key, value) +} + +func setLowLevel(self *tree, partialHash, hash uint64, key string, value Any) *tree { + if self.IsNil() { // an empty tree is easy + m := self.clone() + m.count = 1 + m.hash = hash + m.key = key + m.value = value + return m + } + + if hash != self.hash { + m := self.clone() + i := partialHash % childCount + m.children[i] = setLowLevel(self.children[i], partialHash>>shiftSize, hash, key, value) + recalculateCount(m) + return m + } + + // replacing a key's previous value + m := self.clone() + m.value = value + return m +} + +// modifies a map by recalculating its key count based on the counts +// of its subtrees +func recalculateCount(m *tree) { + count := 0 + for _, t := range m.children { + count += t.Size() + } + m.count = count + 1 // add one to count ourself +} + +func (m *tree) Delete(key string) Map { + hash := hashKey(key) + newMap, _ := deleteLowLevel(m, hash, hash) + return newMap +} + +func deleteLowLevel(self *tree, partialHash, hash uint64) (*tree, bool) { + // empty trees are easy + if self.IsNil() { + return self, false + } + + if hash != self.hash { + i := partialHash % childCount + child, found := deleteLowLevel(self.children[i], partialHash>>shiftSize, hash) + if !found { + return self, false + } + newMap := self.clone() + newMap.children[i] = child + recalculateCount(newMap) + return newMap, true // ? 
this wasn't in the original code + } + + // we must delete our own node + if self.isLeaf() { // we have no children + return nilMap, true + } + /* + if self.subtreeCount() == 1 { // only one subtree + for _, t := range self.children { + if t != nilMap { + return t, true + } + } + panic("Tree with 1 subtree actually had no subtrees") + } + */ + + // find a node to replace us + i := -1 + size := -1 + for j, t := range self.children { + if t.Size() > size { + i = j + size = t.Size() + } + } + + // make chosen leaf smaller + replacement, child := self.children[i].deleteLeftmost() + newMap := replacement.clone() + for j := range self.children { + if j == i { + newMap.children[j] = child + } else { + newMap.children[j] = self.children[j] + } + } + recalculateCount(newMap) + return newMap, true +} + +// delete the leftmost node in a tree returning the node that +// was deleted and the tree left over after its deletion +func (m *tree) deleteLeftmost() (*tree, *tree) { + if m.isLeaf() { + return m, nilMap + } + + for i, t := range m.children { + if t != nilMap { + deleted, child := t.deleteLeftmost() + newMap := m.clone() + newMap.children[i] = child + recalculateCount(newMap) + return deleted, newMap + } + } + panic("Tree isn't a leaf but also had no children. How does that happen?") +} + +// isLeaf returns true if this is a leaf node +func (m *tree) isLeaf() bool { + return m.Size() == 1 +} + +// returns the number of child subtrees we have +func (m *tree) subtreeCount() int { + count := 0 + for _, t := range m.children { + if t != nilMap { + count++ + } + } + return count +} + +func (m *tree) Lookup(key string) (Any, bool) { + hash := hashKey(key) + return lookupLowLevel(m, hash, hash) +} + +func lookupLowLevel(self *tree, partialHash, hash uint64) (Any, bool) { + if self.IsNil() { // an empty tree is easy + return nil, false + } + + if hash != self.hash { + i := partialHash % childCount + return lookupLowLevel(self.children[i], partialHash>>shiftSize, hash) + } + + // we found it + return self.value, true +} + +func (m *tree) Size() int { + return m.count +} + +func (m *tree) ForEach(f func(key string, val Any)) { + if m.IsNil() { + return + } + + // ourself + f(m.key, m.value) + + // children + for _, t := range m.children { + if t != nilMap { + t.ForEach(f) + } + } +} + +func (m *tree) Keys() []string { + keys := make([]string, m.Size()) + i := 0 + m.ForEach(func(k string, v Any) { + keys[i] = k + i++ + }) + return keys +} + +// make it easier to display maps for debugging +func (m *tree) String() string { + keys := m.Keys() + buf := bytes.NewBufferString("{") + for _, key := range keys { + val, _ := m.Lookup(key) + fmt.Fprintf(buf, "%s: %s, ", key, val) + } + fmt.Fprintf(buf, "}\n") + return buf.String() +} diff --git a/vendor/github.com/lann/ps/profile.sh b/vendor/github.com/lann/ps/profile.sh new file mode 100644 index 0000000000..f03df05a5a --- /dev/null +++ b/vendor/github.com/lann/ps/profile.sh @@ -0,0 +1,3 @@ +#!/bin/sh +go test -c +./ps.test -test.run=none -test.bench=$2 -test.$1profile=$1.profile diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 0000000000..0f1d00e119 --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,4 @@ +.db +*.test +*~ +*.swp diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh new file mode 100644 index 0000000000..ebf447030b --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +set -eu + +client_configure() { + 
sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key +} + +pgdg_repository() { + local sourcelist='sources.list.d/postgresql.list' + + curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - + echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" + sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update +} + +postgresql_configure() { + sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config + local all all trust + hostnossl all pqgossltest 127.0.0.1/32 reject + hostnossl all pqgosslcert 127.0.0.1/32 reject + hostssl all pqgossltest 127.0.0.1/32 trust + hostssl all pqgosslcert 127.0.0.1/32 cert + host all all 127.0.0.1/32 trust + hostnossl all pqgossltest ::1/128 reject + hostnossl all pqgosslcert ::1/128 reject + hostssl all pqgossltest ::1/128 trust + hostssl all pqgosslcert ::1/128 cert + host all all ::1/128 trust + config + + xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates + certs/root.crt + certs/server.crt + certs/server.key + certificates + + sort -VCu <<-versions || + $PGVERSION + 9.2 + versions + sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config + ssl_ca_file = 'root.crt' + ssl_cert_file = 'server.crt' + ssl_key_file = 'server.key' + config + + echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null + + sudo service postgresql restart +} + +postgresql_install() { + xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages + postgresql-$PGVERSION + postgresql-server-dev-$PGVERSION + postgresql-contrib-$PGVERSION + packages +} + +postgresql_uninstall() { + sudo service postgresql stop + xargs sudo apt-get -y --purge remove <<-packages + libpq-dev + libpq5 + postgresql + postgresql-client-common + postgresql-common + packages + sudo rm -rf /var/lib/postgresql +} + +$1 diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml new file mode 100644 index 0000000000..8396f5d9d4 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.yml @@ -0,0 +1,44 @@ +language: go + +go: + - 1.11.x + - 1.12.x + - master + +sudo: true + +env: + global: + - PGUSER=postgres + - PQGOSSLTESTS=1 + - PQSSLCERTTEST_PATH=$PWD/certs + - PGHOST=127.0.0.1 + matrix: + - PGVERSION=10 + - PGVERSION=9.6 + - PGVERSION=9.5 + - PGVERSION=9.4 + +before_install: + - ./.travis.sh postgresql_uninstall + - ./.travis.sh pgdg_repository + - ./.travis.sh postgresql_install + - ./.travis.sh postgresql_configure + - ./.travis.sh client_configure + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint + - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2019.2.1 + +before_script: + - createdb pqgotest + - createuser -DRS pqgossltest + - createuser -DRS pqgosslcert + +script: + - > + goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' + - go vet ./... + - staticcheck -go 1.11 ./... + - golint ./... + - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... + - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... 
diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md
new file mode 100644
index 0000000000..84c937f156
--- /dev/null
+++ b/vendor/github.com/lib/pq/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+## Contributing to pq
+
+`pq` has a backlog of pull requests, but contributions are still very
+much welcome. You can help with patch review, submitting bug reports,
+or adding new functionality. There is no formal style guide, but
+please conform to the style of existing code and general Go formatting
+conventions when submitting patches.
+
+### Patch review
+
+Help review existing open pull requests by commenting on the code or
+proposed functionality.
+
+### Bug reports
+
+We appreciate any bug reports, but especially ones with self-contained
+(doesn't depend on code outside of pq), minimal (can't be simplified
+further) test cases. It's especially helpful if you can submit a pull
+request with just the failing test case (you'll probably want to
+pattern it after the tests in
+[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
+
+### New functionality
+
+There are a number of pending patches for new functionality, so
+additional feature patches will take a while to merge. Still, patches
+are generally reviewed based on usefulness and complexity in addition
+to time-in-queue, so if you have a knockout idea, take a shot. Feel
+free to open an issue discussing your proposed patch beforehand.
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md
new file mode 100644
index 0000000000..5773904a30
--- /dev/null
+++ b/vendor/github.com/lib/pq/LICENSE.md
@@ -0,0 +1,8 @@
+Copyright (c) 2011-2013, 'pq' Contributors
+Portions Copyright (C) 2011 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
new file mode 100644
index 0000000000..385fe73508
--- /dev/null
+++ b/vendor/github.com/lib/pq/README.md
@@ -0,0 +1,95 @@
+# pq - A pure Go postgres driver for Go's database/sql package
+
+[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
+[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
+
+## Install
+
+	go get github.com/lib/pq
+
+## Docs
+
+For detailed documentation and basic usage examples, please see the package
+documentation at <https://godoc.org/github.com/lib/pq>.
+
+## Tests
+
+`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
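+
+A minimal usage sketch (the connection string is illustrative; any
+libpq-style DSN or URL works):
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+
+	_ "github.com/lib/pq" // registers the "postgres" driver
+)
+
+func main() {
+	db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=disable")
+	if err != nil {
+		panic(err)
+	}
+	defer db.Close()
+
+	var version string
+	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
+		panic(err)
+	}
+	fmt.Println(version)
+}
+```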
+ +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` +* pgpass support + +## Future / Things you can help with + +* Better COPY FROM / COPY TO (see discussion in #181) + +## Thank you (alphabetical) + +Some of these contributors are from the original library `bmizerany/pq.go` whose +code still exists in here. + +* Andy Balholm (andybalholm) +* Ben Berkert (benburkert) +* Benjamin Heatwole (bheatwole) +* Bill Mill (llimllib) +* Bjørn Madsen (aeons) +* Blake Gentry (bgentry) +* Brad Fitzpatrick (bradfitz) +* Charlie Melbye (cmelbye) +* Chris Bandy (cbandy) +* Chris Gilling (cgilling) +* Chris Walsh (cwds) +* Dan Sosedoff (sosedoff) +* Daniel Farina (fdr) +* Eric Chlebek (echlebek) +* Eric Garrido (minusnine) +* Eric Urban (hydrogen18) +* Everyone at The Go Team +* Evan Shaw (edsrzf) +* Ewan Chou (coocood) +* Fazal Majid (fazalmajid) +* Federico Romero (federomero) +* Fumin (fumin) +* Gary Burd (garyburd) +* Heroku (heroku) +* James Pozdena (jpoz) +* Jason McVetta (jmcvetta) +* Jeremy Jay (pbnjay) +* Joakim Sernbrant (serbaut) +* John Gallagher (jgallagher) +* Jonathan Rudenberg (titanous) +* Joël Stemmer (jstemmer) +* Kamil Kisiel (kisielk) +* Kelly Dunn (kellydunn) +* Keith Rarick (kr) +* Kir Shatrov (kirs) +* Lann Martin (lann) +* Maciek Sakrejda (uhoh-itsmaciek) +* Marc Brinkmann (mbr) +* Marko Tiikkaja (johto) +* Matt Newberry (MattNewberry) +* Matt Robenolt (mattrobenolt) +* Martin Olsen (martinolsen) +* Mike Lewis (mikelikespie) +* Nicolas Patry (Narsil) +* Oliver Tonnhofer (olt) +* Patrick Hayes (phayes) +* Paul Hammond (paulhammond) +* Ryan Smith (ryandotsmith) +* Samuel Stauffer (samuel) +* Timothée Peignier (cyberdelia) +* Travis Cline (tmc) +* TruongSinh Tran-Nguyen (truongsinh) +* Yaismel Miranda (ympons) +* notedit (notedit) diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md new file mode 100644 index 0000000000..f05021115b --- /dev/null +++ b/vendor/github.com/lib/pq/TESTS.md @@ -0,0 +1,33 @@ +# Tests + +## Running Tests + +`go test` is used for testing. A running PostgreSQL +server is required, with the ability to log in. The +database to connect to test with is "pqgotest," on +"localhost" but these can be overridden using [environment +variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). + +Example: + + PGHOST=/run/postgresql go test + +## Benchmarks + +A benchmark suite can be run as part of the tests: + + go test -bench . 
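+
+An illustrative benchmark in the same style (the name and query here are
+assumptions, not part of the actual suite):
+
+```go
+package pq_test
+
+import (
+	"database/sql"
+	"testing"
+
+	_ "github.com/lib/pq"
+)
+
+// BenchmarkSimpleQuery times round trips for a trivial query.
+func BenchmarkSimpleQuery(b *testing.B) {
+	db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer db.Close()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		var n int
+		if err := db.QueryRow("SELECT 1").Scan(&n); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+```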
+ +## Example setup (Docker) + +Run a postgres container: + +``` +docker run --expose 5432:5432 postgres +``` + +Run tests: + +``` +PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test +``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go new file mode 100644 index 0000000000..e4933e2276 --- /dev/null +++ b/vendor/github.com/lib/pq/array.go @@ -0,0 +1,756 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/hex" + "fmt" + "reflect" + "strconv" + "strings" +) + +var typeByteSlice = reflect.TypeOf([]byte{}) +var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// Array returns the optimal driver.Valuer and sql.Scanner for an array or +// slice of any dimension. +// +// For example: +// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) +// +// var x []sql.NullInt64 +// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) +// +// Scanning multi-dimensional arrays is not supported. Arrays where the lower +// bound is not one (such as `[0:0]={1}') are not supported. +func Array(a interface{}) interface { + driver.Valuer + sql.Scanner +} { + switch a := a.(type) { + case []bool: + return (*BoolArray)(&a) + case []float64: + return (*Float64Array)(&a) + case []int64: + return (*Int64Array)(&a) + case []string: + return (*StringArray)(&a) + + case *[]bool: + return (*BoolArray)(a) + case *[]float64: + return (*Float64Array)(a) + case *[]int64: + return (*Int64Array)(a) + case *[]string: + return (*StringArray)(a) + } + + return GenericArray{a} +} + +// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner +// to override the array delimiter used by GenericArray. +type ArrayDelimiter interface { + // ArrayDelimiter returns the delimiter character(s) for this element's type. + ArrayDelimiter() string +} + +// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. +type BoolArray []bool + +// Scan implements the sql.Scanner interface. +func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. 
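+		// For example, n = 3 renders as {t,f,t}: 2 braces + 3 values +
+		// 2 commas = 1+2*3 bytes, matching the allocation below.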
+ b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. +func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. 
+type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. +func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. 
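The Scan path above only knows how to delegate to element types that implement sql.Scanner, which is also how NULL elements survive the round trip. A database-free sketch:

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	// pq.Array(&xs) would return the same GenericArray for this element type.
	var xs []sql.NullInt64
	if err := (pq.GenericArray{&xs}).Scan([]byte(`{1,NULL,3}`)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(xs) // [{1 true} {0 false} {3 true}]
}
```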
+func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. +func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. +func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. 
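Int64Array and StringArray parse and emit the same linear text form; the quoting done by appendArrayQuotedBytes (below) escapes only `"` and `\`. A sketch:

```
package main

import (
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	// Scan accepts the backend's text representation directly.
	var ids pq.Int64Array
	if err := ids.Scan([]byte(`{235,401}`)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ids) // [235 401]

	// Value double-quotes every element, escaping `"` and `\`.
	v, _ := pq.StringArray{`he said "hi"`, `back\slash`}.Value()
	fmt.Println(v) // {"he said \"hi\"","back\\slash"}
}
```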
+// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) + } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. 
+// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go new file mode 100644 index 0000000000..4b0a0a8f7e --- /dev/null +++ b/vendor/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit integer, unlike int32 +func (b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + 
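buf.go frames protocol messages as a one-byte type followed by a self-inclusive big-endian int32 length. readBuf and writeBuf are unexported, so this standalone sketch only mirrors their framing logic (the `frame` helper is hypothetical, not part of pq's API):

```
package main

import (
	"encoding/binary"
	"fmt"
)

// frame mirrors what writeBuf.wrap/next produce: a type byte, then an int32
// length that counts itself plus the payload, then the payload.
func frame(typ byte, payload []byte) []byte {
	msg := make([]byte, 5+len(payload))
	msg[0] = typ
	binary.BigEndian.PutUint32(msg[1:5], uint32(4+len(payload)))
	copy(msg[5:], payload)
	return msg
}

func main() {
	// A payload-less Terminate ('X') message: 58 00 00 00 04.
	fmt.Printf("% x\n", frame('X', nil))
}
```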
buf []byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(append(b.buf, s...), '\000') +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 0000000000..55152b1242 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,1923 @@ +package pq + +import ( + "bufio" + "context" + "crypto/md5" + "crypto/sha256" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" + "github.com/lib/pq/scram" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func (d *Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. 
+type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +// DialerContext is the context-aware dialer interface. +type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} + +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) +} +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool +} + +// Handle driver-side settings in parsed connection string. 
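Anything satisfying the Dialer interface above can be handed to DialOpen (defined further down in this file). A sketch wrapping the standard net dialer with logging (the type name is hypothetical; the DSN matches the test setup at the top of this diff):

```
package main

import (
	"log"
	"net"
	"time"

	"github.com/lib/pq"
)

// loggingDialer logs every dial before delegating to the net package.
type loggingDialer struct{}

func (loggingDialer) Dial(network, address string) (net.Conn, error) {
	log.Printf("pq dialing %s %s", network, address)
	return net.Dial(network, address)
}

func (loggingDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
	log.Printf("pq dialing %s %s (timeout %s)", network, address, timeout)
	return net.DialTimeout(network, address, timeout)
}

func main() {
	conn, err := pq.DialOpen(loggingDialer{}, "host=localhost port=5432 user=postgres dbname=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}
```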
+func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. +func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.dialer = d + return c.open(context.Background()) +} + +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. 
+ defer errRecoverNoErrBadConn(&err) + + o := c.opts + + cn = &conn{ + opts: o, + dialer: c.dialer, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(ctx, c.dialer, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + if cn.c != nil { + cn.c.Close() + } + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. + panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) + // SSL is not necessary or supported over UNIX domain sockets + if network == "unix" { + o["sslmode"] = "disable" + } + + // Zero or not specified means wait indefinitely. + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. 
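parseOpts (below) follows libpq's conninfo rules: whitespace-separated key=value pairs, single quotes around values containing spaces, and backslash escapes inside quotes. An illustrative sketch with made-up credentials:

```
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// A quoted value may contain spaces; \' escapes a quote inside it.
	dsn := `host=localhost port=5432 user=postgres dbname=postgres ` +
		`password='it\'s a secret' sslmode=disable`
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// The string is parsed when a connection is first established.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```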
+// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. + o[string(keyRunes)] = "" + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o[string(keyRunes)] = string(valRunes) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.bad = true + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN" + mode) + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.bad = true + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.bad = true + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + +func (cn *conn) Commit() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. 
+ if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + return cn.rollback() +} + +func (cn *conn) rollback() (err error) { + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' && res.colNames == nil { + res.result, res.tag = cn.parseComplete(r.string()) + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.rowsHeader = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. 
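The Commit path above deliberately rolls back a failed transaction and reports ErrInFailedTransaction rather than pretending success. A sketch of what a caller sees through database/sql, assuming the test database above:

```
package main

import (
	"database/sql"
	"errors"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost port=5432 user=postgres dbname=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	// Poison the transaction with a statement that cannot succeed.
	if _, err := tx.Exec(`SELECT no_such_column`); err == nil {
		log.Fatal("expected an error")
	}
	// pq rolls back under the hood and refuses the commit.
	if err := tx.Commit(); errors.Is(err, pq.ErrInFailedTransaction) {
		log.Println("commit refused: transaction had already failed")
	}
}
```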
+ default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. 
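Because noRows above returns errNoLastInsertID, Result.LastInsertId is never usable with pq; the usual substitute is INSERT ... RETURNING scanned via QueryRow. A sketch (the users table is hypothetical):

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost port=5432 user=postgres dbname=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS users (id serial PRIMARY KEY, name text)`); err != nil {
		log.Fatal(err)
	}

	// Instead of res.LastInsertId(), scan the generated key directly.
	var id int64
	err = db.QueryRow(`INSERT INTO users (name) VALUES ($1) RETURNING id`, "alice").Scan(&id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("inserted id", id)
}
```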
+ return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.rowsHeader = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + switch t { + case 'E': + panic(parseError(r)) + case 'N': + // ignore + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A', 'N': + // ignore + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". 
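isDriverSetting above lists the keys consumed client-side; the ssl* family drives the TLS upgrade performed in cn.ssl. An illustrative DSN (the host and certificate path are made up, so Ping will naturally fail without such a server):

```
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	dsn := "host=db.example.com user=app dbname=app " +
		"sslmode=verify-full sslrootcert=/etc/ssl/certs/pg-root.crt"
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// verify-full checks both the certificate chain and the host name.
	log.Println(db.Ping())
}
```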
+ if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). 
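Case 5 of auth above is PostgreSQL's MD5 scheme: "md5" + md5(md5(password+user) + salt). The same arithmetic, reproduced standalone (the user, password, and salt values are made up):

```
package main

import (
	"crypto/md5"
	"fmt"
)

func md5hex(s string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(s)))
}

func main() {
	user, password := "postgres", "secret"
	salt := string([]byte{0x01, 0x02, 0x03, 0x04}) // 4 server-chosen bytes
	// Same shape as the w.string(...) sent in case 5 above.
	fmt.Println("md5" + md5hex(md5hex(password+user)+salt))
}
```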
+var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + rowsHeader + colFmtData []byte + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + rowsHeader: st.rowsHeader, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. 
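prepareTo and stmt above implement extended-query prepared statements, with the 65535-parameter protocol cap enforced in exec. Reuse through database/sql looks like this (a sketch, assuming the test database above):

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost port=5432 user=postgres dbname=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Parsed and described once; bound and executed per call.
	stmt, err := db.Prepare(`SELECT $1::int + $2::int`)
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()

	for _, p := range [][2]int{{1, 2}, {20, 22}} {
		var sum int
		if err := stmt.QueryRow(p[0], p[1]).Scan(&sum); err != nil {
			log.Fatal(err)
		}
		fmt.Println(sum)
	}
}
```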
+ if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rowsHeader struct { + colNames []string + colTyps []fieldDesc + colFmts []format +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. + if rs.done { + return nil + } + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Result() driver.Result { + if rs.result == nil { + return emptyRows + } + return rs.result +} + +func (rs *rows) Tag() string { + return rs.tag +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if conn.bad { + return driver.ErrBadConn + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + if t == 'C' { + rs.result, rs.tag = conn.parseComplete(rs.rb.string()) + } + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.bad = true + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) + } + return + case 'T': + next := parsePortalRowDescribe(&rs.rb) + rs.next = &next + return io.EOF + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +func (rs *rows) HasNextResultSet() bool { + hasNext := rs.next != nil && !rs.done + return hasNext +} + +func (rs *rows) NextResultSet() error { + if rs.next == nil { + return io.EOF + } + rs.rowsHeader = *rs.next + rs.next = nil + return nil +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. 
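parseComplete above is what turns a command tag such as "UPDATE 3" into driver.RowsAffected; through database/sql it surfaces as Result.RowsAffected. A sketch reusing the hypothetical users table from the earlier example:

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost port=5432 user=postgres dbname=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	res, err := db.Exec(`UPDATE users SET name = $1 WHERE name = $2`, "bob", "alice")
	if err != nil {
		log.Fatal(err)
	}
	n, err := res.RowsAffected() // parsed from the UPDATE command tag
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n, "rows updated")
}
```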
+func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal +// to DDL and other statements that do not accept parameters) to be used as part +// of an SQL statement. For example: +// +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// +// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be +// replaced by two backslashes (i.e. "\\") and the C-style escape identifier +// that PostgreSQL provides ('E') will be prepended to the string. +func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. + // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. 
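QuoteIdentifier and QuoteLiteral above are the escape hatches for places placeholders cannot go (identifiers, some DDL); both are pure string functions, so their output can be checked directly:

```
package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	tbl := `weird "table"`
	fmt.Printf("SELECT count(*) FROM %s\n", pq.QuoteIdentifier(tbl))
	// SELECT count(*) FROM "weird ""table"""

	fmt.Println(pq.QuoteLiteral(`O'Reilly \ Co`))
	//  E'O''Reilly \\ Co' (note the leading space before E)
}
```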
+ var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() rowsHeader { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return rowsHeader{} + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in 
sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.bad = true + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.bad = true + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) rowsHeader { + n := r.int16() + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + } +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. 
Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). + switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". +func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 0000000000..0fdd06a617 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,149 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "time" +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err 
!= nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery("SELECT 'lib/pq ping test';") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + _ = cn.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + c, err := dial(ctx, cn.dialer, cn.opts) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(cn.opts) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 0000000000..2f8ced6737 --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,110 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. Connector satisfies the database/sql/driver Connector interface and +// can be used to create any number of DB Conn's via the database/sql OpenDB +// function. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +type Connector struct { + opts values + dialer Dialer +} + +// Connect returns a connection to the database using the fixed configuration +// of this Connector. Context is not used. +func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + return c.open(ctx) +} + +// Driver returnst the underlying driver of this Connector. +func (c *Connector) Driver() driver.Driver { + return &Driver{} +} + +// NewConnector returns a connector for the pq driver in a fixed configuration +// with the given dsn. The returned connector can be used to create any number +// of equivalent Conn's. The returned connector is intended to be used with +// database/sql.OpenDB. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. 
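A minimal usage sketch of the pattern this enables, assuming a reachable server (the connection string is illustrative, not part of the vendored file): the connector is built once and handed to database/sql.OpenDB, so no driver-name lookup or DSN re-parsing happens when the pool opens connections.

    package main

    import (
        "database/sql"
        "log"

        "github.com/lib/pq"
    )

    func main() {
        // Illustrative DSN; substitute real credentials.
        connector, err := pq.NewConnector("user=pqgotest dbname=pqgotest sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        db := sql.OpenDB(connector)
        defer db.Close()
        if err := db.Ping(); err != nil {
            log.Fatal(err)
        }
    }
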
+func NewConnector(dsn string) (*Connector, error) { + var err error + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. + o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + dsn, err = ParseURL(dsn) + if err != nil { + return nil, err + } + } + + if err := parseOpts(dsn, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. + if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + return &Connector{opts: o, dialer: defaultDialer{}}, nil +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 0000000000..345c2398f6 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,282 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + + closed bool + + sync.Mutex // guards err + err error +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := ©in{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + ci.setBad() + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.setBad() + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + case 'N': + // NoticeResponse + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.setBad() + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) setBad() { + ci.Lock() + ci.cn.bad = true + ci.Unlock() +} + +func (ci *copyin) isBad() bool { + ci.Lock() + b := ci.cn.bad + ci.Unlock() + return b +} + +func (ci *copyin) isErrorSet() bool { + ci.Lock() + isSet := (ci.err != nil) + ci.Unlock() + return isSet +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. +func (ci *copyin) setError(err error) { + ci.Lock() + if ci.err == nil { + ci.err = err + } + ci.Unlock() +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. 
+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.isBad() { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + return nil, ci.Close() + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if ci.isBad() { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. + err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 0000000000..2a60054e2e --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,245 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. 
The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. 
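For example, a sketch in the style of the snippets above (connection string and table are illustrative): with binary_parameters enabled, a []byte argument is sent as a binary-format parameter instead of being hex-escaped into bytea text.

    connStr := "user=pqgotest dbname=pqgotest sslmode=disable binary_parameters=yes"
    db, err := sql.Open("postgres", connStr)
    if err != nil {
        log.Fatal(err)
    }
    // The []byte value travels in binary format on the wire.
    _, err = db.Exec("INSERT INTO blobs(data) VALUES ($1)", []byte{0xde, 0xad, 0xbe, 0xef})
    if err != nil {
        log.Fatal(err)
    }
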
+ +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. 
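Condensed into code, that flow looks roughly like this (connection string, reconnect intervals, and channel name are illustrative; see the linked example below for a complete program):

    listener := pq.NewListener(connStr, 10*time.Second, time.Minute,
        func(ev pq.ListenerEventType, err error) {
            if err != nil {
                log.Println("listener event:", err)
            }
        })
    if err := listener.Listen("jobs"); err != nil {
        log.Fatal(err)
    }
    for n := range listener.Notify {
        if n == nil {
            // nil is sent after the connection is re-established;
            // re-check any state that may have changed while disconnected.
            continue
        }
        log.Printf("notification on %q: %s", n.Channel, n.Extra)
    }
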
+ +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +https://godoc.org/github.com/lib/pq/example/listen. + +*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 0000000000..a6902fae61 --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,602 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy 
results. + f, err := strconv.ParseFloat(string(s), 64) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. 
+func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. +func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset offset provided by the Postgres server. 
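A sketch of the exported entry point in isolation (the literal is illustrative): the fractional seconds and the zone offset are parsed positionally, so the returned time carries a fixed-offset zone unless the supplied currentLocation agrees on the offset.

    t, err := pq.ParseTimestamp(nil, "2001-02-03 04:05:06.000007-07")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(t.UTC()) // 2001-02-03 11:05:06.000007 +0000 UTC
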
+func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := p.mustAtoi(str, 0, monSep) + daySep := monSep + 3 + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) + timeSep := daySep + 3 + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + + var hour, minute, second int + if len(str) > minLen { + p.expect(str, ' ', timeSep) + minSep := timeSep + 3 + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) + secSep := minSep + 3 + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) + secEnd := secSep + 3 + second = p.mustAtoi(str, secSep+1, secEnd) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx] == '.' { + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. 
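The EnableInfinityTs mechanism described above is wired up once at process start, before any connections are opened; a sketch with arbitrary sentinel bounds:

    // Arbitrary sentinels: any time at or beyond a bound encodes to
    // "-infinity"/"infinity", and those literals decode back to the bounds.
    pq.EnableInfinityTs(
        time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
        time.Date(9999, time.December, 31, 23, 59, 59, 0, time.UTC),
    )
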
+func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset = offset % 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. +func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
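A usage sketch for the NullTime type defined here (table and column names are illustrative):

    var deletedAt pq.NullTime
    err := db.QueryRow("SELECT deleted_at FROM users WHERE id = $1", 1).Scan(&deletedAt)
    if err != nil {
        log.Fatal(err)
    }
    if deletedAt.Valid {
        fmt.Println("deleted at:", deletedAt.Time)
    } else {
        fmt.Println("deleted_at is NULL")
    }
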
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 0000000000..3d66ba7c52 --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,515 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". 
It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": 
"zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": 
"windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": 
"fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. +func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (cn *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + cn.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + cn.bad = true + *err = v + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + cn.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. 
+ if *err == driver.ErrBadConn { + cn.bad = true + } +} diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod new file mode 100644 index 0000000000..edf0b343fd --- /dev/null +++ b/vendor/github.com/lib/pq/go.mod @@ -0,0 +1 @@ +module github.com/lib/pq diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 0000000000..850bb9040c --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,797 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. 
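The rule setState enforces is a compare-and-swap with a single legal predecessor per state; a standalone sketch of the same idea (simplified names, not the vendored code):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const (
        stateIdle int32 = iota
        stateExpectResponse
        stateExpectReadyForQuery
    )

    // advance moves state to next only from the one legal predecessor,
    // mirroring the transition table in ListenerConn.setState.
    func advance(state *int32, next int32) bool {
        var prev int32
        switch next {
        case stateIdle:
            prev = stateExpectReadyForQuery
        case stateExpectResponse:
            prev = stateIdle
        case stateExpectReadyForQuery:
            prev = stateExpectResponse
        }
        return atomic.CompareAndSwapInt32(state, prev, next)
    }

    func main() {
        s := stateIdle
        fmt.Println(advance(&s, stateExpectResponse))      // true
        fmt.Println(advance(&s, stateIdle))                // false: must see ReadyForQuery first
        fmt.Println(advance(&s, stateExpectReadyForQuery)) // true
        fmt.Println(advance(&s, stateIdle))                // true
    }
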
+func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'N', 'S': + // ignore + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. 
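+// The channel name is passed through QuoteIdentifier, so it does not need
+// to be escaped by the caller.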
+func (l *ListenerConn) Listen(channel string) (bool, error) {
+	return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
+}
+
+// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) Unlisten(channel string) (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
+}
+
+// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) UnlistenAll() (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN *")
+}
+
+// Ping the remote server to make sure it's alive. Non-nil error means the
+// connection has failed and should be abandoned.
+func (l *ListenerConn) Ping() error {
+	sent, err := l.ExecSimpleQuery("")
+	if !sent {
+		return err
+	}
+	if err != nil {
+		// shouldn't happen
+		panic(err)
+	}
+	return nil
+}
+
+// Attempt to send a query on the connection. Returns an error if sending the
+// query failed, and the caller should initiate closure of this connection.
+// The caller must be holding senderLock (see acquireSenderLock and
+// releaseSenderLock).
+func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
+	defer errRecoverNoErrBadConn(&err)
+
+	// must set connection state before sending the query
+	if !l.setState(connStateExpectResponse) {
+		panic("two queries running at the same time")
+	}
+
+	// Can't use l.cn.writeBuf here because it uses the scratch buffer which
+	// might get overwritten by listenerConnLoop.
+	b := &writeBuf{
+		buf: []byte("Q\x00\x00\x00\x00"),
+		pos: 1,
+	}
+	b.string(q)
+	l.cn.send(b)
+
+	return nil
+}
+
+// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
+// parameters) on the connection. The possible return values are:
+//  1) "executed" is true; the query was executed to completion on the
+//     database server. If the query failed, err will be set to the error
+//     returned by the database, otherwise err will be nil.
+//  2) If "executed" is false, the query could not be executed on the remote
+//     server. err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+	if err = l.acquireSenderLock(); err != nil {
+		return false, err
+	}
+	defer l.releaseSenderLock()
+
+	err = l.sendSimpleQuery(q)
+	if err != nil {
+		// We can't know what state the protocol is in, so we need to abandon
+		// this connection.
+		l.connectionLock.Lock()
+		// Set the error pointer if it hasn't been set already; see
+		// listenerConnMain.
+		if l.err == nil {
+			l.err = err
+		}
+		l.connectionLock.Unlock()
+		l.cn.c.Close()
+		return false, err
+	}
+
+	// now we just wait for a reply.
+	for {
+		m, ok := <-l.replyChan
+		if !ok {
+			// We lost the connection to the server; don't bother waiting for
+			// a response. err should have been set already.
+			l.connectionLock.Lock()
+			err := l.err
+			l.connectionLock.Unlock()
+			return false, err
+		}
+		switch m.typ {
+		case 'Z':
+			// sanity check
+			if m.err != nil {
+				panic("m.err != nil")
+			}
+			// done; err might or might not be set
+			return true, err
+
+		case 'E':
+			// sanity check
+			if m.err == nil {
+				panic("m.err == nil")
+			}
+			// server responded with an error; ReadyForQuery to follow
+			err = m.err
+
+		default:
+			return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
+		}
+	}
+}
+
+// Close closes the connection.
+func (l *ListenerConn) Close() error {
+	l.connectionLock.Lock()
+	if l.err != nil {
+		l.connectionLock.Unlock()
+		return errListenerConnClosed
+	}
+	l.err = errListenerConnClosed
+	l.connectionLock.Unlock()
+	// We can't send anything on the connection without holding senderLock.
+	// Simply close the net.Conn to wake up everyone operating on it.
+	return l.cn.c.Close()
+}
+
+// Err returns the reason the connection was closed. It is not safe to call
+// this function until l.Notify has been closed.
+func (l *ListenerConn) Err() error {
+	return l.err
+}
+
+var errListenerClosed = errors.New("pq: Listener has been closed")
+
+// ErrChannelAlreadyOpen is returned from Listen when a channel is already
+// open.
+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
+
+// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
+var ErrChannelNotOpen = errors.New("pq: channel is not open")
+
+// ListenerEventType is an enumeration of listener event types.
+type ListenerEventType int
+
+const (
+	// ListenerEventConnected is emitted only when the database connection
+	// has first been established. The err argument of the callback
+	// will always be nil.
+	ListenerEventConnected ListenerEventType = iota
+
+	// ListenerEventDisconnected is emitted after a database connection has
+	// been lost, either because of an error or because Close has been
+	// called. The err argument will be set to the reason the database
+	// connection was lost.
+	ListenerEventDisconnected
+
+	// ListenerEventReconnected is emitted after a database connection has
+	// been re-established after connection loss. The err argument of the
+	// callback will always be nil. After this event has been emitted, a
+	// nil pq.Notification is sent on the Listener.Notify channel.
+	ListenerEventReconnected
+
+	// ListenerEventConnectionAttemptFailed is emitted after a connection
+	// to the database was attempted, but failed. The err argument will be
+	// set to an error describing why the connection attempt did not
+	// succeed.
+	ListenerEventConnectionAttemptFailed
+)
+
+// EventCallbackType is the event callback type. See also ListenerEventType
+// constants' documentation.
+type EventCallbackType func(event ListenerEventType, err error)
+
+// Listener provides an interface for listening to notifications from a
+// PostgreSQL database. For general usage information, see section
+// "Notifications".
+//
+// Listener can safely be used from concurrently running goroutines.
+type Listener struct {
+	// Channel for receiving notifications from the database. In some cases a
+	// nil value will be sent. See section "Notifications" above.
+	Notify chan *Notification
+
+	name                 string
+	minReconnectInterval time.Duration
+	maxReconnectInterval time.Duration
+	dialer               Dialer
+	eventCallback        EventCallbackType
+
+	lock                 sync.Mutex
+	isClosed             bool
+	reconnectCond        *sync.Cond
+	cn                   *ListenerConn
+	connNotificationChan <-chan *Notification
+	channels             map[string]struct{}
+}
+
+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
+//
+// name should be set to a connection string to be used to establish the
+// database connection (see section "Connection String Parameters" above).
+//
+// minReconnectInterval controls the duration to wait before trying to
+// re-establish the database connection after connection loss. After each
+// consecutive failure this interval is doubled, until maxReconnectInterval is
+// reached. Successfully completing the connection establishment procedure
+// resets the interval back to minReconnectInterval.
+//
+// The last parameter eventCallback can be set to a function which will be
+// called by the Listener when the state of the underlying database connection
+// changes. This callback will be called by the goroutine which dispatches the
+// notifications over the Notify channel, so you should try to avoid doing
+// potentially time-consuming operations from the callback.
+func NewListener(name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+	return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
+}
+
+// NewDialListener is like NewListener but it takes a Dialer.
+func NewDialListener(d Dialer,
+	name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+
+	l := &Listener{
+		name:                 name,
+		minReconnectInterval: minReconnectInterval,
+		maxReconnectInterval: maxReconnectInterval,
+		dialer:               d,
+		eventCallback:        eventCallback,
+
+		channels: make(map[string]struct{}),
+
+		Notify: make(chan *Notification, 32),
+	}
+	l.reconnectCond = sync.NewCond(&l.lock)
+
+	go l.listenerMain()
+
+	return l
+}
+
+// NotificationChannel returns the notification channel for this listener.
+// This is the same channel as Notify, and will not be recreated during the
+// lifetime of the Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+	return l.Notify
+}
+
+// Listen starts listening for notifications on a channel. Calls to this
+// function will block until an acknowledgement has been received from the
+// server. Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection cannot be re-established.
+//
+// Listen will only fail in three conditions:
+//  1) The channel is already open. The returned error will be
+//     ErrChannelAlreadyOpen.
+//  2) The query was executed on the remote server, but PostgreSQL returned an
+//     error message in response to the query. The returned error will be a
+//     pq.Error containing the information the server supplied.
+//  3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Listen(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// The server allows you to issue a LISTEN on a channel which is already
+	// open, but it seems useful to be able to detect this case to catch
+	// mistakes in application logic. If the application genuinely doesn't
+	// care, it can check the exported error and ignore it.
+	_, exists := l.channels[channel]
+	if exists {
+		return ErrChannelAlreadyOpen
+	}
+
+	if l.cn != nil {
+		// If gotResponse is true but error is set, the query was executed on
+		// the remote server, but resulted in an error. This should be
+		// relatively rare, so it's fine if we just pass the error to our
+		// caller. However, if gotResponse is false, we could not complete the
+		// query on the remote server and our underlying connection is about
+		// to go away, so we only add relname to l.channels, and wait for
+		// resync() to take care of the rest.
+ gotResponse, err := l.cn.Listen(channel) + if gotResponse && err != nil { + return err + } + } + + l.channels[channel] = struct{}{} + for l.cn == nil { + l.reconnectCond.Wait() + // we let go of the mutex for a while + if l.isClosed { + return errListenerClosed + } + } + + return nil +} + +// Unlisten removes a channel from the Listener's channel list. Returns +// ErrChannelNotOpen if the Listener is not listening on the specified channel. +// Returns immediately with no error if there is no connection. Note that you +// might still get notifications for this channel even after Unlisten has +// returned. +// +// The channel name is case-sensitive. +func (l *Listener) Unlisten(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // Similarly to LISTEN, this is not an error in Postgres, but it seems + // useful to distinguish from the normal conditions. + _, exists := l.channels[channel] + if !exists { + return ErrChannelNotOpen + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.Unlisten(channel) + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + delete(l.channels, channel) + return nil +} + +// UnlistenAll removes all channels from the Listener's channel list. Returns +// immediately with no error if there is no connection. Note that you might +// still get notifications for any of the deleted channels even after +// UnlistenAll has returned. +func (l *Listener) UnlistenAll() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.UnlistenAll() + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection. +func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). 
+ gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. +func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 0000000000..caaede2489 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. 
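+// For example, oid.T_int4 (23) is the OID of the int4 type, and
+// oid.TypeName[oid.T_int4] is "INT4".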
+package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 0000000000..ecc84c2c86 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 
+ T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: 
"ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 0000000000..c6aa5b9a36 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. + Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. 
+func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. +func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 0000000000..484f378a76 --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. 
+//
+// For SCRAM-SHA-256, for example, use:
+//
+//	client := scram.NewClient(sha256.New, user, pass)
+//
+func NewClient(newHash func() hash.Hash, user, pass string) *Client {
+	c := &Client{
+		newHash: newHash,
+		user:    user,
+		pass:    pass,
+	}
+	c.out.Grow(256)
+	c.authMsg.Grow(256)
+	return c
+}
+
+// Out returns the data to be sent to the server in the current step.
+func (c *Client) Out() []byte {
+	if c.out.Len() == 0 {
+		return nil
+	}
+	return c.out.Bytes()
+}
+
+// Err returns the error that occurred, or nil if there were no errors.
+func (c *Client) Err() error {
+	return c.err
+}
+
+// SetNonce sets the client nonce to the provided value.
+// If not set, the nonce is generated automatically from crypto/rand on the first step.
+func (c *Client) SetNonce(nonce []byte) {
+	c.clientNonce = nonce
+}
+
+var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
+
+// Step processes the incoming data from the server and makes the
+// next round of data for the server available via Client.Out.
+// Step returns false if there are no errors and more data is
+// still expected.
+func (c *Client) Step(in []byte) bool {
+	c.out.Reset()
+	if c.step > 2 || c.err != nil {
+		return false
+	}
+	c.step++
+	switch c.step {
+	case 1:
+		c.err = c.step1(in)
+	case 2:
+		c.err = c.step2(in)
+	case 3:
+		c.err = c.step3(in)
+	}
+	return c.step > 2 || c.err != nil
+}
+
+func (c *Client) step1(in []byte) error {
+	if len(c.clientNonce) == 0 {
+		const nonceLen = 16
+		buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
+		if _, err := rand.Read(buf[:nonceLen]); err != nil {
+			return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err)
+		}
+		c.clientNonce = buf[nonceLen:]
+		b64.Encode(c.clientNonce, buf[:nonceLen])
+	}
+	c.authMsg.WriteString("n=")
+	escaper.WriteString(&c.authMsg, c.user)
+	c.authMsg.WriteString(",r=")
+	c.authMsg.Write(c.clientNonce)
+
+	c.out.WriteString("n,,")
+	c.out.Write(c.authMsg.Bytes())
+	return nil
+}
+
+var b64 = base64.StdEncoding
+
+func (c *Client) step2(in []byte) error {
+	c.authMsg.WriteByte(',')
+	c.authMsg.Write(in)
+
+	fields := bytes.Split(in, []byte(","))
+	if len(fields) != 3 {
+		return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in)
+	}
+	if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
+		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0])
+	}
+	if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
+		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1])
+	}
+	if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
+		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
+	}
+
+	c.serverNonce = fields[0][2:]
+	if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
+		return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
+	}
+
+	salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
+	n, err := b64.Decode(salt, fields[1][2:])
+	if err != nil {
+		return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1])
+	}
+	salt = salt[:n]
+	iterCount, err := strconv.Atoi(string(fields[2][2:]))
+	if err != nil {
+		return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
+	}
+	c.saltPassword(salt, iterCount)
+
+	c.authMsg.WriteString(",c=biws,r=")
+	c.authMsg.Write(c.serverNonce)
+
+	c.out.WriteString("c=biws,r=")
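+	// "c=biws" is the base64 encoding of the GS2 header "n,," (no channel
+	// binding); the client-final-message is this header, the server nonce,
+	// and the proof written below. +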
c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 0000000000..d902084558 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,175 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. 
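+ // The certificate chain is instead checked against the CA manually in + // sslVerifyCertificateAuthority once the handshake completes.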
+ tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. + // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. + tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. 
+ // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. +func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 0000000000..3b7c3a2a31 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,20 @@ +// +build !windows + +package pq + +import "os" + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + if info.Mode().Perm()&0077 != 0 { + return ErrSSLKeyHasWorldPermissions + } + return nil +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 0000000000..5d2c763ceb --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 0000000000..f4d8a7c206 --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 0000000000..bf982524f9 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 0000000000..2b691267b9 --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 0000000000..9a1b9e0748 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. 
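+// The 16 input bytes are rendered in the canonical 8-4-4-4-12 hex form,
+// e.g. "6ba7b810-9dad-11d1-80b4-00c04fd430c8".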
+func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/LICENSE b/vendor/github.com/operator-framework/helm-operator-plugins/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/annotation/annotation.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/annotation/annotation.go
new file mode 100644
index 0000000000..4cf7ac435b
--- /dev/null
+++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/annotation/annotation.go
@@ -0,0 +1,239 @@
+/*
+Copyright 2020 The Operator-SDK Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package annotation allows setting custom install, upgrade, or uninstall options on custom resource objects via annotations.
+// To create custom annotations, implement the Install, Upgrade, or Uninstall interface.
+//
+// Example:
+//
+// To disable hooks based on annotations, InstallDisableHooks is passed to the reconciler as an option:
+//
+//	r, err := reconciler.New(
+//		reconciler.WithChart(*w.Chart),
+//		reconciler.WithGroupVersionKind(w.GroupVersionKind),
+//		reconciler.WithInstallAnnotations(annotation.InstallDisableHooks{}),
+//	)
+//
+// If the reconciler detects an annotation named "helm.sdk.operatorframework.io/install-disable-hooks"
+// on the watched custom resource, it sets the install.DisableHooks option to the annotation's value. For more information,
+// take a look at the InstallDisableHooks.InstallOption method.
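+//
+// For example, the annotation is set on the watched custom resource itself: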
+//
+//	kind: OperatorHelmKind
+//	apiVersion: test.example.com/v1
+//	metadata:
+//	  name: nginx-sample
+//	  annotations:
+//	    "helm.sdk.operatorframework.io/install-disable-hooks": "true"
+//
+package annotation
+
+import (
+	"strconv"
+
+	"helm.sh/helm/v3/pkg/action"
+
+	helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client"
+)
+
+var (
+	DefaultInstallAnnotations   = []Install{InstallDescription{}, InstallDisableHooks{}}
+	DefaultUpgradeAnnotations   = []Upgrade{UpgradeDescription{}, UpgradeDisableHooks{}, UpgradeForce{}}
+	DefaultUninstallAnnotations = []Uninstall{UninstallDescription{}, UninstallDisableHooks{}}
+)
+
+// Install configures an install annotation.
+type Install interface {
+	Name() string
+	InstallOption(string) helmclient.InstallOption
+}
+
+// Upgrade configures an upgrade annotation.
+type Upgrade interface {
+	Name() string
+	UpgradeOption(string) helmclient.UpgradeOption
+}
+
+// Uninstall configures an uninstall annotation.
+type Uninstall interface {
+	Name() string
+	UninstallOption(string) helmclient.UninstallOption
+}
+
+const (
+	defaultDomain                    = "helm.sdk.operatorframework.io"
+	defaultInstallDisableHooksName   = defaultDomain + "/install-disable-hooks"
+	defaultUpgradeDisableHooksName   = defaultDomain + "/upgrade-disable-hooks"
+	defaultUninstallDisableHooksName = defaultDomain + "/uninstall-disable-hooks"
+
+	defaultUpgradeForceName = defaultDomain + "/upgrade-force"
+
+	defaultInstallDescriptionName   = defaultDomain + "/install-description"
+	defaultUpgradeDescriptionName   = defaultDomain + "/upgrade-description"
+	defaultUninstallDescriptionName = defaultDomain + "/uninstall-description"
+)
+
+type InstallDisableHooks struct {
+	CustomName string
+}
+
+var _ Install = &InstallDisableHooks{}
+
+func (i InstallDisableHooks) Name() string {
+	if i.CustomName != "" {
+		return i.CustomName
+	}
+	return defaultInstallDisableHooksName
+}
+
+func (i InstallDisableHooks) InstallOption(val string) helmclient.InstallOption {
+	disableHooks := false
+	if v, err := strconv.ParseBool(val); err == nil {
+		disableHooks = v
+	}
+	return func(install *action.Install) error {
+		install.DisableHooks = disableHooks
+		return nil
+	}
+}
+
+type UpgradeDisableHooks struct {
+	CustomName string
+}
+
+var _ Upgrade = &UpgradeDisableHooks{}
+
+func (u UpgradeDisableHooks) Name() string {
+	if u.CustomName != "" {
+		return u.CustomName
+	}
+	return defaultUpgradeDisableHooksName
+}
+
+func (u UpgradeDisableHooks) UpgradeOption(val string) helmclient.UpgradeOption {
+	disableHooks := false
+	if v, err := strconv.ParseBool(val); err == nil {
+		disableHooks = v
+	}
+	return func(upgrade *action.Upgrade) error {
+		upgrade.DisableHooks = disableHooks
+		return nil
+	}
+}
+
+type UpgradeForce struct {
+	CustomName string
+}
+
+var _ Upgrade = &UpgradeForce{}
+
+func (u UpgradeForce) Name() string {
+	if u.CustomName != "" {
+		return u.CustomName
+	}
+	return defaultUpgradeForceName
+}
+
+func (u UpgradeForce) UpgradeOption(val string) helmclient.UpgradeOption {
+	force := false
+	if v, err := strconv.ParseBool(val); err == nil {
+		force = v
+	}
+	return func(upgrade *action.Upgrade) error {
+		upgrade.Force = force
+		return nil
+	}
+}
+
+type UninstallDisableHooks struct {
+	CustomName string
+}
+
+var _ Uninstall = &UninstallDisableHooks{}
+
+func (u UninstallDisableHooks) Name() string {
+	if u.CustomName != "" {
+		return u.CustomName
+	}
+	return defaultUninstallDisableHooksName
+}
+
+func (u UninstallDisableHooks) UninstallOption(val string) helmclient.UninstallOption {
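+	// If the annotation value cannot be parsed as a bool, hooks remain
+	// enabled (disableHooks stays false).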
disableHooks := false + if v, err := strconv.ParseBool(val); err == nil { + disableHooks = v + } + return func(uninstall *action.Uninstall) error { + uninstall.DisableHooks = disableHooks + return nil + } +} + +var _ Install = &InstallDescription{} + +type InstallDescription struct { + CustomName string +} + +func (i InstallDescription) Name() string { + if i.CustomName != "" { + return i.CustomName + } + return defaultInstallDescriptionName +} +func (i InstallDescription) InstallOption(v string) helmclient.InstallOption { + return func(i *action.Install) error { + i.Description = v + return nil + } +} + +var _ Upgrade = &UpgradeDescription{} + +type UpgradeDescription struct { + CustomName string +} + +func (u UpgradeDescription) Name() string { + if u.CustomName != "" { + return u.CustomName + } + return defaultUpgradeDescriptionName +} +func (u UpgradeDescription) UpgradeOption(v string) helmclient.UpgradeOption { + return func(upgrade *action.Upgrade) error { + upgrade.Description = v + return nil + } +} + +var _ Uninstall = &UninstallDescription{} + +type UninstallDescription struct { + CustomName string +} + +func (u UninstallDescription) Name() string { + if u.CustomName != "" { + return u.CustomName + } + return defaultUninstallDescriptionName +} +func (u UninstallDescription) UninstallOption(v string) helmclient.UninstallOption { + return func(uninstall *action.Uninstall) error { + uninstall.Description = v + return nil + } +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/actionclient.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/actionclient.go new file mode 100644 index 0000000000..9706c81dc7 --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/actionclient.go @@ -0,0 +1,351 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + sdkhandler "github.com/operator-framework/operator-lib/handler" + "gomodules.xyz/jsonpatch/v2" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/kube" + helmkube "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/storage/driver" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + apitypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/cli-runtime/pkg/resource" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil" + "github.com/operator-framework/helm-operator-plugins/pkg/manifestutil" +) + +type ActionClientGetter interface { + ActionClientFor(obj client.Object) (ActionInterface, error) +} + +type ActionClientGetterFunc func(obj client.Object) (ActionInterface, error) + +func (acgf ActionClientGetterFunc) ActionClientFor(obj client.Object) (ActionInterface, error) { + return acgf(obj) +} + +type ActionInterface interface { + Get(name string, opts ...GetOption) (*release.Release, error) + Install(name, namespace string, chrt *chart.Chart, vals map[string]interface{}, opts ...InstallOption) (*release.Release, error) + Upgrade(name, namespace string, chrt *chart.Chart, vals map[string]interface{}, opts ...UpgradeOption) (*release.Release, error) + Uninstall(name string, opts ...UninstallOption) (*release.UninstallReleaseResponse, error) + Reconcile(rel *release.Release) error +} + +type GetOption func(*action.Get) error +type InstallOption func(*action.Install) error +type UpgradeOption func(*action.Upgrade) error +type UninstallOption func(*action.Uninstall) error + +func NewActionClientGetter(acg ActionConfigGetter) ActionClientGetter { + return &actionClientGetter{acg} +} + +type actionClientGetter struct { + acg ActionConfigGetter +} + +var _ ActionClientGetter = &actionClientGetter{} + +func (hcg *actionClientGetter) ActionClientFor(obj client.Object) (ActionInterface, error) { + actionConfig, err := hcg.acg.ActionConfigFor(obj) + if err != nil { + return nil, err + } + rm, err := actionConfig.RESTClientGetter.ToRESTMapper() + if err != nil { + return nil, err + } + postRenderer := createPostRenderer(rm, actionConfig.KubeClient, obj) + return &actionClient{actionConfig, postRenderer}, nil +} + +type actionClient struct { + conf *action.Configuration + postRenderer postrender.PostRenderer +} + +var _ ActionInterface = &actionClient{} + +func (c *actionClient) Get(name string, opts ...GetOption) (*release.Release, error) { + get := action.NewGet(c.conf) + for _, o := range opts { + if err := o(get); err != nil { + return nil, err + } + } + return get.Run(name) +} + +func (c *actionClient) Install(name, namespace string, chrt *chart.Chart, vals map[string]interface{}, opts ...InstallOption) (*release.Release, error) { + install := action.NewInstall(c.conf) + install.PostRenderer = c.postRenderer + for _, o := range opts { + if err := o(install); err != nil { + return nil, err + } + } + install.ReleaseName = name + install.Namespace = namespace + 
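+	// Note: the release name and namespace assignments above run after the
+	// InstallOption funcs, so options cannot override either value.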
c.conf.Log("Starting install")
+	rel, err := install.Run(chrt, vals)
+	if err != nil {
+		c.conf.Log("Install failed")
+		if rel != nil {
+			// Uninstall the failed release so that we can retry the
+			// installation during the next reconciliation. In many
+			// cases, the issue is unresolvable without a change to the CR, but
+			// controller-runtime will backoff on retries after failed attempts.
+			//
+			// In certain cases, Install will return a partial release in
+			// the response even when it doesn't record the release in its release
+			// store (e.g. when there is an error rendering the release manifest).
+			// In that case the uninstall will fail with a "release not found"
+			// error because there was nothing to uninstall.
+			//
+			// Only return an error about the uninstall failure if the failure was
+			// caused by something other than the release not being found.
+			_, uninstallErr := c.Uninstall(name)
+			if uninstallErr != nil && !errors.Is(uninstallErr, driver.ErrReleaseNotFound) {
+				return nil, fmt.Errorf("uninstall failed: %v: original install error: %w", uninstallErr, err)
+			}
+		}
+		return nil, err
+	}
+	return rel, nil
+}
+
+func (c *actionClient) Upgrade(name, namespace string, chrt *chart.Chart, vals map[string]interface{}, opts ...UpgradeOption) (*release.Release, error) {
+	upgrade := action.NewUpgrade(c.conf)
+	upgrade.PostRenderer = c.postRenderer
+	for _, o := range opts {
+		if err := o(upgrade); err != nil {
+			return nil, err
+		}
+	}
+	upgrade.Namespace = namespace
+	rel, err := upgrade.Run(name, chrt, vals)
+	if err != nil {
+		if rel != nil {
+			rollback := action.NewRollback(c.conf)
+			rollback.Force = true
+
+			// As of Helm 2.13, if Upgrade returns a non-nil release, that
+			// means the release was also recorded in the release store.
+			// Therefore, we should perform the rollback when we have a non-nil
+			// release. Any rollback error here would be unexpected, so both
+			// the upgrade and rollback errors are returned to the caller.
+			rollbackErr := rollback.Run(name)
+			if rollbackErr != nil {
+				return nil, fmt.Errorf("rollback failed: %v: original upgrade error: %w", rollbackErr, err)
+			}
+		}
+		return nil, err
+	}
+	return rel, nil
+}
+
+func (c *actionClient) Uninstall(name string, opts ...UninstallOption) (*release.UninstallReleaseResponse, error) {
+	uninstall := action.NewUninstall(c.conf)
+	for _, o := range opts {
+		if err := o(uninstall); err != nil {
+			return nil, err
+		}
+	}
+	return uninstall.Run(name)
+}
+
+func (c *actionClient) Reconcile(rel *release.Release) error {
+	infos, err := c.conf.KubeClient.Build(bytes.NewBufferString(rel.Manifest), false)
+	if err != nil {
+		return err
+	}
+	return infos.Visit(func(expected *resource.Info, err error) error {
+		if err != nil {
+			return fmt.Errorf("visit error: %w", err)
+		}
+
+		helper := resource.NewHelper(expected.Client, expected.Mapping)
+
+		existing, err := helper.Get(expected.Namespace, expected.Name)
+		if apierrors.IsNotFound(err) {
+			if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil {
+				return fmt.Errorf("create error: %w", err)
+			}
+			return nil
+		} else if err != nil {
+			return fmt.Errorf("could not get object: %w", err)
+		}
+
+		patch, patchType, err := createPatch(existing, expected)
+		if err != nil {
+			return fmt.Errorf("error creating patch: %w", err)
+		}
+
+		if patch == nil {
+			// nothing to do
+			return nil
+		}
+
+		_, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch,
+			&metav1.PatchOptions{})
+		if err != nil {
+			return fmt.Errorf("patch error: %w", err)
+		}
+		return nil
+	})
+}
+
+func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) {
+	existingJSON, err := json.Marshal(existing)
+	if err != nil {
+		return nil, apitypes.StrategicMergePatchType, err
+	}
+	expectedJSON, err := json.Marshal(expected.Object)
+	if err != nil {
+		return nil, apitypes.StrategicMergePatchType, err
+	}
+
+	// Get a versioned object
+	versionedObject := helmkube.AsVersioned(expected)
+
+	// Unstructured objects, such as CRDs, may not have a "not registered"
+	// error returned from ConvertToVersion. Anything that's unstructured
+	// should use the JSON patch fallback below, because strategic merge
+	// patch is not supported on objects like CRDs.
+	_, isUnstructured := versionedObject.(runtime.Unstructured)
+
+	// On newer K8s versions, CRDs aren't unstructured but have this dedicated type
+	_, isCRDv1beta1 := versionedObject.(*apiextv1beta1.CustomResourceDefinition)
+	_, isCRDv1 := versionedObject.(*apiextv1.CustomResourceDefinition)
+
+	if isUnstructured || isCRDv1beta1 || isCRDv1 {
+		// fall back to a generic JSON patch
+		patch, err := createJSONMergePatch(existingJSON, expectedJSON)
+		return patch, apitypes.JSONPatchType, err
+	}
+
+	patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject)
+	if err != nil {
+		return nil, apitypes.StrategicMergePatchType, err
+	}
+
+	patch, err := strategicpatch.CreateThreeWayMergePatch(expectedJSON, expectedJSON, existingJSON, patchMeta, true)
+	return patch, apitypes.StrategicMergePatchType, err
+}
+
+func createJSONMergePatch(existingJSON, expectedJSON []byte) ([]byte, error) {
+	ops, err := jsonpatch.CreatePatch(existingJSON, expectedJSON)
+	if err != nil {
+		return nil, err
+	}
+
+	// We ignore the "remove" operations from the full patch because they are
+	// fields added by Kubernetes or by the user after the existing release
+	// resource has been applied.
The goal for this patch is to make sure that + // the fields managed by the Helm chart are applied. + // All "add" operations without a value (null) can be ignored + patchOps := make([]jsonpatch.JsonPatchOperation, 0) + for _, op := range ops { + if op.Operation != "remove" && !(op.Operation == "add" && op.Value == nil) { + patchOps = append(patchOps, op) + } + } + + // If there are no patch operations, return nil. Callers are expected + // to check for a nil response and skip the patch operation to avoid + // unnecessary chatter with the API server. + if len(patchOps) == 0 { + return nil, nil + } + + return json.Marshal(patchOps) +} + +func createPostRenderer(rm meta.RESTMapper, kubeClient kube.Interface, owner client.Object) postrender.PostRenderer { + return &ownerPostRenderer{rm, kubeClient, owner} +} + +type ownerPostRenderer struct { + rm meta.RESTMapper + kubeClient kube.Interface + owner client.Object +} + +func (pr *ownerPostRenderer) Run(in *bytes.Buffer) (*bytes.Buffer, error) { + resourceList, err := pr.kubeClient.Build(in, false) + if err != nil { + return nil, err + } + out := bytes.Buffer{} + + err = resourceList.Visit(func(r *resource.Info, err error) error { + if err != nil { + return err + } + objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(r.Object) + if err != nil { + return err + } + u := &unstructured.Unstructured{Object: objMap} + useOwnerRef, err := controllerutil.SupportsOwnerReference(pr.rm, pr.owner, u) + if err != nil { + return err + } + if useOwnerRef && !manifestutil.HasResourcePolicyKeep(u.GetAnnotations()) { + ownerRef := metav1.NewControllerRef(pr.owner, pr.owner.GetObjectKind().GroupVersionKind()) + ownerRefs := append(u.GetOwnerReferences(), *ownerRef) + u.SetOwnerReferences(ownerRefs) + } else { + if err := sdkhandler.SetOwnerAnnotations(pr.owner, u); err != nil { + return err + } + } + outData, err := yaml.Marshal(u.Object) + if err != nil { + return err + } + if _, err := out.WriteString("---\n" + string(outData)); err != nil { + return err + } + return nil + }) + if err != nil { + return nil, err + } + return &out, nil +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/actionconfig.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/actionconfig.go new file mode 100644 index 0000000000..7fe103294c --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/actionconfig.go @@ -0,0 +1,117 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/kube" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ActionConfigGetter interface { + ActionConfigFor(obj client.Object) (*action.Configuration, error) +} + +func NewActionConfigGetter(cfg *rest.Config, rm meta.RESTMapper, log logr.Logger) ActionConfigGetter { + return &actionConfigGetter{ + cfg: cfg, + restMapper: rm, + log: log, + } +} + +var _ ActionConfigGetter = &actionConfigGetter{} + +type actionConfigGetter struct { + cfg *rest.Config + restMapper meta.RESTMapper + log logr.Logger +} + +func (acg *actionConfigGetter) ActionConfigFor(obj client.Object) (*action.Configuration, error) { + // Create a RESTClientGetter + rcg := newRESTClientGetter(acg.cfg, acg.restMapper, obj.GetNamespace()) + + // Setup the debug log function that Helm will use + debugLog := func(format string, v ...interface{}) { + if acg.log != nil { + acg.log.V(1).Info(fmt.Sprintf(format, v...)) + } + } + + // Create a client that helm will use to manage release resources. + // The passed object is used as an owner reference on every + // object the client creates. + kc := kube.New(rcg) + kc.Log = debugLog + + // Create the Kubernetes Secrets client. The passed object is + // also used as an owner reference in the release secrets + // created by this client. + kcs, err := cmdutil.NewFactory(rcg).KubernetesClientSet() + if err != nil { + return nil, err + } + + ownerRef := metav1.NewControllerRef(obj, obj.GetObjectKind().GroupVersionKind()) + d := driver.NewSecrets(&ownerRefSecretClient{ + SecretInterface: kcs.CoreV1().Secrets(obj.GetNamespace()), + refs: []metav1.OwnerReference{*ownerRef}, + }) + + // Also, use the debug log for the storage driver + d.Log = debugLog + + // Initialize the storage backend + s := storage.Init(d) + + return &action.Configuration{ + RESTClientGetter: rcg, + Releases: s, + KubeClient: kc, + Log: debugLog, + }, nil +} + +var _ v1.SecretInterface = &ownerRefSecretClient{} + +type ownerRefSecretClient struct { + v1.SecretInterface + refs []metav1.OwnerReference +} + +func (c *ownerRefSecretClient) Create(ctx context.Context, in *corev1.Secret, opts metav1.CreateOptions) (*corev1.Secret, error) { + in.OwnerReferences = append(in.OwnerReferences, c.refs...) + return c.SecretInterface.Create(ctx, in, opts) +} + +func (c *ownerRefSecretClient) Update(ctx context.Context, in *corev1.Secret, opts metav1.UpdateOptions) (*corev1.Secret, error) { + in.OwnerReferences = append(in.OwnerReferences, c.refs...) + return c.SecretInterface.Update(ctx, in, opts) +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/restclientgetter.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/restclientgetter.go new file mode 100644 index 0000000000..1fd3e7afd5 --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/client/restclientgetter.go @@ -0,0 +1,100 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/discovery" + cached "k8s.io/client-go/discovery/cached" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var _ genericclioptions.RESTClientGetter = &restClientGetter{} + +func newRESTClientGetter(cfg *rest.Config, rm meta.RESTMapper, ns string) genericclioptions.RESTClientGetter { + return &restClientGetter{ + restConfig: cfg, + restMapper: rm, + namespaceConfig: &namespaceClientConfig{ns}, + } +} + +type restClientGetter struct { + restConfig *rest.Config + restMapper meta.RESTMapper + namespaceConfig clientcmd.ClientConfig + + setupDiscoveryClient sync.Once + cachedDiscoveryClient discovery.CachedDiscoveryInterface +} + +func (c *restClientGetter) ToRESTConfig() (*rest.Config, error) { + return c.restConfig, nil +} + +func (c *restClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + var ( + dc discovery.DiscoveryInterface + err error + ) + c.setupDiscoveryClient.Do(func() { + dc, err = discovery.NewDiscoveryClientForConfig(c.restConfig) + if err != nil { + return + } + c.cachedDiscoveryClient = cached.NewMemCacheClient(dc) + }) + if err != nil { + return nil, err + } + return c.cachedDiscoveryClient, nil +} + +func (c *restClientGetter) ToRESTMapper() (meta.RESTMapper, error) { + return c.restMapper, nil +} + +func (c *restClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { + return c.namespaceConfig +} + +var _ clientcmd.ClientConfig = &namespaceClientConfig{} + +type namespaceClientConfig struct { + namespace string +} + +func (c namespaceClientConfig) RawConfig() (clientcmdapi.Config, error) { + return clientcmdapi.Config{}, nil +} + +func (c namespaceClientConfig) ClientConfig() (*rest.Config, error) { + return nil, nil +} + +func (c namespaceClientConfig) Namespace() (string, bool, error) { + return c.namespace, false, nil +} + +func (c namespaceClientConfig) ConfigAccess() clientcmd.ConfigAccess { + return nil +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/hook/hook.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/hook/hook.go new file mode 100644 index 0000000000..d1f42cc941 --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/hook/hook.go @@ -0,0 +1,44 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hook + +import ( + "github.com/go-logr/logr" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +type PreHook interface { + Exec(*unstructured.Unstructured, chartutil.Values, logr.Logger) error +} + +type PreHookFunc func(*unstructured.Unstructured, chartutil.Values, logr.Logger) error + +func (f PreHookFunc) Exec(obj *unstructured.Unstructured, vals chartutil.Values, log logr.Logger) error { + return f(obj, vals, log) +} + +type PostHook interface { + Exec(*unstructured.Unstructured, release.Release, logr.Logger) error +} + +type PostHookFunc func(*unstructured.Unstructured, release.Release, logr.Logger) error + +func (f PostHookFunc) Exec(obj *unstructured.Unstructured, rel release.Release, log logr.Logger) error { + return f(obj, rel, log) +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil/controllerutil.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil/controllerutil.go new file mode 100644 index 0000000000..0c95f82d3a --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil/controllerutil.go @@ -0,0 +1,90 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllerutil + +import ( + "context" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +var ( + AddFinalizer = controllerutil.AddFinalizer + RemoveFinalizer = controllerutil.RemoveFinalizer + ContainsFinalizer = func(obj metav1.Object, finalizer string) bool { + for _, f := range obj.GetFinalizers() { + if f == finalizer { + return true + } + } + return false + } +) + +func WaitForDeletion(ctx context.Context, cl client.Reader, o client.Object) error { + key := client.ObjectKeyFromObject(o) + + return wait.PollImmediateUntil(time.Millisecond*10, func() (bool, error) { + err := cl.Get(ctx, key, o) + if apierrors.IsNotFound(err) { + return true, nil + } + if err != nil { + return false, err + } + return false, nil + }, ctx.Done()) +} + +func SupportsOwnerReference(restMapper meta.RESTMapper, owner, dependent client.Object) (bool, error) { + ownerGVK := owner.GetObjectKind().GroupVersionKind() + ownerMapping, err := restMapper.RESTMapping(ownerGVK.GroupKind(), ownerGVK.Version) + if err != nil { + return false, err + } + + depGVK := dependent.GetObjectKind().GroupVersionKind() + depMapping, err := restMapper.RESTMapping(depGVK.GroupKind(), depGVK.Version) + if err != nil { + return false, err + } + + ownerClusterScoped := ownerMapping.Scope.Name() == meta.RESTScopeNameRoot + ownerNamespace := owner.GetNamespace() + depClusterScoped := depMapping.Scope.Name() == meta.RESTScopeNameRoot + depNamespace := dependent.GetNamespace() + + if ownerClusterScoped { + return true, nil + } + + if depClusterScoped { + return false, nil + } + + if ownerNamespace != depNamespace { + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/predicate/predicates.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/predicate/predicates.go new file mode 100644 index 0000000000..b4b2e40d28 --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/predicate/predicates.go @@ -0,0 +1,81 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package predicate + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/log" + crtpredicate "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var log = logf.Log.WithName("predicate") + +type GenerationChangedPredicate = crtpredicate.GenerationChangedPredicate + +// DependentPredicateFuncs returns functions defined for filtering events +func DependentPredicateFuncs() crtpredicate.Funcs { + dependentPredicate := crtpredicate.Funcs{ + // We don't need to reconcile dependent resource creation events + // because dependent resources are only ever created during + // reconciliation. Another reconcile would be redundant. + CreateFunc: func(e event.CreateEvent) bool { + o := e.Object.(*unstructured.Unstructured) + log.V(1).Info("Skipping reconciliation for dependent resource creation", "name", o.GetName(), "namespace", o.GetNamespace(), "apiVersion", o.GroupVersionKind().GroupVersion(), "kind", o.GroupVersionKind().Kind) + return false + }, + + // Reconcile when a dependent resource is deleted so that it can be + // recreated. + DeleteFunc: func(e event.DeleteEvent) bool { + o := e.Object.(*unstructured.Unstructured) + log.V(1).Info("Reconciling due to dependent resource deletion", "name", o.GetName(), "namespace", o.GetNamespace(), "apiVersion", o.GroupVersionKind().GroupVersion(), "kind", o.GroupVersionKind().Kind) + return true + }, + + // Don't reconcile when a generic event is received for a dependent + GenericFunc: func(e event.GenericEvent) bool { + o := e.Object.(*unstructured.Unstructured) + log.V(1).Info("Skipping reconcile due to generic event", "name", o.GetName(), "namespace", o.GetNamespace(), "apiVersion", o.GroupVersionKind().GroupVersion(), "kind", o.GroupVersionKind().Kind) + return false + }, + + // Reconcile when a dependent resource is updated, so that it can + // be patched back to the resource managed by the CR, if + // necessary. Ignore updates that only change the status and + // resourceVersion. + UpdateFunc: func(e event.UpdateEvent) bool { + old := e.ObjectOld.(*unstructured.Unstructured).DeepCopy() + new := e.ObjectNew.(*unstructured.Unstructured).DeepCopy() + + delete(old.Object, "status") + delete(new.Object, "status") + old.SetResourceVersion("") + new.SetResourceVersion("") + + if reflect.DeepEqual(old.Object, new.Object) { + return false + } + log.V(1).Info("Reconciling due to dependent resource update", "name", new.GetName(), "namespace", new.GetNamespace(), "apiVersion", new.GroupVersionKind().GroupVersion(), "kind", new.GroupVersionKind().Kind) + return true + }, + } + + return dependentPredicate +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/status/conditions.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/status/conditions.go new file mode 100644 index 0000000000..86b3667fec --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/status/conditions.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+	"encoding/json"
+	"sort"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubeclock "k8s.io/apimachinery/pkg/util/clock"
+)
+
+// clock is used to set status condition timestamps.
+// This variable makes it easier to test conditions.
+var clock kubeclock.Clock = &kubeclock.RealClock{}
+
+// ConditionType is the type of the condition and is typically a CamelCased
+// word or short phrase.
+//
+// Condition types should indicate state in the "abnormal-true" polarity. For
+// example, if the condition indicates when a policy is invalid, the "is valid"
+// case is probably the norm, so the condition should be called "Invalid".
+type ConditionType string
+
+// ConditionReason is intended to be a one-word, CamelCase representation of
+// the category of cause of the current status. It is intended to be used in
+// concise output, such as one-line kubectl get output, and in summarizing
+// occurrences of causes.
+type ConditionReason string
+
+// Condition represents an observation of an object's state. Conditions are an
+// extension mechanism intended to be used when the details of an observation
+// are not a priori known or would not apply to all instances of a given Kind.
+//
+// Conditions should be added to explicitly convey properties that users and
+// components care about rather than requiring those properties to be inferred
+// from other observations. Once defined, the meaning of a Condition cannot be
+// changed arbitrarily - it becomes part of the API, and has the same
+// backwards- and forwards-compatibility concerns as any other part of the API.
+type Condition struct {
+	Type               ConditionType          `json:"type"`
+	Status             corev1.ConditionStatus `json:"status"`
+	Reason             ConditionReason        `json:"reason,omitempty"`
+	Message            string                 `json:"message,omitempty"`
+	LastTransitionTime metav1.Time            `json:"lastTransitionTime,omitempty"`
+}
+
+// IsTrue returns whether the condition status is "True".
+func (c Condition) IsTrue() bool {
+	return c.Status == corev1.ConditionTrue
+}
+
+// IsFalse returns whether the condition status is "False".
+func (c Condition) IsFalse() bool {
+	return c.Status == corev1.ConditionFalse
+}
+
+// IsUnknown returns whether the condition status is "Unknown".
+func (c Condition) IsUnknown() bool {
+	return c.Status == corev1.ConditionUnknown
+}
+
+// DeepCopyInto copies c into cpy.
+func (c *Condition) DeepCopyInto(cpy *Condition) {
+	*cpy = *c
+}
+
+// Conditions is a set of Condition instances.
+type Conditions []Condition
+
+// NewConditions initializes a set of conditions with the given list of
+// conditions.
+func NewConditions(conds ...Condition) Conditions {
+	conditions := Conditions{}
+	for _, c := range conds {
+		conditions.SetCondition(c)
+	}
+	return conditions
+}
+
+// IsTrueFor searches the set of conditions for a condition with the given
+// ConditionType. If found, it returns `condition.IsTrue()`. If not found,
+// it returns false.
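+//
+// Illustrative use (the conditions variable is assumed to be a Conditions set):
+//
+//	if conditions.IsTrueFor("Deployed") {
+//		// handle the deployed case
+//	}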
+func (conditions Conditions) IsTrueFor(t ConditionType) bool {
+	for _, condition := range conditions {
+		if condition.Type == t {
+			return condition.IsTrue()
+		}
+	}
+	return false
+}
+
+// IsFalseFor searches the set of conditions for a condition with the given
+// ConditionType. If found, it returns `condition.IsFalse()`. If not found,
+// it returns false.
+func (conditions Conditions) IsFalseFor(t ConditionType) bool {
+	for _, condition := range conditions {
+		if condition.Type == t {
+			return condition.IsFalse()
+		}
+	}
+	return false
+}
+
+// IsUnknownFor searches the set of conditions for a condition with the given
+// ConditionType. If found, it returns `condition.IsUnknown()`. If not found,
+// it returns true.
+func (conditions Conditions) IsUnknownFor(t ConditionType) bool {
+	for _, condition := range conditions {
+		if condition.Type == t {
+			return condition.IsUnknown()
+		}
+	}
+	return true
+}
+
+// SetCondition adds (or updates) the set of conditions with the given
+// condition. It returns true if the condition is new or differs from the
+// existing condition of the same type.
+func (conditions *Conditions) SetCondition(newCond Condition) bool {
+	newCond.LastTransitionTime = metav1.Time{Time: clock.Now()}
+
+	for i, condition := range *conditions {
+		if condition.Type == newCond.Type {
+			if condition.Status == newCond.Status {
+				newCond.LastTransitionTime = condition.LastTransitionTime
+			}
+			changed := condition.Status != newCond.Status ||
+				condition.Reason != newCond.Reason ||
+				condition.Message != newCond.Message
+			(*conditions)[i] = newCond
+			return changed
+		}
+	}
+	*conditions = append(*conditions, newCond)
+	return true
+}
+
+// GetCondition searches the set of conditions for the condition with the given
+// ConditionType and returns it. If the matching condition is not found,
+// GetCondition returns nil.
+func (conditions Conditions) GetCondition(t ConditionType) *Condition {
+	for _, condition := range conditions {
+		if condition.Type == t {
+			return &condition
+		}
+	}
+	return nil
+}
+
+// RemoveCondition removes the condition with the given ConditionType from
+// the conditions set and returns true. If no condition with that type is
+// found, RemoveCondition performs no action and returns false.
+func (conditions *Conditions) RemoveCondition(t ConditionType) bool {
+	if conditions == nil {
+		return false
+	}
+	for i, condition := range *conditions {
+		if condition.Type == t {
+			*conditions = append((*conditions)[:i], (*conditions)[i+1:]...)
+			return true
+		}
+	}
+	return false
+}
+
+// MarshalJSON marshals the set of conditions as a JSON array, sorted by
+// condition type.
+func (conditions Conditions) MarshalJSON() ([]byte, error) {
+	conds := []Condition(conditions)
+	sort.Slice(conds, func(a, b int) bool {
+		return conds[a].Type < conds[b].Type
+	})
+	return json.Marshal(conds)
+}
diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/manifestutil/resourcepolicykeep.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/manifestutil/resourcepolicykeep.go
new file mode 100644
index 0000000000..167b7e8b50
--- /dev/null
+++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/manifestutil/resourcepolicykeep.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2021 The Operator-SDK Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manifestutil + +import ( + "strings" + + "helm.sh/helm/v3/pkg/kube" +) + +func HasResourcePolicyKeep(annotations map[string]string) bool { + if annotations == nil { + return false + } + resourcePolicyType, ok := annotations[kube.ResourcePolicyAnno] + if !ok { + return false + } + resourcePolicyType = strings.ToLower(strings.TrimSpace(resourcePolicyType)) + return resourcePolicyType == kube.KeepPolicy +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/conditions/conditions.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/conditions/conditions.go new file mode 100644 index 0000000000..1148e1257c --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/conditions/conditions.go @@ -0,0 +1,70 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package conditions + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + + "github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/status" +) + +const ( + TypeInitialized = "Initialized" + TypeDeployed = "Deployed" + TypeReleaseFailed = "ReleaseFailed" + TypeIrreconcilable = "Irreconcilable" + + ReasonInstallSuccessful = status.ConditionReason("InstallSuccessful") + ReasonUpgradeSuccessful = status.ConditionReason("UpgradeSuccessful") + ReasonUninstallSuccessful = status.ConditionReason("UninstallSuccessful") + + ReasonErrorGettingClient = status.ConditionReason("ErrorGettingClient") + ReasonErrorGettingValues = status.ConditionReason("ErrorGettingValues") + ReasonErrorGettingReleaseState = status.ConditionReason("ErrorGettingReleaseState") + ReasonInstallError = status.ConditionReason("InstallError") + ReasonUpgradeError = status.ConditionReason("UpgradeError") + ReasonReconcileError = status.ConditionReason("ReconcileError") + ReasonUninstallError = status.ConditionReason("UninstallError") +) + +func Initialized(stat corev1.ConditionStatus, reason status.ConditionReason, message interface{}) status.Condition { + return newCondition(TypeInitialized, stat, reason, message) +} + +func Deployed(stat corev1.ConditionStatus, reason status.ConditionReason, message interface{}) status.Condition { + return newCondition(TypeDeployed, stat, reason, message) +} + +func ReleaseFailed(stat corev1.ConditionStatus, reason status.ConditionReason, message interface{}) status.Condition { + return newCondition(TypeReleaseFailed, stat, reason, message) +} + +func Irreconcilable(stat corev1.ConditionStatus, reason status.ConditionReason, message interface{}) status.Condition { + return newCondition(TypeIrreconcilable, stat, reason, message) +} + +func newCondition(t status.ConditionType, s corev1.ConditionStatus, r status.ConditionReason, m interface{}) status.Condition { + message := fmt.Sprintf("%s", m) + return status.Condition{ + Type: t, + Status: s, + Reason: r, + Message: message, + } +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/hook/hook.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/hook/hook.go new file mode 100644 index 0000000000..f76ac7966a --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/hook/hook.go @@ -0,0 +1,100 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hook + +import ( + "sync" + + "github.com/go-logr/logr" + sdkhandler "github.com/operator-framework/operator-lib/handler" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + "sigs.k8s.io/yaml" + + "github.com/operator-framework/helm-operator-plugins/pkg/hook" + "github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil" + "github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/predicate" + "github.com/operator-framework/helm-operator-plugins/pkg/manifestutil" +) + +func NewDependentResourceWatcher(c controller.Controller, rm meta.RESTMapper) hook.PostHook { + return &dependentResourceWatcher{ + controller: c, + restMapper: rm, + m: sync.Mutex{}, + watches: make(map[schema.GroupVersionKind]struct{}), + } +} + +type dependentResourceWatcher struct { + controller controller.Controller + restMapper meta.RESTMapper + + m sync.Mutex + watches map[schema.GroupVersionKind]struct{} +} + +func (d *dependentResourceWatcher) Exec(owner *unstructured.Unstructured, rel release.Release, log logr.Logger) error { + // using predefined functions for filtering events + dependentPredicate := predicate.DependentPredicateFuncs() + + resources := releaseutil.SplitManifests(rel.Manifest) + d.m.Lock() + defer d.m.Unlock() + for _, r := range resources { + var obj unstructured.Unstructured + err := yaml.Unmarshal([]byte(r), &obj) + if err != nil { + return err + } + + depGVK := obj.GroupVersionKind() + if _, ok := d.watches[depGVK]; ok || depGVK.Empty() { + continue + } + + useOwnerRef, err := controllerutil.SupportsOwnerReference(d.restMapper, owner, &obj) + if err != nil { + return err + } + + if useOwnerRef && !manifestutil.HasResourcePolicyKeep(obj.GetAnnotations()) { + if err := d.controller.Watch(&source.Kind{Type: &obj}, &handler.EnqueueRequestForOwner{ + OwnerType: owner, + IsController: true, + }, dependentPredicate); err != nil { + return err + } + } else { + if err := d.controller.Watch(&source.Kind{Type: &obj}, &sdkhandler.EnqueueRequestForAnnotation{ + Type: owner.GetObjectKind().GroupVersionKind().GroupKind(), + }, dependentPredicate); err != nil { + return err + } + } + + d.watches[depGVK] = struct{}{} + log.V(1).Info("Watching dependent resource", "dependentAPIVersion", depGVK.GroupVersion(), "dependentKind", depGVK.Kind) + } + return nil +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/updater/updater.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/updater/updater.go new file mode 100644 index 0000000000..28c068575d --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/updater/updater.go @@ -0,0 +1,192 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package updater + +import ( + "context" + + "helm.sh/helm/v3/pkg/release" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil" + "github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/status" +) + +func New(client client.Client) Updater { + return Updater{ + client: client, + } +} + +type Updater struct { + client client.Client + updateFuncs []UpdateFunc + updateStatusFuncs []UpdateStatusFunc +} + +type UpdateFunc func(*unstructured.Unstructured) bool +type UpdateStatusFunc func(*helmAppStatus) bool + +func (u *Updater) Update(fs ...UpdateFunc) { + u.updateFuncs = append(u.updateFuncs, fs...) +} + +func (u *Updater) UpdateStatus(fs ...UpdateStatusFunc) { + u.updateStatusFuncs = append(u.updateStatusFuncs, fs...) +} + +func (u *Updater) Apply(ctx context.Context, obj *unstructured.Unstructured) error { + backoff := retry.DefaultRetry + + // Always update the status first. During uninstall, if + // we remove the finalizer, updating the status will fail + // because the object and its status will be garbage-collected + if err := retry.RetryOnConflict(backoff, func() error { + st := statusFor(obj) + needsStatusUpdate := false + for _, f := range u.updateStatusFuncs { + needsStatusUpdate = f(st) || needsStatusUpdate + } + if needsStatusUpdate { + uSt, err := runtime.DefaultUnstructuredConverter.ToUnstructured(st) + if err != nil { + return err + } + obj.Object["status"] = uSt + return u.client.Status().Update(ctx, obj) + } + return nil + }); err != nil { + return err + } + + if err := retry.RetryOnConflict(backoff, func() error { + needsUpdate := false + for _, f := range u.updateFuncs { + needsUpdate = f(obj) || needsUpdate + } + if needsUpdate { + return u.client.Update(ctx, obj) + } + return nil + }); err != nil { + return err + } + + return nil +} + +func EnsureFinalizer(finalizer string) UpdateFunc { + return func(obj *unstructured.Unstructured) bool { + if controllerutil.ContainsFinalizer(obj, finalizer) { + return false + } + controllerutil.AddFinalizer(obj, finalizer) + return true + } +} + +func RemoveFinalizer(finalizer string) UpdateFunc { + return func(obj *unstructured.Unstructured) bool { + if !controllerutil.ContainsFinalizer(obj, finalizer) { + return false + } + controllerutil.RemoveFinalizer(obj, finalizer) + return true + } +} + +func EnsureCondition(condition status.Condition) UpdateStatusFunc { + return func(status *helmAppStatus) bool { + return status.Conditions.SetCondition(condition) + } +} + +func EnsureConditionUnknown(t status.ConditionType) UpdateStatusFunc { + return func(s *helmAppStatus) bool { + return s.Conditions.SetCondition(status.Condition{ + Type: t, + Status: corev1.ConditionUnknown, + }) + } +} + +func EnsureDeployedRelease(rel *release.Release) UpdateStatusFunc { + return func(status *helmAppStatus) bool { + newRel := helmAppReleaseFor(rel) + if status.DeployedRelease == nil && newRel == nil { + return false + } + if status.DeployedRelease != nil && newRel != nil && + *status.DeployedRelease == *newRel { + return false + } + status.DeployedRelease = newRel + return true + } +} + +func RemoveDeployedRelease() UpdateStatusFunc { + return EnsureDeployedRelease(nil) +} + +type helmAppStatus struct { + 
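+	// Conditions mirrors the status conditions maintained by the reconciler
+	// (e.g. Initialized, Deployed, ReleaseFailed, Irreconcilable).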
Conditions status.Conditions `json:"conditions"` + DeployedRelease *helmAppRelease `json:"deployedRelease,omitempty"` +} + +type helmAppRelease struct { + Name string `json:"name,omitempty"` + Manifest string `json:"manifest,omitempty"` +} + +func statusFor(obj *unstructured.Unstructured) *helmAppStatus { + if obj == nil || obj.Object == nil { + return nil + } + status, ok := obj.Object["status"] + if !ok { + return &helmAppStatus{} + } + + switch s := status.(type) { + case *helmAppStatus: + return s + case helmAppStatus: + return &s + case map[string]interface{}: + out := &helmAppStatus{} + _ = runtime.DefaultUnstructuredConverter.FromUnstructured(s, out) + return out + default: + return &helmAppStatus{} + } +} + +func helmAppReleaseFor(rel *release.Release) *helmAppRelease { + if rel == nil { + return nil + } + return &helmAppRelease{ + Name: rel.Name, + Manifest: rel.Manifest, + } +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/values/values.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/values/values.go new file mode 100644 index 0000000000..dd6e51fd06 --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/values/values.go @@ -0,0 +1,70 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package values + +import ( + "fmt" + "os" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/strvals" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/operator-framework/helm-operator-plugins/pkg/values" +) + +type Values struct { + m map[string]interface{} +} + +func FromUnstructured(obj *unstructured.Unstructured) (*Values, error) { + if obj == nil || obj.Object == nil { + return nil, fmt.Errorf("nil object") + } + spec, ok := obj.Object["spec"] + if !ok { + return nil, fmt.Errorf("spec not found") + } + specMap, ok := spec.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("spec must be a map") + } + return New(specMap), nil +} + +func New(m map[string]interface{}) *Values { + return &Values{m: m} +} + +func (v *Values) Map() map[string]interface{} { + if v == nil { + return nil + } + return v.m +} + +func (v *Values) ApplyOverrides(in map[string]string) error { + for inK, inV := range in { + val := fmt.Sprintf("%s=%s", inK, os.ExpandEnv(inV)) + if err := strvals.ParseInto(val, v.m); err != nil { + return err + } + } + return nil +} + +var DefaultMapper = values.MapperFunc(func(v chartutil.Values) chartutil.Values { return v }) diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/reconciler.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/reconciler.go new file mode 100644 index 0000000000..11872bf2e1 --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/reconciler/reconciler.go @@ -0,0 +1,797 @@ +/* +Copyright 2020 The Operator-SDK Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/go-logr/logr" + sdkhandler "github.com/operator-framework/operator-lib/handler" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/storage/driver" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/operator-framework/helm-operator-plugins/pkg/annotation" + helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client" + "github.com/operator-framework/helm-operator-plugins/pkg/hook" + "github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil" + "github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/conditions" + internalhook "github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/hook" + "github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/updater" + internalvalues "github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/values" + "github.com/operator-framework/helm-operator-plugins/pkg/values" +) + +const uninstallFinalizer = "uninstall-helm-release" + +// Reconciler reconciles a Helm object +type Reconciler struct { + client client.Client + actionClientGetter helmclient.ActionClientGetter + valueMapper values.Mapper + eventRecorder record.EventRecorder + preHooks []hook.PreHook + postHooks []hook.PostHook + + log logr.Logger + gvk *schema.GroupVersionKind + chrt *chart.Chart + overrideValues map[string]string + skipDependentWatches bool + maxConcurrentReconciles int + reconcilePeriod time.Duration + + annotSetupOnce sync.Once + annotations map[string]struct{} + installAnnotations map[string]annotation.Install + upgradeAnnotations map[string]annotation.Upgrade + uninstallAnnotations map[string]annotation.Uninstall +} + +// New creates a new Reconciler that reconciles custom resources that define a +// Helm release. New takes variadic Option arguments that are used to configure +// the Reconciler. +// +// Required options are: +// - WithGroupVersionKind +// - WithChart +// +// Other options are defaulted to sane defaults when SetupWithManager is called. +// +// If an error occurs configuring or validating the Reconciler, it is returned. 
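+//
+// A minimal construction sketch (the chart path, group, version, and kind
+// below are illustrative assumptions, not values defined by this package):
+//
+//	chrt, _ := loader.Load("helm-charts/myapp")
+//	r, err := reconciler.New(
+//		reconciler.WithChart(*chrt),
+//		reconciler.WithGroupVersionKind(schema.GroupVersionKind{
+//			Group: "example.com", Version: "v1alpha1", Kind: "MyApp",
+//		}),
+//	)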
+func New(opts ...Option) (*Reconciler, error) { + r := &Reconciler{} + r.annotSetupOnce.Do(r.setupAnnotationMaps) + for _, o := range opts { + if err := o(r); err != nil { + return nil, err + } + } + + if err := r.validate(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *Reconciler) setupAnnotationMaps() { + r.annotations = make(map[string]struct{}) + r.installAnnotations = make(map[string]annotation.Install) + r.upgradeAnnotations = make(map[string]annotation.Upgrade) + r.uninstallAnnotations = make(map[string]annotation.Uninstall) +} + +// SetupWithManager configures a controller for the Reconciler and registers +// watches. It also uses the passed Manager to initialize default values for the +// Reconciler and sets up the manager's scheme with the Reconciler's configured +// GroupVersionKind. +// +// If an error occurs setting up the Reconciler with the manager, it is +// returned. +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { + controllerName := fmt.Sprintf("%v-controller", strings.ToLower(r.gvk.Kind)) + + r.addDefaults(mgr, controllerName) + r.setupScheme(mgr) + + c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: r.maxConcurrentReconciles}) + if err != nil { + return err + } + + if err := r.setupWatches(mgr, c); err != nil { + return err + } + + r.log.Info("Watching resource", + "group", r.gvk.Group, + "version", r.gvk.Version, + "kind", r.gvk.Kind, + ) + + return nil +} + +// Option is a function that configures the helm Reconciler. +type Option func(r *Reconciler) error + +// WithClient is an Option that configures a Reconciler's client. +// +// By default, manager.GetClient() is used if this option is not configured. +func WithClient(cl client.Client) Option { + return func(r *Reconciler) error { + r.client = cl + return nil + } +} + +// WithActionClientGetter is an Option that configures a Reconciler's +// ActionClientGetter. +// +// A default ActionClientGetter is used if this option is not configured. +func WithActionClientGetter(actionClientGetter helmclient.ActionClientGetter) Option { + return func(r *Reconciler) error { + r.actionClientGetter = actionClientGetter + return nil + } +} + +// WithEventRecorder is an Option that configures a Reconciler's EventRecorder. +// +// By default, manager.GetEventRecorderFor() is used if this option is not +// configured. +func WithEventRecorder(er record.EventRecorder) Option { + return func(r *Reconciler) error { + r.eventRecorder = er + return nil + } +} + +// WithLog is an Option that configures a Reconciler's logger. +// +// A default logger is used if this option is not configured. +func WithLog(log logr.Logger) Option { + return func(r *Reconciler) error { + r.log = log + return nil + } +} + +// WithGroupVersionKind is an Option that configures a Reconciler's +// GroupVersionKind. +// +// This option is required. +func WithGroupVersionKind(gvk schema.GroupVersionKind) Option { + return func(r *Reconciler) error { + r.gvk = &gvk + return nil + } +} + +// WithChart is an Option that configures a Reconciler's helm chart. +// +// This option is required. +func WithChart(chrt chart.Chart) Option { + return func(r *Reconciler) error { + r.chrt = &chrt + return nil + } +} + +// WithOverrideValues is an Option that configures a Reconciler's override +// values. +// +// Override values can be used to enforce that certain values provided by the +// chart's default values.yaml or by a CR spec are always overridden when +// rendering the chart. 
If a value in overrides is set by a CR, it is +// overridden by the override value. The override value can be static but can +// also refer to an environment variable. +// +// If an environment variable reference is listed in override values but is not +// present in the environment when this function runs, it will resolve to an +// empty string and override all other values. Therefore, when using +// environment variable expansion, ensure that the environment variable is set. +func WithOverrideValues(overrides map[string]string) Option { + return func(r *Reconciler) error { + // Validate that overrides can be parsed and applied + // so that we fail fast during operator setup rather + // than during the first reconciliation. + m := internalvalues.New(map[string]interface{}{}) + if err := m.ApplyOverrides(overrides); err != nil { + return err + } + + r.overrideValues = overrides + return nil + } +} + +// SkipDependentWatches is an Option that configures whether the +// Reconciler will register watches for dependent objects in releases and +// trigger reconciliations when they change. +// +// By default, dependent watches are enabled. +func SkipDependentWatches(skip bool) Option { + return func(r *Reconciler) error { + r.skipDependentWatches = skip + return nil + } +} + +// WithMaxConcurrentReconciles is an Option that configures the number of +// concurrent reconciles that the controller will run. +// +// The default is 1. +func WithMaxConcurrentReconciles(max int) Option { + return func(r *Reconciler) error { + if max < 1 { + return errors.New("maxConcurrentReconciles must be at least 1") + } + r.maxConcurrentReconciles = max + return nil + } +} + +// WithReconcilePeriod is an Option that configures the reconcile period of the +// controller. This will cause the controller to reconcile CRs at least once +// every period. By default, the reconcile period is set to 0, which means no +// time-based reconciliations will occur. +func WithReconcilePeriod(rp time.Duration) Option { + return func(r *Reconciler) error { + if rp < 0 { + return errors.New("reconcile period must not be negative") + } + r.reconcilePeriod = rp + return nil + } +} + +// WithInstallAnnotations is an Option that configures Install annotations +// to enable custom action.Install fields to be set based on the value of +// annotations found in the custom resource watched by this reconciler. +// Duplicate annotation names will result in an error. +func WithInstallAnnotations(as ...annotation.Install) Option { + return func(r *Reconciler) error { + r.annotSetupOnce.Do(r.setupAnnotationMaps) + + for _, a := range as { + name := a.Name() + if _, ok := r.annotations[name]; ok { + return fmt.Errorf("annotation %q already exists", name) + } + + r.annotations[name] = struct{}{} + r.installAnnotations[name] = a + } + return nil + } +} + +// WithUpgradeAnnotations is an Option that configures Upgrade annotations +// to enable custom action.Upgrade fields to be set based on the value of +// annotations found in the custom resource watched by this reconciler. +// Duplicate annotation names will result in an error.
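+//
+// A hypothetical annotation.Upgrade implementation is sketched below; the
+// annotation name and the mapping to action.Upgrade.Force are illustrative
+// assumptions, not part of this package:
+//
+//	type forceUpgrade struct{}
+//
+//	func (forceUpgrade) Name() string { return "example.com/upgrade-force" }
+//
+//	func (forceUpgrade) UpgradeOption(val string) helmclient.UpgradeOption {
+//		return func(u *action.Upgrade) error {
+//			u.Force = val == "true"
+//			return nil
+//		}
+//	}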
+func WithUpgradeAnnotations(as ...annotation.Upgrade) Option { + return func(r *Reconciler) error { + r.annotSetupOnce.Do(r.setupAnnotationMaps) + + for _, a := range as { + name := a.Name() + if _, ok := r.annotations[name]; ok { + return fmt.Errorf("annotation %q already exists", name) + } + + r.annotations[name] = struct{}{} + r.upgradeAnnotations[name] = a + } + return nil + } +} + +// WithUninstallAnnotations is an Option that configures Uninstall annotations +// to enable custom action.Uninstall fields to be set based on the value of +// annotations found in the custom resource watched by this reconciler. +// Duplicate annotation names will result in an error. +func WithUninstallAnnotations(as ...annotation.Uninstall) Option { + return func(r *Reconciler) error { + r.annotSetupOnce.Do(r.setupAnnotationMaps) + + for _, a := range as { + name := a.Name() + if _, ok := r.annotations[name]; ok { + return fmt.Errorf("annotation %q already exists", name) + } + + r.annotations[name] = struct{}{} + r.uninstallAnnotations[name] = a + } + return nil + } +} + +// WithPreHook is an Option that configures the reconciler to run the given +// PreHook just before performing any actions (e.g. install, upgrade, uninstall, +// or reconciliation). +func WithPreHook(h hook.PreHook) Option { + return func(r *Reconciler) error { + r.preHooks = append(r.preHooks, h) + return nil + } +} + +// WithPostHook is an Option that configures the reconciler to run the given +// PostHook just after performing any non-uninstall release actions. +func WithPostHook(h hook.PostHook) Option { + return func(r *Reconciler) error { + r.postHooks = append(r.postHooks, h) + return nil + } +} + +// WithValueMapper is an Option that configures a function that maps values +// from a custom resource spec to the values passed to Helm. +func WithValueMapper(m values.Mapper) Option { + return func(r *Reconciler) error { + r.valueMapper = m + return nil + } +} + +// Reconcile reconciles a CR that defines a Helm v3 release. +// +// - If a release does not exist for this CR, a new release is installed. +// - If a release exists and the CR spec has changed since the last +// reconciliation, the release is upgraded. +// - If a release exists and the CR spec has not changed since the last +// reconciliation, the release is reconciled. Any dependent resources that +// have diverged from the release manifest are re-created or patched so that +// they are re-aligned with the release. +// - If the CR has been deleted, the release will be uninstalled. The +// Reconciler uses a finalizer to ensure the release uninstall succeeds +// before CR deletion occurs. +// +// If an error occurs during release installation or upgrade, the change will be +// rolled back to restore the previous state. +// +// Reconcile also manages the status field of the custom resource. It includes +// the release name and manifest in `status.deployedRelease`, and it updates +// `status.conditions` based on reconciliation progress and success. Condition +// types include: +// +// - Deployed - a release for this CR is deployed (but not necessarily ready). +// - ReleaseFailed - an installation or upgrade failed.
+// - Irreconcilable - an error occurred during reconciliation +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { + log := r.log.WithValues(strings.ToLower(r.gvk.Kind), req.NamespacedName) + + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(*r.gvk) + err = r.client.Get(ctx, req.NamespacedName, obj) + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + if err != nil { + return ctrl.Result{}, err + } + + u := updater.New(r.client) + defer func() { + applyErr := u.Apply(ctx, obj) + if err == nil && !apierrors.IsNotFound(applyErr) { + err = applyErr + } + }() + + actionClient, err := r.actionClientGetter.ActionClientFor(obj) + if err != nil { + u.UpdateStatus( + updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingClient, err)), + updater.EnsureConditionUnknown(conditions.TypeDeployed), + updater.EnsureConditionUnknown(conditions.TypeInitialized), + updater.EnsureConditionUnknown(conditions.TypeReleaseFailed), + updater.EnsureDeployedRelease(nil), + ) + // NOTE: If obj has the uninstall finalizer, that means a release WAS deployed at some point + // in the past, but we don't know if it still is because we don't have an actionClient to check. + // So the question is, what do we do with the finalizer? We could: + // - Leave it in place. This would make the CR impossible to delete without either resolving this error, or + // manually uninstalling the release, deleting the finalizer, and deleting the CR. + // - Remove the finalizer. This would make it possible to delete the CR, but it would leave around any + // release resources that are not owned by the CR (those in the cluster scope or in other namespaces). + // + // The decision made for now is to leave the finalizer in place, so that the user can intervene and try to + // resolve the issue, instead of the operator silently leaving some dangling resources hanging around after the + // CR is deleted. + return ctrl.Result{}, err + } + + // As soon as we get the actionClient, lookup the release and + // update the status with this info. We need to do this as + // early as possible in case other irreconcilable errors occur. + // + // We also make sure not to return any errors we encounter so + // we can still attempt an uninstall if the CR is being deleted. 
+ rel, err := actionClient.Get(obj.GetName()) + if errors.Is(err, driver.ErrReleaseNotFound) { + u.UpdateStatus(updater.EnsureCondition(conditions.Deployed(corev1.ConditionFalse, "", ""))) + } else if err == nil { + ensureDeployedRelease(&u, rel) + } + u.UpdateStatus(updater.EnsureCondition(conditions.Initialized(corev1.ConditionTrue, "", ""))) + + if obj.GetDeletionTimestamp() != nil { + err := r.handleDeletion(ctx, actionClient, obj, log) + return ctrl.Result{}, err + } + + vals, err := r.getValues(obj) + if err != nil { + u.UpdateStatus( + updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingValues, err)), + updater.EnsureConditionUnknown(conditions.TypeReleaseFailed), + ) + return ctrl.Result{}, err + } + + rel, state, err := r.getReleaseState(actionClient, obj, vals.AsMap()) + if err != nil { + u.UpdateStatus( + updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonErrorGettingReleaseState, err)), + updater.EnsureConditionUnknown(conditions.TypeReleaseFailed), + updater.EnsureConditionUnknown(conditions.TypeDeployed), + updater.EnsureDeployedRelease(nil), + ) + return ctrl.Result{}, err + } + u.UpdateStatus(updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, "", ""))) + + for _, h := range r.preHooks { + if err := h.Exec(obj, vals, log); err != nil { + log.Error(err, "pre-release hook failed") + } + } + + switch state { + case stateNeedsInstall: + rel, err = r.doInstall(actionClient, &u, obj, vals.AsMap(), log) + if err != nil { + return ctrl.Result{}, err + } + + case stateNeedsUpgrade: + rel, err = r.doUpgrade(actionClient, &u, obj, vals.AsMap(), log) + if err != nil { + return ctrl.Result{}, err + } + + case stateUnchanged: + if err := r.doReconcile(actionClient, &u, rel, log); err != nil { + return ctrl.Result{}, err + } + default: + return ctrl.Result{}, fmt.Errorf("unexpected release state: %s", state) + } + + for _, h := range r.postHooks { + if err := h.Exec(obj, *rel, log); err != nil { + log.Error(err, "post-release hook failed", "name", rel.Name, "version", rel.Version) + } + } + + ensureDeployedRelease(&u, rel) + u.UpdateStatus( + updater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionFalse, "", "")), + updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionFalse, "", "")), + ) + + return ctrl.Result{RequeueAfter: r.reconcilePeriod}, nil +} + +func (r *Reconciler) getValues(obj *unstructured.Unstructured) (chartutil.Values, error) { + crVals, err := internalvalues.FromUnstructured(obj) + if err != nil { + return chartutil.Values{}, err + } + if err := crVals.ApplyOverrides(r.overrideValues); err != nil { + return chartutil.Values{}, err + } + vals := r.valueMapper.Map(crVals.Map()) + vals, err = chartutil.CoalesceValues(r.chrt, vals) + if err != nil { + return chartutil.Values{}, err + } + return vals, nil +} + +type helmReleaseState string + +const ( + stateNeedsInstall helmReleaseState = "needs install" + stateNeedsUpgrade helmReleaseState = "needs upgrade" + stateUnchanged helmReleaseState = "unchanged" + stateError helmReleaseState = "error" +) + +func (r *Reconciler) handleDeletion(ctx context.Context, actionClient helmclient.ActionInterface, obj *unstructured.Unstructured, log logr.Logger) error { + if !controllerutil.ContainsFinalizer(obj, uninstallFinalizer) { + log.Info("Resource is terminated, skipping reconciliation") + return nil + } + + // Use defer in a closure so that it executes before we wait for + // the deletion of the CR. 
This might seem unnecessary since we're + // applying changes to the CR after it has a deletion timestamp. + // However, if uninstall fails, the finalizer will not be removed + // and we need to be able to update the conditions on the CR to + // indicate that the uninstall failed. + if err := func() (err error) { + uninstallUpdater := updater.New(r.client) + defer func() { + applyErr := uninstallUpdater.Apply(ctx, obj) + if err == nil { + err = applyErr + } + }() + return r.doUninstall(actionClient, &uninstallUpdater, obj, log) + }(); err != nil { + return err + } + + // Since the client is hitting a cache, waiting for the + // deletion here will guarantee that the next reconciliation + // will see that the CR has been deleted and that there's + // nothing left to do. + if err := controllerutil.WaitForDeletion(ctx, r.client, obj); err != nil { + return err + } + return nil +} + +func (r *Reconciler) getReleaseState(client helmclient.ActionInterface, obj metav1.Object, vals map[string]interface{}) (*release.Release, helmReleaseState, error) { + currentRelease, err := client.Get(obj.GetName()) + if err != nil && !errors.Is(err, driver.ErrReleaseNotFound) { + return nil, stateError, err + } + + if errors.Is(err, driver.ErrReleaseNotFound) { + return nil, stateNeedsInstall, nil + } + + var opts []helmclient.UpgradeOption + for name, annot := range r.upgradeAnnotations { + if v, ok := obj.GetAnnotations()[name]; ok { + opts = append(opts, annot.UpgradeOption(v)) + } + } + opts = append(opts, func(u *action.Upgrade) error { + u.DryRun = true + return nil + }) + specRelease, err := client.Upgrade(obj.GetName(), obj.GetNamespace(), r.chrt, vals, opts...) + if err != nil { + return currentRelease, stateError, err + } + if specRelease.Manifest != currentRelease.Manifest || + currentRelease.Info.Status == release.StatusFailed || + currentRelease.Info.Status == release.StatusSuperseded { + return currentRelease, stateNeedsUpgrade, nil + } + return currentRelease, stateUnchanged, nil +} + +func (r *Reconciler) doInstall(actionClient helmclient.ActionInterface, u *updater.Updater, obj *unstructured.Unstructured, vals map[string]interface{}, log logr.Logger) (*release.Release, error) { + var opts []helmclient.InstallOption + for name, annot := range r.installAnnotations { + if v, ok := obj.GetAnnotations()[name]; ok { + opts = append(opts, annot.InstallOption(v)) + } + } + rel, err := actionClient.Install(obj.GetName(), obj.GetNamespace(), r.chrt, vals, opts...) + if err != nil { + u.UpdateStatus( + updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err)), + updater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionTrue, conditions.ReasonInstallError, err)), + ) + return nil, err + } + r.reportOverrideEvents(obj) + + log.Info("Release installed", "name", rel.Name, "version", rel.Version) + return rel, nil +} + +func (r *Reconciler) doUpgrade(actionClient helmclient.ActionInterface, u *updater.Updater, obj *unstructured.Unstructured, vals map[string]interface{}, log logr.Logger) (*release.Release, error) { + var opts []helmclient.UpgradeOption + for name, annot := range r.upgradeAnnotations { + if v, ok := obj.GetAnnotations()[name]; ok { + opts = append(opts, annot.UpgradeOption(v)) + } + } + + rel, err := actionClient.Upgrade(obj.GetName(), obj.GetNamespace(), r.chrt, vals, opts...)
+ if err != nil { + u.UpdateStatus( + updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err)), + updater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionTrue, conditions.ReasonUpgradeError, err)), + ) + return nil, err + } + r.reportOverrideEvents(obj) + + log.Info("Release upgraded", "name", rel.Name, "version", rel.Version) + return rel, nil +} + +func (r *Reconciler) reportOverrideEvents(obj runtime.Object) { + for k, v := range r.overrideValues { + r.eventRecorder.Eventf(obj, "Warning", "ValueOverridden", + "Chart value %q overridden to %q by operator", k, v) + } +} + +func (r *Reconciler) doReconcile(actionClient helmclient.ActionInterface, u *updater.Updater, rel *release.Release, log logr.Logger) error { + // If a change is made to the CR spec that causes a release failure, a + // ConditionReleaseFailed is added to the status conditions. If that change + // is then reverted to its previous state, the operator will stop + // attempting the release and will resume reconciling. In this case, we + // need to set the ConditionReleaseFailed to false because the failing + // release is no longer being attempted. + u.UpdateStatus( + updater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionFalse, "", "")), + ) + + if err := actionClient.Reconcile(rel); err != nil { + u.UpdateStatus(updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err))) + return err + } + + log.Info("Release reconciled", "name", rel.Name, "version", rel.Version) + return nil +} + +func (r *Reconciler) doUninstall(actionClient helmclient.ActionInterface, u *updater.Updater, obj *unstructured.Unstructured, log logr.Logger) error { + var opts []helmclient.UninstallOption + for name, annot := range r.uninstallAnnotations { + if v, ok := obj.GetAnnotations()[name]; ok { + opts = append(opts, annot.UninstallOption(v)) + } + } + + resp, err := actionClient.Uninstall(obj.GetName(), opts...) 
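+	// Uninstall may return driver.ErrReleaseNotFound here, e.g. if the release
+	// was already removed out of band; in that case we fall through and still
+	// remove the finalizer below instead of treating it as a failure.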
+ if errors.Is(err, driver.ErrReleaseNotFound) { + log.Info("Release not found, removing finalizer") + } else if err != nil { + u.UpdateStatus( + updater.EnsureCondition(conditions.Irreconcilable(corev1.ConditionTrue, conditions.ReasonReconcileError, err)), + updater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionTrue, conditions.ReasonUninstallError, err)), + ) + return err + } else { + log.Info("Release uninstalled", "name", resp.Release.Name, "version", resp.Release.Version) + } + u.Update(updater.RemoveFinalizer(uninstallFinalizer)) + u.UpdateStatus( + updater.EnsureCondition(conditions.ReleaseFailed(corev1.ConditionFalse, "", "")), + updater.EnsureCondition(conditions.Deployed(corev1.ConditionFalse, conditions.ReasonUninstallSuccessful, "")), + updater.RemoveDeployedRelease(), + ) + return nil +} + +func (r *Reconciler) validate() error { + if r.gvk == nil { + return errors.New("gvk must not be nil") + } + if r.chrt == nil { + return errors.New("chart must not be nil") + } + return nil +} + +func (r *Reconciler) addDefaults(mgr ctrl.Manager, controllerName string) { + if r.client == nil { + r.client = mgr.GetClient() + } + if r.log == nil { + r.log = ctrl.Log.WithName("controllers").WithName("Helm") + } + if r.actionClientGetter == nil { + actionConfigGetter := helmclient.NewActionConfigGetter(mgr.GetConfig(), mgr.GetRESTMapper(), r.log) + r.actionClientGetter = helmclient.NewActionClientGetter(actionConfigGetter) + } + if r.eventRecorder == nil { + r.eventRecorder = mgr.GetEventRecorderFor(controllerName) + } + if r.valueMapper == nil { + r.valueMapper = internalvalues.DefaultMapper + } +} + +func (r *Reconciler) setupScheme(mgr ctrl.Manager) { + mgr.GetScheme().AddKnownTypeWithName(*r.gvk, &unstructured.Unstructured{}) + metav1.AddToGroupVersion(mgr.GetScheme(), r.gvk.GroupVersion()) +} + +func (r *Reconciler) setupWatches(mgr ctrl.Manager, c controller.Controller) error { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(*r.gvk) + if err := c.Watch( + &source.Kind{Type: obj}, + &sdkhandler.InstrumentedEnqueueRequestForObject{}, + ); err != nil { + return err + } + + secret := &corev1.Secret{} + secret.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Secret", + }) + + if err := c.Watch( + &source.Kind{Type: secret}, + &handler.EnqueueRequestForOwner{ + OwnerType: obj, + IsController: true, + }, + ); err != nil { + return err + } + + if !r.skipDependentWatches { + r.postHooks = append([]hook.PostHook{internalhook.NewDependentResourceWatcher(c, mgr.GetRESTMapper())}, r.postHooks...) 
+ } + return nil +} + +func ensureDeployedRelease(u *updater.Updater, rel *release.Release) { + reason := conditions.ReasonInstallSuccessful + message := "release was successfully installed" + if rel.Version > 1 { + reason = conditions.ReasonUpgradeSuccessful + message = "release was successfully upgraded" + } + if rel.Info != nil && len(rel.Info.Notes) > 0 { + message = rel.Info.Notes + } + u.Update(updater.EnsureFinalizer(uninstallFinalizer)) + u.UpdateStatus( + updater.EnsureCondition(conditions.Deployed(corev1.ConditionTrue, reason, message)), + updater.EnsureDeployedRelease(rel), + ) +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/values/values.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/values/values.go new file mode 100644 index 0000000000..46e1941289 --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/values/values.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 The Operator-SDK Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package values + +import ( + "helm.sh/helm/v3/pkg/chartutil" +) + +type Mapper interface { + Map(chartutil.Values) chartutil.Values +} + +type MapperFunc func(chartutil.Values) chartutil.Values + +func (m MapperFunc) Map(v chartutil.Values) chartutil.Values { + return m(v) +} diff --git a/vendor/github.com/operator-framework/helm-operator-plugins/pkg/watches/watches.go b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/watches/watches.go new file mode 100644 index 0000000000..d7b12cb1be --- /dev/null +++ b/vendor/github.com/operator-framework/helm-operator-plugins/pkg/watches/watches.go @@ -0,0 +1,107 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package watches + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/yaml" +) + +type Watch struct { + schema.GroupVersionKind `json:",inline"` + ChartPath string `json:"chart"` + + WatchDependentResources *bool `json:"watchDependentResources,omitempty"` + OverrideValues map[string]string `json:"overrideValues,omitempty"` + ReconcilePeriod *metav1.Duration `json:"reconcilePeriod,omitempty"` + MaxConcurrentReconciles *int `json:"maxConcurrentReconciles,omitempty"` + + Chart *chart.Chart `json:"-"` +} + +// Load loads a slice of Watches from the watch file at `path`. 
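+// A watches file is a YAML list of entries shaped like the Watch struct
+// above; the GVK and chart path in this sketch are illustrative:
+//
+//	- group: example.com
+//	  version: v1alpha1
+//	  kind: MyApp
+//	  chart: helm-charts/myapp
+//	  overrideValues:
+//	    repo: $IMAGE_REPO
+//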
For each entry +// in the watches file, it verifies the configuration. If an error is +// encountered loading the file or verifying the configuration, it will be +// returned. +func Load(path string) ([]Watch, error) { + b, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + watches := []Watch{} + err = yaml.Unmarshal(b, &watches) + if err != nil { + return nil, err + } + + watchesMap := make(map[schema.GroupVersionKind]Watch) + for i, w := range watches { + if err := verifyGVK(w.GroupVersionKind); err != nil { + return nil, fmt.Errorf("invalid GVK: %s: %w", w.GroupVersionKind, err) + } + + cl, err := loader.Load(w.ChartPath) + if err != nil { + return nil, fmt.Errorf("invalid chart %s: %w", w.ChartPath, err) + } + w.Chart = cl + w.OverrideValues = expandOverrideEnvs(w.OverrideValues) + if w.WatchDependentResources == nil { + trueVal := true + w.WatchDependentResources = &trueVal + } + + if _, ok := watchesMap[w.GroupVersionKind]; ok { + return nil, fmt.Errorf("duplicate GVK: %s", w.GroupVersionKind) + } + + watchesMap[w.GroupVersionKind] = w + watches[i] = w + } + return watches, nil +} + +func expandOverrideEnvs(in map[string]string) map[string]string { + if in == nil { + return nil + } + out := make(map[string]string) + for k, v := range in { + out[k] = os.ExpandEnv(v) + } + return out +} + +func verifyGVK(gvk schema.GroupVersionKind) error { + // A GVK without a group is valid. Certain scenarios may cause a GVK + // without a group to fail in other ways later in the initialization + // process. + if gvk.Version == "" { + return errors.New("version must not be empty") + } + if gvk.Kind == "" { + return errors.New("kind must not be empty") + } + return nil +} diff --git a/vendor/github.com/operator-framework/operator-lib/LICENSE b/vendor/github.com/operator-framework/operator-lib/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lib/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/operator-framework/operator-lib/handler/enqueue_annotation.go b/vendor/github.com/operator-framework/operator-lib/handler/enqueue_annotation.go new file mode 100644 index 0000000000..81aa4d9103 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lib/handler/enqueue_annotation.go @@ -0,0 +1,180 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package handler + +import ( + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + crtHandler "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var log = logf.Log.WithName("event_handler") + +const ( + // NamespacedNameAnnotation is an annotation whose value encodes the name and namespace of a resource to + // reconcile when a resource containing this annotation changes. Valid values are of the form + // `<namespace>/<name>` for namespace-scoped owners and `<name>` for cluster-scoped owners. + NamespacedNameAnnotation = "operator-sdk/primary-resource" + // TypeAnnotation is an annotation whose value encodes the group and kind of a resource to reconcile when a + // resource containing this annotation changes. Valid values are of the form `<Kind>` for resources in the + // core group, and `<Kind>.<group>` for all other resources. + TypeAnnotation = "operator-sdk/primary-resource-type" +) + +// EnqueueRequestForAnnotation enqueues Request containing the Name and Namespace specified in the +// annotations of the object that is the source of the Event. The source of the event triggers reconciliation +// of the parent resource which is identified by annotations. `NamespacedNameAnnotation` and +// `TypeAnnotation` together uniquely identify an owner resource to reconcile. +// +// handler.EnqueueRequestForAnnotation can be used to trigger reconciliation of resources which are +// cross-referenced. This allows a namespace-scoped dependent to trigger reconciliation of an owner +// which is in a different namespace, and a cluster-scoped dependent can trigger the reconciliation +// of a namespace-scoped owner. +// +// As an example, consider the case where we would like to watch clusterroles based on which we reconcile +// namespace-scoped replicasets. With native owner references, this would not be possible since the +// cluster-scoped dependent (clusterroles) is trying to specify a namespace-scoped owner (replicasets). +// Whereas in case of annotations-based handlers, we could implement the following: +// +// if err := c.Watch(&source.Kind{ +// // Watch clusterroles +// Type: &rbacv1.ClusterRole{}}, +// +// // Enqueue ReplicaSet reconcile requests using the namespacedName annotation value in the request. +// &handler.EnqueueRequestForAnnotation{schema.GroupKind{Group:"apps", Kind:"ReplicaSet"}}); err != nil { +// entryLog.Error(err, "unable to watch ClusterRole") +// os.Exit(1) +// } +// } +// +// With this watch, the ReplicaSet reconciler would receive a request to reconcile +// "my-namespace/my-replicaset" based on a change to a ClusterRole that has the following annotations: +// +// annotations: +// operator-sdk/primary-resource:"my-namespace/my-replicaset" +// operator-sdk/primary-resource-type:"ReplicaSet.apps" +// +// Though an annotation-based watch handler removes the boundaries set by native owner reference implementation, +// the garbage collector still respects the scope restrictions. For example, +// if a parent creates a child resource across scopes not supported by owner references, it becomes the +// responsibility of the reconciler to clean up the child resource. Hence, the resource utilizing this handler +// SHOULD ALWAYS BE IMPLEMENTED WITH A FINALIZER.
+type EnqueueRequestForAnnotation struct { + Type schema.GroupKind +} + +var _ crtHandler.EventHandler = &EnqueueRequestForAnnotation{} + +// Create implements EventHandler +func (e *EnqueueRequestForAnnotation) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + if ok, req := e.getAnnotationRequests(evt.Object); ok { + q.Add(req) + } +} + +// Update implements EventHandler +func (e *EnqueueRequestForAnnotation) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + if ok, req := e.getAnnotationRequests(evt.ObjectOld); ok { + q.Add(req) + } + if ok, req := e.getAnnotationRequests(evt.ObjectNew); ok { + q.Add(req) + } +} + +// Delete implements EventHandler +func (e *EnqueueRequestForAnnotation) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + if ok, req := e.getAnnotationRequests(evt.Object); ok { + q.Add(req) + } +} + +// Generic implements EventHandler +func (e *EnqueueRequestForAnnotation) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + if ok, req := e.getAnnotationRequests(evt.Object); ok { + q.Add(req) + } +} + +// getAnnotationRequests checks if the provided object has the annotations so as to enqueue the reconcile request. +func (e *EnqueueRequestForAnnotation) getAnnotationRequests(object metav1.Object) (bool, reconcile.Request) { + if len(object.GetAnnotations()) == 0 { + return false, reconcile.Request{} + } + + if typeString, ok := object.GetAnnotations()[TypeAnnotation]; ok && typeString == e.Type.String() { + namespacedNameString, ok := object.GetAnnotations()[NamespacedNameAnnotation] + if !ok { + log.Info("Unable to find namespaced name annotation for resource", "resource", object) + } + if strings.TrimSpace(namespacedNameString) == "" { + return false, reconcile.Request{} + } + nsn := parseNamespacedName(namespacedNameString) + return true, reconcile.Request{NamespacedName: nsn} + } + return false, reconcile.Request{} +} + +// parseNamespacedName parses the provided string to extract the namespace and name into a +// types.NamespacedName. The edge case of empty string is handled prior to calling this function. +func parseNamespacedName(namespacedNameString string) types.NamespacedName { + values := strings.SplitN(namespacedNameString, "/", 2) + + switch len(values) { + case 1: + return types.NamespacedName{Name: values[0]} + default: + return types.NamespacedName{Namespace: values[0], Name: values[1]} + } +} + +// SetOwnerAnnotations helps in adding 'NamespacedNameAnnotation' and 'TypeAnnotation' to object based on +// the values obtained from owner. The object gets the annotations from owner's namespace, name, group +// and kind. In other terms, object can be said to be the dependent having annotations from the owner. +// When a watch is set on the object, the annotations help to identify the owner and trigger reconciliation. +// Annotations are ALWAYS overwritten. 
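+//
+// A brief usage sketch (owner and dependent are illustrative placeholders for
+// two client.Object values):
+//
+//	if err := handler.SetOwnerAnnotations(owner, dependent); err != nil {
+//		return err
+//	}
+//	// dependent now carries the operator-sdk/primary-resource and
+//	// operator-sdk/primary-resource-type annotations that identify owner.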
+func SetOwnerAnnotations(owner, object client.Object) error { + if owner.GetName() == "" { + return fmt.Errorf("%T does not have a name, cannot call SetOwnerAnnotations", owner) + } + + ownerGK := owner.GetObjectKind().GroupVersionKind().GroupKind() + + if ownerGK.Kind == "" { + return fmt.Errorf("Owner %s Kind not found, cannot call SetOwnerAnnotations", owner.GetName()) + } + + annotations := object.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + + annotations[NamespacedNameAnnotation] = fmt.Sprintf("%s/%s", owner.GetNamespace(), owner.GetName()) + annotations[TypeAnnotation] = ownerGK.String() + + object.SetAnnotations(annotations) + + return nil +} diff --git a/vendor/github.com/operator-framework/operator-lib/handler/instrumented_enqueue_object.go b/vendor/github.com/operator-framework/operator-lib/handler/instrumented_enqueue_object.go new file mode 100644 index 0000000000..e70e104512 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lib/handler/instrumented_enqueue_object.go @@ -0,0 +1,84 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package handler + +import ( + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + "github.com/operator-framework/operator-lib/handler/internal/metrics" +) + +// InstrumentedEnqueueRequestForObject wraps controller-runtime handler for +// "EnqueueRequestForObject", and sets up primary resource metrics on event +// handlers. The main objective of this handler is to set prometheus metrics +// when create/update/delete events occur. These metrics contain the following +// information on resource. +// +// resource_created_at_seconds{"name", "namespace", "group", "version", "kind"} +// +// To call the handler use: +// +// &handler.InstrumentedEnqueueRequestForObject{} +type InstrumentedEnqueueRequestForObject struct { + handler.EnqueueRequestForObject +} + +// Create implements EventHandler, and creates the metrics. +func (h InstrumentedEnqueueRequestForObject) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + setResourceMetric(e.Object) + h.EnqueueRequestForObject.Create(e, q) +} + +// Update implements EventHandler, and updates the metrics. +func (h InstrumentedEnqueueRequestForObject) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + setResourceMetric(e.ObjectOld) + setResourceMetric(e.ObjectNew) + + h.EnqueueRequestForObject.Update(e, q) +} + +// Delete implements EventHandler, and deletes metrics. 
+func (h InstrumentedEnqueueRequestForObject) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + deleteResourceMetric(e.Object) + h.EnqueueRequestForObject.Delete(e, q) +} + +func setResourceMetric(obj client.Object) { + if obj != nil { + labels := getResourceLabels(obj) + m, _ := metrics.ResourceCreatedAt.GetMetricWith(labels) + m.Set(float64(obj.GetCreationTimestamp().UTC().Unix())) + } +} + +func deleteResourceMetric(obj client.Object) { + if obj != nil { + labels := getResourceLabels(obj) + _ = metrics.ResourceCreatedAt.Delete(labels) + } +} + +func getResourceLabels(obj client.Object) map[string]string { + return map[string]string{ + "name": obj.GetName(), + "namespace": obj.GetNamespace(), + "group": obj.GetObjectKind().GroupVersionKind().Group, + "version": obj.GetObjectKind().GroupVersionKind().Version, + "kind": obj.GetObjectKind().GroupVersionKind().Kind, + } +} diff --git a/vendor/github.com/operator-framework/operator-lib/handler/internal/metrics/metrics.go b/vendor/github.com/operator-framework/operator-lib/handler/internal/metrics/metrics.go new file mode 100644 index 0000000000..07d19ea8aa --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lib/handler/internal/metrics/metrics.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +// ResourceCreatedAt creates new prometheus metrics for primary resource, +// with information {"name", "namespace", "group", "version", "kind"} +var ResourceCreatedAt = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "resource_created_at_seconds", + Help: "Timestamp at which a resource was created", +}, []string{"name", "namespace", "group", "version", "kind"}) + +func init() { + metrics.Registry.MustRegister( + ResourceCreatedAt, + ) +} diff --git a/vendor/github.com/rubenv/sql-migrate/.gitignore b/vendor/github.com/rubenv/sql-migrate/.gitignore new file mode 100644 index 0000000000..f543d10c28 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.gitignore @@ -0,0 +1,6 @@ +.*.swp +*.test +.idea + +/sql-migrate/test.db +/test.db diff --git a/vendor/github.com/rubenv/sql-migrate/.travis.yml b/vendor/github.com/rubenv/sql-migrate/.travis.yml new file mode 100644 index 0000000000..a892b87cdd --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.travis.yml @@ -0,0 +1,31 @@ +language: go + +sudo: false + +go: + - "1.13" + - "1.14" + +services: + - mysql + - postgresql + +before_install: + - mysql -e "CREATE DATABASE IF NOT EXISTS test;" -uroot + - mysql -e "CREATE DATABASE IF NOT EXISTS test_env;" -uroot + - psql -c "CREATE DATABASE test;" -U postgres + +install: + - go get -t ./... + - go install ./... + - go get -u github.com/kisielk/errcheck + +script: + - CGO_ENABLED=0 go build -v . + - go test -v ./... 
+  - bash test-integration/postgres.sh
+  - bash test-integration/mysql.sh
+  - bash test-integration/mysql-flag.sh
+  - bash test-integration/mysql-env.sh
+  - bash test-integration/sqlite.sh
+  - errcheck ./...
diff --git a/vendor/github.com/rubenv/sql-migrate/LICENSE b/vendor/github.com/rubenv/sql-migrate/LICENSE
new file mode 100644
index 0000000000..ebf1ed335c
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (C) 2014-2019 by Ruben Vermeersch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/rubenv/sql-migrate/README.md b/vendor/github.com/rubenv/sql-migrate/README.md
new file mode 100644
index 0000000000..36f9940dd8
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/README.md
@@ -0,0 +1,400 @@
+# sql-migrate
+
+> SQL Schema migration tool for [Go](http://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose).
+
+[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.svg)](https://godoc.org/github.com/rubenv/sql-migrate)
+
+Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate).
+
+## Features
+
+* Usable as a CLI tool or as a library
+* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp))
+* Can embed migrations into your application
+* Migrations are defined with SQL for full flexibility
+* Atomic migrations
+* Up/down migrations to allow rollback
+* Supports multiple database types in one project
+* Works great with other libraries such as [sqlx](http://jmoiron.github.io/sqlx/)
+
+## Installation
+
+To install the library and command line program, use the following:
+
+```bash
+go get -v github.com/rubenv/sql-migrate/...
+```
+
+## Usage
+
+### As a standalone tool
+
+```
+$ sql-migrate --help
+usage: sql-migrate [--version] [--help] <command> [<args>]
+
+Available commands are:
+    down      Undo a database migration
+    new       Create a new migration
+    redo      Reapply the last migration
+    status    Show migration status
+    up        Migrates the database to the most recent version available
+```
+
+Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
+
+```yml
+development:
+    dialect: sqlite3
+    datasource: test.db
+    dir: migrations/sqlite3
+
+production:
+    dialect: postgres
+    datasource: dbname=myapp sslmode=disable
+    dir: migrations/postgres
+    table: migrations
+```
+
+(See more examples for different setups [here](test-integration/dbconfig.yml))
+
+The `datasource` field is also passed through `os.ExpandEnv`, so it can reference environment variables. This is useful if you don't want to store credentials in the file:
+
+```yml
+production:
+    dialect: postgres
+    datasource: host=prodhost dbname=proddb user=${DB_USER} password=${DB_PASSWORD} sslmode=required
+    dir: migrations
+    table: migrations
+```
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the `-env` flag (defaults to `development`).
+
+Use the `--help` flag in combination with any of the commands to get an overview of its usage:
+
+```
+$ sql-migrate up --help
+Usage: sql-migrate up [options] ...
+
+  Migrates the database to the most recent version available.
+
+Options:
+
+  -config=dbconfig.yml   Configuration file to use.
+  -env="development"     Environment.
+  -limit=0               Limit the number of migrations (0 = unlimited).
+  -dryrun                Don't apply migrations, just print them.
+```
+
+The `new` command creates a new empty migration template using the following pattern `<current time>-<name>.sql`.
+
+The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
+
+The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+
+Use the `status` command to see the state of the applied migrations:
+
+```bash
+$ sql-migrate status
++---------------+-----------------------------------------+
+|   MIGRATION   |                 APPLIED                 |
++---------------+-----------------------------------------+
+| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+| 2_record.sql  | no                                      |
++---------------+-----------------------------------------+
+```
+
+#### Running Test Integrations
+
+You can see example setups for the different databases by executing the `.sh` files in [test-integration](test-integration/):
+
+```bash
+# Run the mysql-env.sh example (you need to be in the project root directory)
+
+./test-integration/mysql-env.sh
+```
+
+### MySQL Caveat
+
+If you are using MySQL, you must append `?parseTime=true` to the `datasource` configuration. For example:
+
+```yml
+production:
+    dialect: mysql
+    datasource: root@/dbname?parseTime=true
+    dir: migrations/mysql
+    table: migrations
+```
+
+See [here](https://github.com/go-sql-driver/mysql#parsetime) for more information.
+
+### Oracle (oci8)
+
+The Oracle driver is [oci8](https://github.com/mattn/go-oci8). It is not pure Go code and relies on the Oracle Client libraries ([Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html)); more detailed information is in the [oci8 repo](https://github.com/mattn/go-oci8).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+```bash
+go get -tags oracle -v github.com/rubenv/sql-migrate/...
+```
+
+```yml
+development:
+    dialect: oci8
+    datasource: user/password@localhost:1521/sid
+    dir: migrations/oracle
+    table: migrations
+```
+
+### Oracle (godror)
+
+The Oracle driver is [godror](https://github.com/godror/godror). It is not pure Go code and relies on the Oracle Client libraries ([Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html)); more detailed information is in the [godror repository](https://github.com/godror/godror).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+1. Install sql-migrate
+```bash
+go get -tags godror -v github.com/rubenv/sql-migrate/...
+```
+
+2. Download the Oracle Instant Client (the macOS package is shown below; see [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html) for other systems)
+```bash
+wget https://download.oracle.com/otn_software/mac/instantclient/193000/instantclient-basic-macos.x64-19.3.0.0.0dbru.zip
+```
+
+3. Configure the `LD_LIBRARY_PATH` environment variable
+```
+export LD_LIBRARY_PATH=your_oracle_instant_client_path/instantclient_19_3
+```
+
+```yml
+development:
+    dialect: godror
+    datasource: user/password@localhost:1521/sid
+    dir: migrations/oracle
+    table: migrations
+```
+
+
+### As a library
+
+Import sql-migrate into your application:
+
+```go
+import "github.com/rubenv/sql-migrate"
+```
+
+Set up a source of migrations; this can come from memory, from a set of files, from bindata (more on that later), or from any library that implements [`http.FileSystem`](https://godoc.org/net/http#FileSystem):
+
+```go
+// Hardcoded strings in memory:
+migrations := &migrate.MemoryMigrationSource{
+    Migrations: []*migrate.Migration{
+        &migrate.Migration{
+            Id:   "123",
+            Up:   []string{"CREATE TABLE people (id int)"},
+            Down: []string{"DROP TABLE people"},
+        },
+    },
+}
+
+// OR: Read migrations from a folder:
+migrations := &migrate.FileMigrationSource{
+    Dir: "db/migrations",
+}
+
+// OR: Use migrations from a packr box
+migrations := &migrate.PackrMigrationSource{
+    Box: packr.New("migrations", "./migrations"),
+}
+
+// OR: Use pkger which implements `http.FileSystem`
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+    FileSystem: pkger.Dir("/db/migrations"),
+}
+
+// OR: Use migrations from bindata:
+migrations := &migrate.AssetMigrationSource{
+    Asset:    Asset,
+    AssetDir: AssetDir,
+    Dir:      "migrations",
+}
+
+// OR: Read migrations from a `http.FileSystem`
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+    FileSystem: httpFS,
+}
+```
+
+Then use the `Exec` function to upgrade your database:
+
+```go
+db, err := sql.Open("sqlite3", filename)
+if err != nil {
+    // Handle errors!
+}
+
+n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+if err != nil {
+    // Handle errors!
+}
+fmt.Printf("Applied %d migrations!\n", n)
+```
+
+Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
+
+Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation.
+
+## Writing migrations
+
+Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
+ +```sql +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied +CREATE TABLE people (id int); + + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back +DROP TABLE people; +``` + +You can put multiple statements in each block, as long as you end them with a semicolon (`;`). + +You can alternatively set up a separator string that matches an entire line by setting `sqlparse.LineSeparator`. This +can be used to imitate, for example, MS SQL Query Analyzer functionality where commands can be separated by a line with +contents of `GO`. If `sqlparse.LineSeparator` is matched, it will not be included in the resulting migration scripts. + +If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries: + +```sql +-- +migrate Up +CREATE TABLE people (id int); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION do_something() +returns void AS $$ +DECLARE + create_query text; +BEGIN + -- Do something here +END; +$$ +language plpgsql; +-- +migrate StatementEnd + +-- +migrate Down +DROP FUNCTION do_something(); +DROP TABLE people; +``` + +The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. + +Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the `notransaction` option: + +```sql +-- +migrate Up notransaction +CREATE UNIQUE INDEX people_unique_id_idx CONCURRENTLY ON people (id); + +-- +migrate Down +DROP INDEX people_unique_id_idx; +``` + +## Embedding migrations with [packr](https://github.com/gobuffalo/packr) + +If you like your Go applications self-contained (that is: a single binary): use [packr](https://github.com/gobuffalo/packr) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Import the packr package into your application: + +```go +import "github.com/gobuffalo/packr/v2" +``` + +Use the `PackrMigrationSource` in your application to find the migrations: + +```go +migrations := &migrate.PackrMigrationSource{ + Box: packr.New("migrations", "./migrations"), +} +``` + +If you already have a box and would like to use a subdirectory: + +```go +migrations := &migrate.PackrMigrationSource{ + Box: myBox, + Dir: "./migrations", +} +``` + +## Embedding migrations with [bindata](https://github.com/shuLhan/go-bindata) + +As an alternative, but slightly less maintained, you can use [bindata](https://github.com/shuLhan/go-bindata) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Then use bindata to generate a `.go` file with the migrations embedded: + +```bash +go-bindata -pkg myapp -o bindata.go db/migrations/ +``` + +The resulting `bindata.go` file will contain your migrations. Remember to regenerate your `bindata.go` file whenever you add/modify a migration (`go generate` will help here, once it arrives). 
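+
+As an illustrative convenience (assuming the same package name and paths as the command above), a `go:generate` directive can keep the embedded file fresh:
+
+```go
+//go:generate go-bindata -pkg myapp -o bindata.go db/migrations/
+```
+
+With that directive in place, running `go generate ./...` before building regenerates `bindata.go` from the current migration files.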
+
+Use the `AssetMigrationSource` in your application to find the migrations:
+
+```go
+migrations := &migrate.AssetMigrationSource{
+    Asset:    Asset,
+    AssetDir: AssetDir,
+    Dir:      "db/migrations",
+}
+```
+
+Both `Asset` and `AssetDir` are functions provided by bindata.
+
+Then proceed as usual.
+
+## Embedding migrations with libraries that implement `http.FileSystem`
+
+You can also embed migrations with any library that implements `http.FileSystem`, like [`vfsgen`](https://github.com/shurcooL/vfsgen), [`parcello`](https://github.com/phogolabs/parcello), or [`go-resources`](https://github.com/omeid/go-resources).
+
+```go
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+    FileSystem: httpFS,
+}
+```
+
+## Extending
+
+Adding a new migration source means implementing `MigrationSource`.
+
+```go
+type MigrationSource interface {
+    FindMigrations() ([]*Migration, error)
+}
+```
+
+The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
+
+## Usage with [sqlx](http://jmoiron.github.io/sqlx/)
+
+This library is compatible with sqlx. When calling migrate just dereference the DB from your `*sqlx.DB`:
+
+```
+n, err := migrate.Exec(db.DB, "sqlite3", migrations, migrate.Up)
+// ^^^ <-- Here db is a *sqlx.DB, the db.DB field is the plain sql.DB
+if err != nil {
+    // Handle errors!
+}
+```
+
+## License
+
+This library is distributed under the [MIT](LICENSE) license.
diff --git a/vendor/github.com/rubenv/sql-migrate/doc.go b/vendor/github.com/rubenv/sql-migrate/doc.go
new file mode 100644
index 0000000000..eb4ed8575a
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/doc.go
@@ -0,0 +1,239 @@
+/*
+
+SQL Schema migration tool for Go.
+
+Key features:
+
+	* Usable as a CLI tool or as a library
+	* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
+	* Can embed migrations into your application
+	* Migrations are defined with SQL for full flexibility
+	* Atomic migrations
+	* Up/down migrations to allow rollback
+	* Supports multiple database types in one project
+
+Installation
+
+To install the library and command line program, use the following:
+
+	go get -v github.com/rubenv/sql-migrate/...
+
+Command-line tool
+
+The main command is called sql-migrate.
+
+	$ sql-migrate --help
+	usage: sql-migrate [--version] [--help] <command> [<args>]
+
+	Available commands are:
+	    down      Undo a database migration
+	    new       Create a new migration
+	    redo      Reapply the last migration
+	    status    Show migration status
+	    up        Migrates the database to the most recent version available
+
+Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
+
+	development:
+	    dialect: sqlite3
+	    datasource: test.db
+	    dir: migrations/sqlite3
+
+	production:
+	    dialect: postgres
+	    datasource: dbname=myapp sslmode=disable
+	    dir: migrations/postgres
+	    table: migrations
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the -env flag (defaults to development).
+
+Use the --help flag in combination with any of the commands to get an overview of its usage:
+
+	$ sql-migrate up --help
+	Usage: sql-migrate up [options] ...
+
+	  Migrates the database to the most recent version available.
+
+	  Options:
+
+	    -config=config.yml     Configuration file to use.
+	    -env="development"     Environment.
+	    -limit=0               Limit the number of migrations (0 = unlimited).
+ -dryrun Don't apply migrations, just print them. + +The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter. + +The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations. + +Use the status command to see the state of the applied migrations: + + $ sql-migrate status + +---------------+-----------------------------------------+ + | MIGRATION | APPLIED | + +---------------+-----------------------------------------+ + | 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC | + | 2_record.sql | no | + +---------------+-----------------------------------------+ + +MySQL Caveat + +If you are using MySQL, you must append ?parseTime=true to the datasource configuration. For example: + + production: + dialect: mysql + datasource: root@/dbname?parseTime=true + dir: migrations/mysql + table: migrations + +See https://github.com/go-sql-driver/mysql#parsetime for more information. + +Library + +Import sql-migrate into your application: + + import "github.com/rubenv/sql-migrate" + +Set up a source of migrations, this can be from memory, from a set of files or from bindata (more on that later): + + // Hardcoded strings in memory: + migrations := &migrate.MemoryMigrationSource{ + Migrations: []*migrate.Migration{ + &migrate.Migration{ + Id: "123", + Up: []string{"CREATE TABLE people (id int)"}, + Down: []string{"DROP TABLE people"}, + }, + }, + } + + // OR: Read migrations from a folder: + migrations := &migrate.FileMigrationSource{ + Dir: "db/migrations", + } + + // OR: Use migrations from bindata: + migrations := &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "migrations", + } + +Then use the Exec function to upgrade your database: + + db, err := sql.Open("sqlite3", filename) + if err != nil { + // Handle errors! + } + + n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up) + if err != nil { + // Handle errors! + } + fmt.Printf("Applied %d migrations!\n", n) + +Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails. + +The full set of capabilities can be found in the API docs below. + +Writing migrations + +Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations. + + -- +migrate Up + -- SQL in section 'Up' is executed when this migration is applied + CREATE TABLE people (id int); + + + -- +migrate Down + -- SQL section 'Down' is executed when this migration is rolled back + DROP TABLE people; + +You can put multiple statements in each block, as long as you end them with a semicolon (;). + +If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries: + + -- +migrate Up + CREATE TABLE people (id int); + + -- +migrate StatementBegin + CREATE OR REPLACE FUNCTION do_something() + returns void AS $$ + DECLARE + create_query text; + BEGIN + -- Do something here + END; + $$ + language plpgsql; + -- +migrate StatementEnd + + -- +migrate Down + DROP FUNCTION do_something(); + DROP TABLE people; + +The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. 
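+
+For illustration (assumed file names; the naming scheme is a convention, not something the tool enforces), timestamp-prefixed files in a FileMigrationSource directory are applied in lexical order:
+
+	// db/migrations/20140913081906_initial.sql is applied before
+	// db/migrations/20140913081912_record.sql
+	migrations := &migrate.FileMigrationSource{
+		Dir: "db/migrations",
+	}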
+ +Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the notransaction option: + + -- +migrate Up notransaction + CREATE UNIQUE INDEX people_unique_id_idx CONCURRENTLY ON people (id); + + -- +migrate Down + DROP INDEX people_unique_id_idx; + +Embedding migrations with packr + +If you like your Go applications self-contained (that is: a single binary): use packr (https://github.com/gobuffalo/packr) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Use the PackrMigrationSource in your application to find the migrations: + + migrations := &migrate.PackrMigrationSource{ + Box: packr.NewBox("./migrations"), + } + +If you already have a box and would like to use a subdirectory: + + migrations := &migrate.PackrMigrationSource{ + Box: myBox, + Dir: "./migrations", + } + +Embedding migrations with bindata + +As an alternative, but slightly less maintained, you can use bindata (https://github.com/shuLhan/go-bindata) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Then use bindata to generate a .go file with the migrations embedded: + + go-bindata -pkg myapp -o bindata.go db/migrations/ + +The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate will help here, once it arrives). + +Use the AssetMigrationSource in your application to find the migrations: + + migrations := &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "db/migrations", + } + +Both Asset and AssetDir are functions provided by bindata. + +Then proceed as usual. + +Extending + +Adding a new migration source means implementing MigrationSource. + + type MigrationSource interface { + FindMigrations() ([]*Migration, error) + } + +The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field. 
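+
+A minimal sketch of a custom source (an assumed example, not part of this package; it serves a fixed, pre-sorted slice):
+
+	type StaticMigrationSource struct {
+		Migrations []*migrate.Migration
+	}
+
+	func (s StaticMigrationSource) FindMigrations() ([]*migrate.Migration, error) {
+		return s.Migrations, nil
+	}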
+*/ +package migrate diff --git a/vendor/github.com/rubenv/sql-migrate/go.mod b/vendor/github.com/rubenv/sql-migrate/go.mod new file mode 100644 index 0000000000..1ef1c6ba94 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/go.mod @@ -0,0 +1,20 @@ +module github.com/rubenv/sql-migrate + +go 1.11 + +require ( + github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0 + github.com/go-sql-driver/mysql v1.4.1 + github.com/gobuffalo/packr/v2 v2.7.1 + github.com/godror/godror v0.13.3 + github.com/lib/pq v1.2.0 + github.com/mattn/go-oci8 v0.0.7 + github.com/mattn/go-sqlite3 v1.12.0 + github.com/mitchellh/cli v1.0.0 + github.com/olekukonko/tablewriter v0.0.2 + github.com/ziutek/mymysql v1.5.4 // indirect + google.golang.org/appengine v1.6.5 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 + gopkg.in/gorp.v1 v1.7.2 + gopkg.in/yaml.v2 v2.2.5 +) diff --git a/vendor/github.com/rubenv/sql-migrate/go.sum b/vendor/github.com/rubenv/sql-migrate/go.sum new file mode 100644 index 0000000000..3501c69140 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/go.sum @@ -0,0 +1,418 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= 
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/packd v0.3.0 h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4= +github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= +github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= +github.com/godror/godror v0.13.3 h1:4A5GLGAJTSuELw1NThqY5bINYB+mqrln+kF5C2vuyCs= +github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf 
v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-oci8 v0.0.7 h1:BBXYpvzPO43QNTLDEivPFteeFZ9nKA6JQ6eifpxOmio= +github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.12.0/go.mod 
h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= 
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec 
v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f h1:68K/z8GLUxV76xGSqwTWw2gyk/jwn79LUL43rES2g8o= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api 
v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= +gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go
new file mode 100644
index 0000000000..314007d547
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/migrate.go
@@ -0,0 +1,768 @@
+package migrate
+
+import (
+	"bytes"
+	"database/sql"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"path"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/rubenv/sql-migrate/sqlparse"
+	"gopkg.in/gorp.v1"
+)
+
+type MigrationDirection int
+
+const (
+	Up MigrationDirection = iota
+	Down
+)
+
+// MigrationSet provides database parameters for a migration execution
+type MigrationSet struct {
+	// TableName is the name of the table used to store migration info.
+	TableName string
+	// SchemaName is the schema in which the migration table is referenced.
+	SchemaName string
+	// IgnoreUnknown skips the check to see if there is a migration
+	// run in the database that is not in MigrationSource.
+	//
+	// This should be used sparingly as it removes a safety check.
+	IgnoreUnknown bool
+}
+
+var migSet = MigrationSet{}
+
+// getTableName returns the name of the migration table, defaulting to
+// "gorp_migrations" when no table name has been set.
+func (ms MigrationSet) getTableName() string {
+	if ms.TableName == "" {
+		return "gorp_migrations"
+	}
+	return ms.TableName
+}
+
+var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`)
+
+// PlanError is returned when no migration plan could be created between the sets
+// of already applied migrations and those currently found. For example, when the database
+// contains a migration which is not among the migrations found for an operation.
+type PlanError struct {
+	Migration    *Migration
+	ErrorMessage string
+}
+
+func newPlanError(migration *Migration, errorMessage string) error {
+	return &PlanError{
+		Migration:    migration,
+		ErrorMessage: errorMessage,
+	}
+}
+
+func (p *PlanError) Error() string {
+	return fmt.Sprintf("Unable to create migration plan because of %s: %s",
+		p.Migration.Id, p.ErrorMessage)
+}
+
+// TxError is returned when any error is encountered during a database
+// transaction. It contains the relevant *Migration and notes its Id in the
+// Error function output.
+type TxError struct {
+	Migration *Migration
+	Err       error
+}
+
+func newTxError(migration *PlannedMigration, err error) error {
+	return &TxError{
+		Migration: migration.Migration,
+		Err:       err,
+	}
+}
+
+func (e *TxError) Error() string {
+	return e.Err.Error() + " handling " + e.Migration.Id
+}
+
+// SetTable sets the name of the table used to store migration info.
+//
+// Should be called before any other call such as (Exec, ExecMax, ...).
+func SetTable(name string) {
+	if name != "" {
+		migSet.TableName = name
+	}
+}
+
+// SetSchema sets the name of the schema in which the migration table is referenced.
+func SetSchema(name string) {
+	if name != "" {
+		migSet.SchemaName = name
+	}
+}
+
+// SetIgnoreUnknown sets the flag that skips the database check for
+// migrations in the database that are not in the migration source.
+//
+// This should be used sparingly as it removes a safety check.
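+//
+// Illustrative usage (our example, not from the upstream docs): call it once
+// during startup, before planning or executing migrations:
+//
+//	migrate.SetIgnoreUnknown(true)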
+func SetIgnoreUnknown(v bool) {
+	migSet.IgnoreUnknown = v
+}
+
+type Migration struct {
+	Id   string
+	Up   []string
+	Down []string
+
+	DisableTransactionUp   bool
+	DisableTransactionDown bool
+}
+
+func (m Migration) Less(other *Migration) bool {
+	switch {
+	case m.isNumeric() && other.isNumeric() && m.VersionInt() != other.VersionInt():
+		return m.VersionInt() < other.VersionInt()
+	case m.isNumeric() && !other.isNumeric():
+		return true
+	case !m.isNumeric() && other.isNumeric():
+		return false
+	default:
+		return m.Id < other.Id
+	}
+}
+
+func (m Migration) isNumeric() bool {
+	return len(m.NumberPrefixMatches()) > 0
+}
+
+func (m Migration) NumberPrefixMatches() []string {
+	return numberPrefixRegex.FindStringSubmatch(m.Id)
+}
+
+func (m Migration) VersionInt() int64 {
+	v := m.NumberPrefixMatches()[1]
+	value, err := strconv.ParseInt(v, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
+	}
+	return value
+}
+
+type PlannedMigration struct {
+	*Migration
+
+	DisableTransaction bool
+	Queries            []string
+}
+
+type byId []*Migration
+
+func (b byId) Len() int           { return len(b) }
+func (b byId) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
+
+type MigrationRecord struct {
+	Id        string    `db:"id"`
+	AppliedAt time.Time `db:"applied_at"`
+}
+
+type OracleDialect struct {
+	gorp.OracleDialect
+}
+
+func (d OracleDialect) IfTableNotExists(command, schema, table string) string {
+	return command
+}
+
+func (d OracleDialect) IfSchemaNotExists(command, schema string) string {
+	return command
+}
+
+func (d OracleDialect) IfTableExists(command, schema, table string) string {
+	return command
+}
+
+var MigrationDialects = map[string]gorp.Dialect{
+	"sqlite3":  gorp.SqliteDialect{},
+	"postgres": gorp.PostgresDialect{},
+	"mysql":    gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"},
+	"mssql":    gorp.SqlServerDialect{},
+	"oci8":     OracleDialect{},
+	"godror":   OracleDialect{},
+}
+
+type MigrationSource interface {
+	// Finds the migrations.
+	//
+	// The resulting slice of migrations should be sorted by Id.
+	FindMigrations() ([]*Migration, error)
+}
+
+// A hardcoded set of migrations, in-memory.
+type MemoryMigrationSource struct {
+	Migrations []*Migration
+}
+
+var _ MigrationSource = (*MemoryMigrationSource)(nil)
+
+func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
+	// Make sure migrations are sorted. To keep MemoryMigrationSource safe for
+	// concurrent use, FindMigrations sorts a copy of m.Migrations rather than
+	// mutating it in place.
+	migrations := make([]*Migration, len(m.Migrations))
+	copy(migrations, m.Migrations)
+	sort.Sort(byId(migrations))
+	return migrations, nil
+}
+
+// A set of migrations loaded from an http.FileSystem.
+type HttpFileSystemMigrationSource struct {
+	FileSystem http.FileSystem
+}
+
+var _ MigrationSource = (*HttpFileSystemMigrationSource)(nil)
+
+func (f HttpFileSystemMigrationSource) FindMigrations() ([]*Migration, error) {
+	return findMigrations(f.FileSystem)
+}
+
+// A set of migrations loaded from a directory.
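+// For example (illustrative; the directory name is our own):
+//
+//	source := migrate.FileMigrationSource{Dir: "db/migrations"}
+//	all, err := source.FindMigrations()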
+type FileMigrationSource struct {
+	Dir string
+}
+
+var _ MigrationSource = (*FileMigrationSource)(nil)
+
+func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
+	filesystem := http.Dir(f.Dir)
+	return findMigrations(filesystem)
+}
+
+func findMigrations(dir http.FileSystem) ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+
+	file, err := dir.Open("/")
+	if err != nil {
+		return nil, err
+	}
+
+	files, err := file.Readdir(0)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, info := range files {
+		if strings.HasSuffix(info.Name(), ".sql") {
+			migration, err := migrationFromFile(dir, info)
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+func migrationFromFile(dir http.FileSystem, info os.FileInfo) (*Migration, error) {
+	path := fmt.Sprintf("/%s", strings.TrimPrefix(info.Name(), "/"))
+	file, err := dir.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("Error while opening %s: %s", info.Name(), err)
+	}
+	defer func() { _ = file.Close() }()
+
+	migration, err := ParseMigration(info.Name(), file)
+	if err != nil {
+		return nil, fmt.Errorf("Error while parsing %s: %s", info.Name(), err)
+	}
+	return migration, nil
+}
+
+// Migrations from a bindata asset set.
+type AssetMigrationSource struct {
+	// Asset should return the content of the file at path, if it exists.
+	Asset func(path string) ([]byte, error)
+
+	// AssetDir should return the list of files in the path.
+	AssetDir func(path string) ([]string, error)
+
+	// Path in the bindata to use.
+	Dir string
+}
+
+var _ MigrationSource = (*AssetMigrationSource)(nil)
+
+func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+
+	files, err := a.AssetDir(a.Dir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, name := range files {
+		if strings.HasSuffix(name, ".sql") {
+			file, err := a.Asset(path.Join(a.Dir, name))
+			if err != nil {
+				return nil, err
+			}
+
+			migration, err := ParseMigration(name, bytes.NewReader(file))
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+// Avoids pulling in the packr library for everyone; mimics the bits of
+// packr.Box that we need.
+type PackrBox interface {
+	List() []string
+	Find(name string) ([]byte, error)
+}
+
+// Migrations from a packr box.
+type PackrMigrationSource struct {
+	Box PackrBox
+
+	// Path in the box to use.
+	Dir string
+}
+
+var _ MigrationSource = (*PackrMigrationSource)(nil)
+
+func (p PackrMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+	items := p.Box.List()
+
+	prefix := ""
+	dir := path.Clean(p.Dir)
+	if dir != "."
{ + prefix = fmt.Sprintf("%s/", dir) + } + + for _, item := range items { + if !strings.HasPrefix(item, prefix) { + continue + } + name := strings.TrimPrefix(item, prefix) + if strings.Contains(name, "/") { + continue + } + + if strings.HasSuffix(name, ".sql") { + file, err := p.Box.Find(item) + if err != nil { + return nil, err + } + + migration, err := ParseMigration(name, bytes.NewReader(file)) + if err != nil { + return nil, err + } + + migrations = append(migrations, migration) + } + } + + // Make sure migrations are sorted + sort.Sort(byId(migrations)) + + return migrations, nil +} + +// Migration parsing +func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) { + m := &Migration{ + Id: id, + } + + parsed, err := sqlparse.ParseMigration(r) + if err != nil { + return nil, fmt.Errorf("Error parsing migration (%s): %s", id, err) + } + + m.Up = parsed.UpStatements + m.Down = parsed.DownStatements + + m.DisableTransactionUp = parsed.DisableTransactionUp + m.DisableTransactionDown = parsed.DisableTransactionDown + + return m, nil +} + +type SqlExecutor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Insert(list ...interface{}) error + Delete(list ...interface{}) (int64, error) +} + +// Execute a set of migrations +// +// Returns the number of applied migrations. +func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ExecMax(db, dialect, m, dir, 0) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ms.ExecMax(db, dialect, m, dir, 0) +} + +// Execute a set of migrations +// +// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec). +// +// Returns the number of applied migrations. +func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return migSet.ExecMax(db, dialect, m, dir, max) +} + +// Returns the number of applied migrations. 
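+//
+// Illustrative call (our example, not from the upstream docs):
+//
+//	ms := migrate.MigrationSet{TableName: "schema_migrations"}
+//	n, err := ms.ExecMax(db, "postgres", source, migrate.Up, 1)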
+func (ms MigrationSet) ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := ms.PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + + // Apply migrations + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + + if migration.DisableTransaction { + executor = dbMap + } else { + executor, err = dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + } + + for _, stmt := range migration.Queries { + // remove the semicolon from stmt, fix ORA-00922 issue in database oracle + stmt = strings.TrimSuffix(stmt, "\n") + stmt = strings.TrimSuffix(stmt, " ") + stmt = strings.TrimSuffix(stmt, ";") + if _, err := executor.Exec(stmt); err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + } + + switch dir { + case Up: + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + case Down: + _, err := executor.Delete(&MigrationRecord{ + Id: migration.Id, + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + default: + panic("Not possible") + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Plan a migration. +func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + return migSet.PlanMigration(db, dialect, m, dir, max) +} + +func (ms MigrationSet) PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, nil, err + } + + migrations, err := m.FindMigrations() + if err != nil { + return nil, nil, err + } + + var migrationRecords []MigrationRecord + _, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()))) + if err != nil { + return nil, nil, err + } + + // Sort migrations that have been run by Id. + var existingMigrations []*Migration + for _, migrationRecord := range migrationRecords { + existingMigrations = append(existingMigrations, &Migration{ + Id: migrationRecord.Id, + }) + } + sort.Sort(byId(existingMigrations)) + + // Make sure all migrations in the database are among the found migrations which + // are to be applied. + if !ms.IgnoreUnknown { + migrationsSearch := make(map[string]struct{}) + for _, migration := range migrations { + migrationsSearch[migration.Id] = struct{}{} + } + for _, existingMigration := range existingMigrations { + if _, ok := migrationsSearch[existingMigration.Id]; !ok { + return nil, nil, newPlanError(existingMigration, "unknown migration in database") + } + } + } + + // Get last migration that was run + record := &Migration{} + if len(existingMigrations) > 0 { + record = existingMigrations[len(existingMigrations)-1] + } + + result := make([]*PlannedMigration, 0) + + // Add missing migrations up to the last run migration. 
+ // This can happen for example when merges happened. + if len(existingMigrations) > 0 { + result = append(result, ToCatchup(migrations, existingMigrations, record)...) + } + + // Figure out which migrations to apply + toApply := ToApply(migrations, record.Id, dir) + toApplyCount := len(toApply) + if max > 0 && max < toApplyCount { + toApplyCount = max + } + for _, v := range toApply[0:toApplyCount] { + + if dir == Up { + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Up, + DisableTransaction: v.DisableTransactionUp, + }) + } else if dir == Down { + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Down, + DisableTransaction: v.DisableTransactionDown, + }) + } + } + + return result, dbMap, nil +} + +// Skip a set of migrations +// +// Will skip at most `max` migrations. Pass 0 for no limit. +// +// Returns the number of skipped migrations. +func SkipMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + + // Skip migrations + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + + if migration.DisableTransaction { + executor = dbMap + } else { + executor, err = dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + } + + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Filter a slice of migrations into ones that should be applied. 
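+//
+// Worked example (added for clarity): given migrations with Ids
+// ["1", "2", "3"] and current "2", Up yields ["3"], while Down yields
+// ["2", "1"], i.e. the already-applied migrations in reverse order.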
+func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration { + var index = -1 + if current != "" { + for index < len(migrations)-1 { + index++ + if migrations[index].Id == current { + break + } + } + } + + if direction == Up { + return migrations[index+1:] + } else if direction == Down { + if index == -1 { + return []*Migration{} + } + + // Add in reverse order + toApply := make([]*Migration, index+1) + for i := 0; i < index+1; i++ { + toApply[index-i] = migrations[i] + } + return toApply + } + + panic("Not possible") +} + +func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration { + missing := make([]*PlannedMigration, 0) + for _, migration := range migrations { + found := false + for _, existing := range existingMigrations { + if existing.Id == migration.Id { + found = true + break + } + } + if !found && migration.Less(lastRun) { + missing = append(missing, &PlannedMigration{ + Migration: migration, + Queries: migration.Up, + DisableTransaction: migration.DisableTransactionUp, + }) + } + } + return missing +} + +func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + return migSet.GetMigrationRecords(db, dialect) +} + +func (ms MigrationSet) GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, err + } + + var records []*MigrationRecord + query := fmt.Sprintf("SELECT * FROM %s ORDER BY %s ASC", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()), dbMap.Dialect.QuoteField("id")) + _, err = dbMap.Select(&records, query) + if err != nil { + return nil, err + } + + return records, nil +} + +func (ms MigrationSet) getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) { + d, ok := MigrationDialects[dialect] + if !ok { + return nil, fmt.Errorf("Unknown dialect: %s", dialect) + } + + // When using the mysql driver, make sure that the parseTime option is + // configured, otherwise it won't map time columns to time.Time. See + // https://github.com/rubenv/sql-migrate/issues/2 + if dialect == "mysql" { + var out *time.Time + err := db.QueryRow("SELECT NOW()").Scan(&out) + if err != nil { + if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" || + err.Error() == "sql: Scan error on column index 0: unsupported Scan, storing driver.Value type []uint8 into type *time.Time" || + err.Error() == "sql: Scan error on column index 0, name \"NOW()\": unsupported Scan, storing driver.Value type []uint8 into type *time.Time" { + return nil, errors.New(`Cannot parse dates. + +Make sure that the parseTime option is supplied to your database connection. +Check https://github.com/go-sql-driver/mysql#parsetime for more info.`) + } else { + return nil, err + } + } + } + + // Create migration database map + dbMap := &gorp.DbMap{Db: db, Dialect: d} + table := dbMap.AddTableWithNameAndSchema(MigrationRecord{}, ms.SchemaName, ms.getTableName()).SetKeys(false, "Id") + //dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds)) + + if dialect == "oci8" || dialect == "godror" { + table.ColMap("Id").SetMaxSize(4000) + } + + err := dbMap.CreateTablesIfNotExists() + if err != nil { + // Oracle database does not support `if not exists`, so use `ORA-00955:` error code + // to check if the table exists. 
+ if (dialect == "oci8" || dialect == "godror") && strings.Contains(err.Error(), "ORA-00955:") { + return dbMap, nil + } + return nil, err + } + + return dbMap, nil +} + +// TODO: Run migration + record insert in transaction. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE new file mode 100644 index 0000000000..9c12525138 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (C) 2014-2017 by Ruben Vermeersch +Copyright (C) 2012-2014 by Liam Staskawicz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md new file mode 100644 index 0000000000..fa5341abdb --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md @@ -0,0 +1,7 @@ +# SQL migration parser + +Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser. + +## License + +This library is distributed under the [MIT](LICENSE) license. diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go new file mode 100644 index 0000000000..d336e772ad --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go @@ -0,0 +1,235 @@ +package sqlparse + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + + "strings" +) + +const ( + sqlCmdPrefix = "-- +migrate " + optionNoTransaction = "notransaction" +) + +type ParsedMigration struct { + UpStatements []string + DownStatements []string + + DisableTransactionUp bool + DisableTransactionDown bool +} + +var ( + // LineSeparator can be used to split migrations by an exact line match. This line + // will be removed from the output. If left blank, it is not considered. It is defaulted + // to blank so you will have to set it manually. + // Use case: in MSSQL, it is convenient to separate commands by GO statements like in + // SQL Query Analyzer. + LineSeparator = "" +) + +func errNoTerminator() error { + if len(LineSeparator) == 0 { + return errors.New(`ERROR: The last statement must be ended by a semicolon or '-- +migrate StatementEnd' marker. + See https://github.com/rubenv/sql-migrate for details.`) + } + + return errors.New(fmt.Sprintf(`ERROR: The last statement must be ended by a semicolon, a line whose contents are %q, or '-- +migrate StatementEnd' marker. 
+ See https://github.com/rubenv/sql-migrate for details.`, LineSeparator)) +} + +// Checks the line to see if the line has a statement-ending semicolon +// or if the line contains a double-dash comment. +func endsWithSemicolon(line string) bool { + + prev := "" + scanner := bufio.NewScanner(strings.NewReader(line)) + scanner.Split(bufio.ScanWords) + + for scanner.Scan() { + word := scanner.Text() + if strings.HasPrefix(word, "--") { + break + } + prev = word + } + + return strings.HasSuffix(prev, ";") +} + +type migrationDirection int + +const ( + directionNone migrationDirection = iota + directionUp + directionDown +) + +type migrateCommand struct { + Command string + Options []string +} + +func (c *migrateCommand) HasOption(opt string) bool { + for _, specifiedOption := range c.Options { + if specifiedOption == opt { + return true + } + } + + return false +} + +func parseCommand(line string) (*migrateCommand, error) { + cmd := &migrateCommand{} + + if !strings.HasPrefix(line, sqlCmdPrefix) { + return nil, errors.New("ERROR: not a sql-migrate command") + } + + fields := strings.Fields(line[len(sqlCmdPrefix):]) + if len(fields) == 0 { + return nil, errors.New(`ERROR: incomplete migration command`) + } + + cmd.Command = fields[0] + + cmd.Options = fields[1:] + + return cmd, nil +} + +// Split the given sql script into individual statements. +// +// The base case is to simply split on semicolons, as these +// naturally terminate a statement. +// +// However, more complex cases like pl/pgsql can have semicolons +// within a statement. For these cases, we provide the explicit annotations +// 'StatementBegin' and 'StatementEnd' to allow the script to +// tell us to ignore semicolons. +func ParseMigration(r io.ReadSeeker) (*ParsedMigration, error) { + p := &ParsedMigration{} + + _, err := r.Seek(0, 0) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + scanner := bufio.NewScanner(r) + scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) + + statementEnded := false + ignoreSemicolons := false + currentDirection := directionNone + + for scanner.Scan() { + line := scanner.Text() + // ignore comment except beginning with '-- +' + if strings.HasPrefix(line, "-- ") && !strings.HasPrefix(line, "-- +") { + continue + } + + // handle any migrate-specific commands + if strings.HasPrefix(line, sqlCmdPrefix) { + cmd, err := parseCommand(line) + if err != nil { + return nil, err + } + + switch cmd.Command { + case "Up": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionUp + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionUp = true + } + break + + case "Down": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionDown + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionDown = true + } + break + + case "StatementBegin": + if currentDirection != directionNone { + ignoreSemicolons = true + } + break + + case "StatementEnd": + if currentDirection != directionNone { + statementEnded = (ignoreSemicolons == true) + ignoreSemicolons = false + } + break + } + } + + if currentDirection == directionNone { + continue + } + + isLineSeparator := !ignoreSemicolons && len(LineSeparator) > 0 && line == LineSeparator + + if !isLineSeparator && !strings.HasPrefix(line, "-- +") { + if _, err := buf.WriteString(line + "\n"); err != nil { + return nil, err + } + } + + // Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement + // Lines that end with 
semicolon that are in a statement block + // do not conclude statement. + if (!ignoreSemicolons && (endsWithSemicolon(line) || isLineSeparator)) || statementEnded { + statementEnded = false + switch currentDirection { + case directionUp: + p.UpStatements = append(p.UpStatements, buf.String()) + + case directionDown: + p.DownStatements = append(p.DownStatements, buf.String()) + + default: + panic("impossible state") + } + + buf.Reset() + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // diagnose likely migration script errors + if ignoreSemicolons { + return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'") + } + + if currentDirection == directionNone { + return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed. + See https://github.com/rubenv/sql-migrate for details.`) + } + + // allow comment without sql instruction. Example: + // -- +migrate Down + // -- nothing to downgrade! + if len(strings.TrimSpace(buf.String())) > 0 && !strings.HasPrefix(buf.String(), "-- +") { + return nil, errNoTerminator() + } + + return p, nil +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 0000000000..bbe4494c6c --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both tmp and out. 
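+// (Note, added for orientation: Salsa20/8 is the reduced, 8-round variant of
+// the Salsa20 core, which scrypt uses inside its BlockMix construction.)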
+func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + x4 ^= bits.RotateLeft32(x0+x12, 7) + x8 ^= bits.RotateLeft32(x4+x0, 9) + x12 ^= bits.RotateLeft32(x8+x4, 13) + x0 ^= bits.RotateLeft32(x12+x8, 18) + + x9 ^= bits.RotateLeft32(x5+x1, 7) + x13 ^= bits.RotateLeft32(x9+x5, 9) + x1 ^= bits.RotateLeft32(x13+x9, 13) + x5 ^= bits.RotateLeft32(x1+x13, 18) + + x14 ^= bits.RotateLeft32(x10+x6, 7) + x2 ^= bits.RotateLeft32(x14+x10, 9) + x6 ^= bits.RotateLeft32(x2+x14, 13) + x10 ^= bits.RotateLeft32(x6+x2, 18) + + x3 ^= bits.RotateLeft32(x15+x11, 7) + x7 ^= bits.RotateLeft32(x3+x15, 9) + x11 ^= bits.RotateLeft32(x7+x3, 13) + x15 ^= bits.RotateLeft32(x11+x7, 18) + + x1 ^= bits.RotateLeft32(x0+x3, 7) + x2 ^= bits.RotateLeft32(x1+x0, 9) + x3 ^= bits.RotateLeft32(x2+x1, 13) + x0 ^= bits.RotateLeft32(x3+x2, 18) + + x6 ^= bits.RotateLeft32(x5+x4, 7) + x7 ^= bits.RotateLeft32(x6+x5, 9) + x4 ^= bits.RotateLeft32(x7+x6, 13) + x5 ^= bits.RotateLeft32(x4+x7, 18) + + x11 ^= bits.RotateLeft32(x10+x9, 7) + x8 ^= bits.RotateLeft32(x11+x10, 9) + x9 ^= bits.RotateLeft32(x8+x11, 13) + x10 ^= bits.RotateLeft32(x9+x8, 18) + + x12 ^= bits.RotateLeft32(x15+x14, 7) + x13 ^= bits.RotateLeft32(x12+x15, 9) + x14 ^= bits.RotateLeft32(x13+x12, 13) + x15 ^= bits.RotateLeft32(x14+x13, 18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + R := 32 * r + x := xy + y := xy[R:] + + j := 0 + for i := 0; i < R; i++ { + x[i] = binary.LittleEndian.Uint32(b[j:]) + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*R:], x, R) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*R:], y, R) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*R:], R) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*R:], R) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:R] { + binary.LittleEndian.PutUint32(b[j:], v) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning 
+// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/gopkg.in/gorp.v1/.gitignore b/vendor/gopkg.in/gorp.v1/.gitignore new file mode 100644 index 0000000000..8a06adea5c --- /dev/null +++ b/vendor/gopkg.in/gorp.v1/.gitignore @@ -0,0 +1,8 @@ +_test +_testmain.go +_obj +*~ +*.6 +6.out +gorptest.bin +tmp diff --git a/vendor/gopkg.in/gorp.v1/.travis.yml b/vendor/gopkg.in/gorp.v1/.travis.yml new file mode 100644 index 0000000000..fcc2866dc1 --- /dev/null +++ b/vendor/gopkg.in/gorp.v1/.travis.yml @@ -0,0 +1,22 @@ +language: go +go: + - 1.3 + - 1.4 + - tip + +services: + - mysql + - postgres + - sqlite3 + +before_script: + - mysql -e "CREATE DATABASE gorptest;" + - mysql -u root -e "GRANT ALL ON gorptest.* TO gorptest@localhost IDENTIFIED BY 'gorptest'" + - psql -c "CREATE DATABASE gorptest;" -U postgres + - psql -c "CREATE USER "gorptest" WITH SUPERUSER PASSWORD 'gorptest';" -U postgres + - go get github.com/lib/pq + - go get github.com/mattn/go-sqlite3 + - go get github.com/ziutek/mymysql/godrv + - go get github.com/go-sql-driver/mysql + +script: ./test_all.sh diff --git a/vendor/gopkg.in/gorp.v1/LICENSE b/vendor/gopkg.in/gorp.v1/LICENSE new file mode 100644 index 0000000000..b661111d0a --- /dev/null +++ b/vendor/gopkg.in/gorp.v1/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2012 James Cooper + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/gorp.v1/Makefile b/vendor/gopkg.in/gorp.v1/Makefile
new file mode 100644
index 0000000000..edf771c178
--- /dev/null
+++ b/vendor/gopkg.in/gorp.v1/Makefile
@@ -0,0 +1,6 @@
+include $(GOROOT)/src/Make.inc
+
+TARG = github.com/coopernurse/gorp
+GOFILES = gorp.go dialect.go
+
+include $(GOROOT)/src/Make.pkg
\ No newline at end of file
diff --git a/vendor/gopkg.in/gorp.v1/README.md b/vendor/gopkg.in/gorp.v1/README.md
new file mode 100644
index 0000000000..c7cd5d8a2f
--- /dev/null
+++ b/vendor/gopkg.in/gorp.v1/README.md
@@ -0,0 +1,672 @@
+# Go Relational Persistence
+
+[![build status](https://secure.travis-ci.org/go-gorp/gorp.png)](http://travis-ci.org/go-gorp/gorp)
+
+I hesitate to call gorp an ORM. Go doesn't really have objects, at least
+not in the classic Smalltalk/Java sense. There goes the "O". gorp doesn't
+know anything about the relationships between your structs (at least not
+yet). So the "R" is questionable too (but I use it in the name because,
+well, it seemed more clever).
+
+The "M" is alive and well. Given some Go structs and a database, gorp
+should remove a fair amount of boilerplate busy-work from your code.
+
+I hope that gorp saves you time, minimizes the drudgery of getting data
+in and out of your database, and helps your code focus on algorithms,
+not infrastructure.
+
+* Bind struct fields to table columns via API or tag
+* Support for embedded structs
+* Support for transactions
+* Forward engineer db schema from structs (great for unit tests)
+* Pre/post insert/update/delete hooks
+* Automatically generate insert/update/delete statements for a struct
+* Automatic binding of auto increment PKs back to struct after insert
+* Delete by primary key(s)
+* Select by primary key(s)
+* Optional trace sql logging
+* Bind arbitrary SQL queries to a struct
+* Bind slice to SELECT query results without type assertions
+* Use positional or named bind parameters in custom SELECT queries
+* Optional optimistic locking using a version column (for update/deletes)
+
+## Installation
+
+    # install the library:
+    go get gopkg.in/gorp.v1
+
+    // use in your .go code:
+    import (
+        "gopkg.in/gorp.v1"
+    )
+
+## Versioning
+
+This project provides a stable release (v1.x tags) and a bleeding edge codebase (master).
+
+`gopkg.in/gorp.v1` points to the latest v1.x tag. The APIs for v1 are stable and shouldn't change. Development takes place on the master branch. Although the code in master should always compile and test successfully, it might break APIs. We aim to maintain backwards compatibility, but APIs and behaviour might change to fix a bug. Also note that APIs that are new in the master branch can change until released as v2.
+
+If you want to use the bleeding edge, use `github.com/go-gorp/gorp` as the import path.
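+
+As a quick sketch (our illustration, not from the original README), the two
+choices differ only in the import path:
+
+```go
+// Stable v1 series, resolved via gopkg.in:
+import gorp "gopkg.in/gorp.v1"
+
+// Bleeding edge, tracking master (APIs may change):
+// import gorp "github.com/go-gorp/gorp"
+```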
+ +## API Documentation + +Full godoc output from the latest v1 release is available here: + +https://godoc.org/gopkg.in/gorp.v1 + +For the latest code in master: + +https://godoc.org/github.com/go-gorp/gorp + +## Quickstart + +```go +package main + +import ( + "database/sql" + "gopkg.in/gorp.v1" + _ "github.com/mattn/go-sqlite3" + "log" + "time" +) + +func main() { + // initialize the DbMap + dbmap := initDb() + defer dbmap.Db.Close() + + // delete any existing rows + err := dbmap.TruncateTables() + checkErr(err, "TruncateTables failed") + + // create two posts + p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum") + p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum") + + // insert rows - auto increment PKs will be set properly after the insert + err = dbmap.Insert(&p1, &p2) + checkErr(err, "Insert failed") + + // use convenience SelectInt + count, err := dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Rows after inserting:", count) + + // update a row + p2.Title = "Go 1.2 is better than ever" + count, err = dbmap.Update(&p2) + checkErr(err, "Update failed") + log.Println("Rows updated:", count) + + // fetch one row - note use of "post_id" instead of "Id" since column is aliased + // + // Postgres users should use $1 instead of ? placeholders + // See 'Known Issues' below + // + err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id) + checkErr(err, "SelectOne failed") + log.Println("p2 row:", p2) + + // fetch all rows + var posts []Post + _, err = dbmap.Select(&posts, "select * from posts order by post_id") + checkErr(err, "Select failed") + log.Println("All rows:") + for x, p := range posts { + log.Printf(" %d: %v\n", x, p) + } + + // delete row by PK + count, err = dbmap.Delete(&p1) + checkErr(err, "Delete failed") + log.Println("Rows deleted:", count) + + // delete row manually via Exec + _, err = dbmap.Exec("delete from posts where post_id=?", p2.Id) + checkErr(err, "Exec failed") + + // confirm count is zero + count, err = dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Row count - should be zero:", count) + + log.Println("Done!") +} + +type Post struct { + // db tag lets you specify the column name if it differs from the struct field + Id int64 `db:"post_id"` + Created int64 + Title string + Body string +} + +func newPost(title, body string) Post { + return Post{ + Created: time.Now().UnixNano(), + Title: title, + Body: body, + } +} + +func initDb() *gorp.DbMap { + // connect to db using standard Go database/sql API + // use whatever database/sql driver you wish + db, err := sql.Open("sqlite3", "/tmp/post_db.bin") + checkErr(err, "sql.Open failed") + + // construct a gorp DbMap + dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} + + // add a table, setting the table name to 'posts' and + // specifying that the Id property is an auto incrementing PK + dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id") + + // create the table. 
in a production system you'd generally
+	// use a migration tool, or create the tables via scripts
+	err = dbmap.CreateTablesIfNotExists()
+	checkErr(err, "Create tables failed")
+
+	return dbmap
+}
+
+func checkErr(err error, msg string) {
+	if err != nil {
+		log.Fatalln(msg, err)
+	}
+}
+```
+
+## Examples
+
+### Mapping structs to tables
+
+First define some types:
+
+```go
+type Invoice struct {
+	Id       int64
+	Created  int64
+	Updated  int64
+	Memo     string
+	PersonId int64
+}
+
+type Person struct {
+	Id      int64
+	Created int64
+	Updated int64
+	FName   string
+	LName   string
+}
+
+// Example of using tags to alias fields to column names
+// The 'db' value is the column name
+//
+// A hyphen will cause gorp to skip this field, similar to the
+// Go json package.
+//
+// This is equivalent to using the ColMap methods:
+//
+//   table := dbmap.AddTableWithName(Product{}, "product")
+//   table.ColMap("Id").Rename("product_id")
+//   table.ColMap("Price").Rename("unit_price")
+//   table.ColMap("IgnoreMe").SetTransient(true)
+//
+type Product struct {
+	Id       int64  `db:"product_id"`
+	Price    int64  `db:"unit_price"`
+	IgnoreMe string `db:"-"`
+}
+```
+
+Then create a mapper; typically you'd do this once at app startup:
+
+```go
+// connect to db using standard Go database/sql API
+// use whatever database/sql driver you wish
+db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword")
+
+// construct a gorp DbMap
+dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}}
+
+// register the structs you wish to use with gorp
+// you can also use the shorter dbmap.AddTable() if you
+// don't want to override the table name
+//
+// SetKeys(true) means we have an auto increment primary key, which
+// will get automatically bound to your struct post-insert
+//
+t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id")
+t2 := dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id")
+t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id")
+```
+
+### Struct Embedding
+
+gorp supports embedding structs. For example:
+
+```go
+type Names struct {
+	FirstName string
+	LastName  string
+}
+
+type WithEmbeddedStruct struct {
+	Id int64
+	Names
+}
+
+es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}}
+err := dbmap.Insert(es)
+```
+
+See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example.
+
+### Create/Drop Tables ###
+
+Automatically create / drop registered tables. This is useful for unit tests
+but is entirely optional. You can of course use gorp with tables created manually,
+or with a separate migration tool (like goose: https://bitbucket.org/liamstask/goose).
+
+```go
+// create all registered tables
+dbmap.CreateTables()
+
+// same as above, but uses "if not exists" clause to skip tables that are
+// already defined
+dbmap.CreateTablesIfNotExists()
+
+// drop
+dbmap.DropTables()
+```
+
+### SQL Logging
+
+Optionally you can pass in a logger to trace all SQL statements.
+I recommend enabling this initially while you're getting the feel for what
+gorp is doing on your behalf.
+
+Gorp defines a `GorpLogger` interface that Go's built-in `log.Logger` satisfies.
+However, you can write your own `GorpLogger` implementation, or use a package such
+as `glog` if you want more control over how statements are logged.
+ +```go +// Will log all SQL statements + args as they are run +// The first arg is a string prefix to prepend to all log messages +dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds)) + +// Turn off tracing +dbmap.TraceOff() +``` + +### Insert + +```go +// Must declare as pointers so optional callback hooks +// can operate on your data, not copies +inv1 := &Invoice{0, 100, 200, "first order", 0} +inv2 := &Invoice{0, 100, 200, "second order", 0} + +// Insert your rows +err := dbmap.Insert(inv1, inv2) + +// Because we called SetKeys(true) on Invoice, the Id field +// will be populated after the Insert() automatically +fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id) +``` + +### Update + +Continuing the above example, use the `Update` method to modify an Invoice: + +```go +// count is the # of rows updated, which should be 1 in this example +count, err := dbmap.Update(inv1) +``` + +### Delete + +If you have primary key(s) defined for a struct, you can use the `Delete` +method to remove rows: + +```go +count, err := dbmap.Delete(inv1) +``` + +### Select by Key + +Use the `Get` method to fetch a single row by primary key. It returns +nil if no row is found. + +```go +// fetch Invoice with Id=99 +obj, err := dbmap.Get(Invoice{}, 99) +inv := obj.(*Invoice) +``` + +### Ad Hoc SQL + +#### SELECT + +`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice +or a single struct. + +```go +// Select a slice - first return value is not needed when a slice pointer is passed to Select() +var posts []Post +_, err := dbmap.Select(&posts, "select * from post order by id") + +// You can also use primitive types +var ids []string +_, err := dbmap.Select(&ids, "select id from post") + +// Select a single row. +// Returns an error if no row found, or if more than one row is found +var post Post +err := dbmap.SelectOne(&post, "select * from post where id=?", id) +``` + +Want to do joins? Just write the SQL and the struct. gorp will bind them: + +```go +// Define a type for your join +// It *must* contain all the columns in your SELECT statement +// +// The names here should match the aliased column names you specify +// in your SQL - no additional binding work required. simple. +// +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string +} + +// Create some rows +p1 := &Person{0, 0, 0, "bob", "smith"} +dbmap.Insert(p1) + +// notice how we can wire up p1.Id to the invoice easily +inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id} +dbmap.Insert(inv1) + +// Run your query +query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + +// pass a slice to Select() +var list []InvoicePersonView +_, err := dbmap.Select(&list, query) + +// this should test true +expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName} +if reflect.DeepEqual(list[0], expected) { + fmt.Println("Woot! My join worked!") +} +``` + +#### SELECT string or int64 + +gorp provides a few convenience methods for selecting a single string or int64. + +```go +// select single int64 from db (use $1 instead of ? for postgresql) +i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal) + +// select single string from db: +s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal) + +``` + +#### Named bind parameters + +You may use a map or struct to bind parameters by name. This is currently +only supported in SELECT queries. 
+#### Named bind parameters
+
+You may use a map or struct to bind parameters by name. This is currently
+only supported in SELECT queries.
+
+```go
+_, err := dbmap.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{
+	"name": "Rob",
+	"age":  31,
+})
+```
+
+#### UPDATE / DELETE
+
+You can execute raw SQL if you wish. This is particularly useful for batch operations.
+
+```go
+res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10)
+```
+
+### Transactions
+
+You can batch operations into a transaction:
+
+```go
+func InsertInv(dbmap *gorp.DbMap, inv *Invoice, per *Person) error {
+	// Start a new transaction
+	trans, err := dbmap.Begin()
+	if err != nil {
+		return err
+	}
+
+	// (error handling on the inserts is elided for brevity)
+	trans.Insert(per)
+	inv.PersonId = per.Id
+	trans.Insert(inv)
+
+	// if the commit is successful, a nil error is returned
+	return trans.Commit()
+}
+```
+
+### Hooks
+
+Use hooks to update data before/after saving to the db. Good for timestamps:
+
+```go
+// implement the PreInsert and PreUpdate hooks
+func (i *Invoice) PreInsert(s gorp.SqlExecutor) error {
+	i.Created = time.Now().UnixNano()
+	i.Updated = i.Created
+	return nil
+}
+
+func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error {
+	i.Updated = time.Now().UnixNano()
+	return nil
+}
+
+// You can use the SqlExecutor to cascade additional SQL.
+// Take care to avoid cycles. gorp won't prevent them.
+//
+// Here's an example of a cascading delete
+//
+func (p *Person) PreDelete(s gorp.SqlExecutor) error {
+	query := "delete from invoice_test where PersonId=?"
+	_, err := s.Exec(query, p.Id)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+```
+
+Full list of hooks that you can implement:
+
+    PostGet
+    PreInsert
+    PostInsert
+    PreUpdate
+    PostUpdate
+    PreDelete
+    PostDelete
+
+All have the same signature. For example:
+
+    func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error
+
+### Optimistic Locking
+
+gorp provides a simple optimistic locking feature, similar to Java's JPA, that
+will raise an error if you try to update/delete a row whose `version` column
+has a value different from the one in memory. This provides a safe way to do
+"select then update" style operations without explicit read and write locks.
+
+```go
+// Version is an auto-incremented number, managed by gorp.
+// If this property is present on your struct, update
+// operations will be constrained.
+//
+// For example, say we defined Person as:
+
+type Person struct {
+	Id      int64
+	Created int64
+	Updated int64
+	FName   string
+	LName   string
+
+	// automatically used as the Version col
+	// use table.SetVersionCol("columnName") to map a different
+	// struct field as the version field
+	Version int64
+}
+
+p1 := &Person{0, 0, 0, "Bob", "Smith", 0}
+dbmap.Insert(p1) // Version is now 1
+
+obj, err := dbmap.Get(Person{}, p1.Id)
+p2 := obj.(*Person)
+p2.LName = "Edwards"
+dbmap.Update(p2) // Version is now 2
+
+p1.LName = "Howard"
+
+// Raises an error because p1.Version == 1, which is out of date
+_, err = dbmap.Update(p1)
+_, ok := err.(gorp.OptimisticLockError)
+if ok {
+	// should reach this statement
+
+	// in a real app you might reload the row and retry, or
+	// you might propagate this to the user, depending on the desired
+	// semantics
+	fmt.Printf("Tried to update row with stale data: %v\n", err)
+} else {
+	// some other db error occurred - log or return up the stack
+	fmt.Printf("Unknown db err: %v\n", err)
+}
+```
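+As the comment above suggests, a common response to an `OptimisticLockError`
+is to reload the row and retry the change. A minimal sketch of that pattern,
+assuming the `Person` mapping above (the helper name, retry limit, and
+`mutate` callback are illustrative, not part of gorp):
+
+```go
+// updateWithRetry reloads the row and reapplies the change until the
+// update wins the version race, giving up after maxAttempts.
+func updateWithRetry(dbmap *gorp.DbMap, id int64, maxAttempts int, mutate func(*Person)) error {
+	for attempt := 0; attempt < maxAttempts; attempt++ {
+		obj, err := dbmap.Get(Person{}, id)
+		if err != nil {
+			return err
+		}
+		if obj == nil {
+			return fmt.Errorf("person %d not found", id)
+		}
+		p := obj.(*Person)
+		mutate(p) // apply the desired change to the freshly loaded row
+		_, err = dbmap.Update(p)
+		if err == nil {
+			return nil
+		}
+		if _, ok := err.(gorp.OptimisticLockError); !ok {
+			return err // some other db error - don't retry
+		}
+	}
+	return fmt.Errorf("gave up after %d stale updates", maxAttempts)
+}
+```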
+## Database Drivers
+
+gorp uses the Go 1 `database/sql` package. A full list of compliant drivers is available here:
+
+http://code.google.com/p/go-wiki/wiki/SQLDrivers
+
+Sadly, SQL databases differ on various issues. gorp provides a Dialect interface that should be
+implemented per database vendor. Dialects are provided for:
+
+* MySQL
+* PostgreSQL
+* sqlite3
+
+Each of these three databases passes the test suite. See `gorp_test.go` for example
+DSNs for these three databases.
+
+Support is also provided for:
+
+* Oracle (contributed by @klaidliadon)
+* SQL Server (contributed by @qrawl) - use driver: github.com/denisenkom/go-mssqldb
+
+Note that these databases are not covered by CI and I (@coopernurse) have no good way to
+test them locally. So please try them and send patches as needed, but expect a bit more
+unpredictability.
+
+## Known Issues
+
+### SQL placeholder portability
+
+Different databases use different strings to indicate variable placeholders in
+prepared SQL statements. Unlike some database abstraction layers (such as JDBC),
+Go's `database/sql` does not standardize this.
+
+SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` methods delegates
+to a Dialect implementation for each database, and will generate portable SQL.
+
+Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, etc. will not be
+parsed. Consequently you may have portability issues if you write a query like this:
+
+```go
+// works on MySQL and sqlite3, but not with PostgreSQL
+err := dbmap.SelectOne(&val, "select * from foo where id = ?", 30)
+```
+
+In `Select` and `SelectOne` you can use named parameters to work around this.
+The following is portable:
+
+```go
+err := dbmap.SelectOne(&val, "select * from foo where id = :id",
+	map[string]interface{}{"id": 30})
+```
+
+### time.Time and time zones
+
+gorp will pass `time.Time` fields through to the `database/sql` driver, but note that
+the behavior of this type varies across database drivers.
+
+MySQL users should be especially cautious. See: https://github.com/ziutek/mymysql/pull/77
+
+To avoid any potential issues with timezone/DST, consider using an integer field for time
+data and storing UNIX time.
+
+## Running the tests
+
+The included tests may be run against MySQL, PostgreSQL, or sqlite3.
+You must set two environment variables so the test code knows which driver to
+use, and how to connect to your database.
+
+```sh
+# MySQL example:
+export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123
+export GORP_TEST_DIALECT=mysql
+
+# run the tests
+go test
+
+# run the tests and benchmarks
+go test -bench="Bench" -benchtime 10s
+```
+
+Valid `GORP_TEST_DIALECT` values are: "mysql", "postgres", "sqlite3".
+See the `test_all.sh` script for examples of all 3 databases. This is the script I run
+locally to test the library.
+
+## Performance
+
+gorp uses reflection to construct SQL queries and bind parameters. See the
+BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a simple perf test.
+On my MacBook Pro gorp is about 2-3% slower than hand-written SQL.
+
+## Help/Support
+
+IRC: #gorp
+Mailing list: gorp-dev@googlegroups.com
+Bugs/Enhancements: Create a github issue
+
+## Pull requests / Contributions
+
+Contributions are very welcome. Please follow these guidelines:
+
+* Fork the `master` branch and issue pull requests targeting the `master` branch.
+* If you are adding an enhancement, please open an issue first with your proposed change.
+* Changes that break backwards compatibility in the public API are only accepted after
+  discussion on a GitHub issue.
+
+Thanks!
+ +## Contributors + +* matthias-margush - column aliasing via tags +* Rob Figueiredo - @robfig +* Quinn Slack - @sqs diff --git a/vendor/gopkg.in/gorp.v1/dialect.go b/vendor/gopkg.in/gorp.v1/dialect.go new file mode 100644 index 0000000000..5e8fdc6670 --- /dev/null +++ b/vendor/gopkg.in/gorp.v1/dialect.go @@ -0,0 +1,692 @@ +package gorp + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// The Dialect interface encapsulates behaviors that differ across +// SQL databases. At present the Dialect is only used by CreateTables() +// but this could change in the future +type Dialect interface { + + // adds a suffix to any query, usually ";" + QuerySuffix() string + + // ToSqlType returns the SQL column type to use when creating a + // table of the given Go Type. maxsize can be used to switch based on + // size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB, + // or LONGBLOB depending on the maxsize + ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string + + // string to append to primary key column definitions + AutoIncrStr() string + + // string to bind autoincrement columns to. Empty string will + // remove reference to those columns in the INSERT statement. + AutoIncrBindValue() string + + AutoIncrInsertSuffix(col *ColumnMap) string + + // string to append to "create table" statement for vendor specific + // table attributes + CreateTableSuffix() string + + // string to truncate tables + TruncateClause() string + + // bind variable string to use when forming SQL statements + // in many dbs it is "?", but Postgres appears to use $1 + // + // i is a zero based index of the bind variable in this statement + // + BindVar(i int) string + + // Handles quoting of a field name to ensure that it doesn't raise any + // SQL parsing exceptions by using a reserved word as a field name. + QuoteField(field string) string + + // Handles building up of a schema.database string that is compatible with + // the given dialect + // + // schema - The schema that lives in + // table - The table name + QuotedTableForQuery(schema string, table string) string + + // Existance clause for table creation / deletion + IfSchemaNotExists(command, schema string) string + IfTableExists(command, schema, table string) string + IfTableNotExists(command, schema, table string) string +} + +// IntegerAutoIncrInserter is implemented by dialects that can perform +// inserts with automatically incremented integer primary keys. If +// the dialect can handle automatic assignment of more than just +// integers, see TargetedAutoIncrInserter. +type IntegerAutoIncrInserter interface { + InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) +} + +// TargetedAutoIncrInserter is implemented by dialects that can +// perform automatic assignment of any primary key type (i.e. strings +// for uuids, integers for serials, etc). +type TargetedAutoIncrInserter interface { + // InsertAutoIncrToTarget runs an insert operation and assigns the + // automatically generated primary key directly to the passed in + // target. The target should be a pointer to the primary key + // field of the value being inserted. + InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error +} + +func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + res, err := exec.Exec(insertSql, params...) 
+ if err != nil { + return 0, err + } + return res.LastInsertId() +} + +/////////////////////////////////////////////////////// +// sqlite3 // +///////////// + +type SqliteDialect struct { + suffix string +} + +func (d SqliteDialect) QuerySuffix() string { return ";" } + +func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "integer" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return "integer" + case reflect.Float64, reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "blob" + } + } + + switch val.Name() { + case "NullInt64": + return "integer" + case "NullFloat64": + return "real" + case "NullBool": + return "integer" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns autoincrement +func (d SqliteDialect) AutoIncrStr() string { + return "autoincrement" +} + +func (d SqliteDialect) AutoIncrBindValue() string { + return "null" +} + +func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqliteDialect) CreateTableSuffix() string { + return d.suffix +} + +// With sqlite, there technically isn't a TRUNCATE statement, +// but a DELETE FROM uses a truncate optimization: +// http://www.sqlite.org/lang_delete.html +func (d SqliteDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqliteDialect) BindVar(i int) string { + return "?" +} + +func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) 
+} + +func (d SqliteDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +// sqlite does not have schemas like PostgreSQL does, so just escape it like normal +func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { + return d.QuoteField(table) +} + +func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SqliteDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// PostgreSQL // +//////////////// + +type PostgresDialect struct { + suffix string +} + +func (d PostgresDialect) QuerySuffix() string { return ";" } + +func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "Time": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d PostgresDialect) AutoIncrStr() string { + return "" +} + +func (d PostgresDialect) AutoIncrBindValue() string { + return "default" +} + +func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + col.ColumnName +} + +// Returns suffix +func (d PostgresDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d PostgresDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d PostgresDialect) BindVar(i int) string { + return fmt.Sprintf("$%d", i+1) +} + +func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { + rows, err := exec.query(insertSql, params...) + if err != nil { + return err + } + defer rows.Close() + + if rows.Next() { + err := rows.Scan(target) + return err + } + + return errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error()) +} + +func (d PostgresDialect) QuoteField(f string) string { + return `"` + strings.ToLower(f) + `"` +} + +func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d PostgresDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d PostgresDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d PostgresDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// MySQL // +/////////// + +// Implementation of Dialect for MySQL databases. +type MySQLDialect struct { + + // Engine is the storage engine to use "InnoDB" vs "MyISAM" for example + Engine string + + // Encoding is the character encoding to use for created tables + Encoding string +} + +func (d MySQLDialect) QuerySuffix() string { return ";" } + +func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "tinyint unsigned" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "smallint unsigned" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "int unsigned" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint unsigned" + case reflect.Float64, reflect.Float32: + return "double" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "mediumblob" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns auto_increment +func (d MySQLDialect) AutoIncrStr() string { + return "auto_increment" +} + +func (d MySQLDialect) AutoIncrBindValue() string { + return "null" +} + +func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns engine=%s charset=%s based on values stored on struct +func (d MySQLDialect) CreateTableSuffix() string { + if d.Engine == "" || d.Encoding == "" { + msg := "gorp - undefined" + + if d.Engine == "" { + msg += " MySQLDialect.Engine" + } + if d.Engine == "" && d.Encoding == "" { + msg += "," + } + if d.Encoding == "" { + msg += " MySQLDialect.Encoding" + } + msg += ". Check that your MySQLDialect was correctly initialized when declared." + panic(msg) + } + + return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding) +} + +func (d MySQLDialect) TruncateClause() string { + return "truncate" +} + +// Returns "?" +func (d MySQLDialect) BindVar(i int) string { + return "?" +} + +func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d MySQLDialect) QuoteField(f string) string { + return "`" + f + "`" +} + +func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d MySQLDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d MySQLDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d MySQLDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} + +/////////////////////////////////////////////////////// +// Sql Server // +//////////////// + +// Implementation of Dialect for Microsoft SQL Server databases. +// Tested on SQL Server 2008 with driver: github.com/denisenkom/go-mssqldb + +type SqlServerDialect struct { + suffix string +} + +func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "bit" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "smallint" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "int" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "bigint" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint" + case reflect.Float32: + return "real" + case reflect.Float64: + return "float(53)" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "varbinary" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "float(53)" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns auto_increment +func (d SqlServerDialect) AutoIncrStr() string { + return "identity(0,1)" +} + +// Empty string removes autoincrement columns from the INSERT statements. +func (d SqlServerDialect) AutoIncrBindValue() string { + return "" +} + +func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqlServerDialect) CreateTableSuffix() string { + + return d.suffix +} + +func (d SqlServerDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqlServerDialect) BindVar(i int) string { + return "?" +} + +func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqlServerDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return table + } + return schema + "." 
+ table +} + +func (d SqlServerDialect) QuerySuffix() string { return ";" } + +func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string { + s := fmt.Sprintf("if not exists (select name from sys.schemas where name = '%s') %s", schema, command) + return s +} + +func (d SqlServerDialect) IfTableExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema) + } + s := fmt.Sprintf("if exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command) + return s +} + +func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("table_schema = '%s' and ", schema) + } + s := fmt.Sprintf("if not exists (select * from information_schema.tables where %stable_name = '%s') %s", schema_clause, table, command) + return s +} + +/////////////////////////////////////////////////////// +// Oracle // +/////////// + +// Implementation of Dialect for Oracle databases. +type OracleDialect struct{} + +func (d OracleDialect) QuerySuffix() string { return "" } + +func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "NullTime", "Time": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d OracleDialect) AutoIncrStr() string { + return "" +} + +func (d OracleDialect) AutoIncrBindValue() string { + return "default" +} + +func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + col.ColumnName +} + +// Returns suffix +func (d OracleDialect) CreateTableSuffix() string { + return "" +} + +func (d OracleDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d OracleDialect) BindVar(i int) string { + return fmt.Sprintf(":%d", i+1) +} + +func (d OracleDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + rows, err := exec.query(insertSql, params...) + if err != nil { + return 0, err + } + defer rows.Close() + + if rows.Next() { + var id int64 + err := rows.Scan(&id) + return id, err + } + + return 0, errors.New("No serial value returned for insert: " + insertSql + " Encountered error: " + rows.Err().Error()) +} + +func (d OracleDialect) QuoteField(f string) string { + return `"` + strings.ToUpper(f) + `"` +} + +func (d OracleDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d OracleDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d OracleDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d OracleDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/gopkg.in/gorp.v1/errors.go b/vendor/gopkg.in/gorp.v1/errors.go new file mode 100644 index 0000000000..356d684757 --- /dev/null +++ b/vendor/gopkg.in/gorp.v1/errors.go @@ -0,0 +1,26 @@ +package gorp + +import ( + "fmt" +) + +// A non-fatal error, when a select query returns columns that do not exist +// as fields in the struct it is being mapped to +type NoFieldInTypeError struct { + TypeName string + MissingColNames []string +} + +func (err *NoFieldInTypeError) Error() string { + return fmt.Sprintf("gorp: No fields %+v in type %s", err.MissingColNames, err.TypeName) +} + +// returns true if the error is non-fatal (ie, we shouldn't immediately return) +func NonFatalError(err error) bool { + switch err.(type) { + case *NoFieldInTypeError: + return true + default: + return false + } +} diff --git a/vendor/gopkg.in/gorp.v1/gorp.go b/vendor/gopkg.in/gorp.v1/gorp.go new file mode 100644 index 0000000000..448d737d9c --- /dev/null +++ b/vendor/gopkg.in/gorp.v1/gorp.go @@ -0,0 +1,2083 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/coopernurse/gorp +// +package gorp + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "log" + "os" + "reflect" + "regexp" + "strings" + "sync" + "time" +) + +// Oracle String (empty string is null) +type OracleString struct { + sql.NullString +} + +// Scan implements the Scanner interface. +func (os *OracleString) Scan(value interface{}) error { + if value == nil { + os.String, os.Valid = "", false + return nil + } + os.Valid = true + return os.NullString.Scan(value) +} + +// Value implements the driver Valuer interface. +func (os OracleString) Value() (driver.Value, error) { + if !os.Valid || os.String == "" { + return nil, nil + } + return os.String, nil +} + +// A nullable Time value +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +var zeroVal reflect.Value +var versFieldConst = "[gorp_ver_field]" + +// OptimisticLockError is returned by Update() or Delete() if the +// struct being modified has a Version field and the value is not equal to +// the current value in the database +type OptimisticLockError struct { + // Table name where the lock error occurred + TableName string + + // Primary key values of the row being updated/deleted + Keys []interface{} + + // true if a row was found with those keys, indicating the + // LocalVersion is stale. 
false if no value was found with those + // keys, suggesting the row has been deleted since loaded, or + // was never inserted to begin with + RowExists bool + + // Version value on the struct passed to Update/Delete. This value is + // out of sync with the database. + LocalVersion int64 +} + +// Error returns a description of the cause of the lock error +func (e OptimisticLockError) Error() string { + if e.RowExists { + return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion) + } + + return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys) +} + +// The TypeConverter interface provides a way to map a value of one +// type to another type when persisting to, or loading from, a database. +// +// Example use cases: Implement type converter to convert bool types to "y"/"n" strings, +// or serialize a struct member as a JSON blob. +type TypeConverter interface { + // ToDb converts val to another type. Called before INSERT/UPDATE operations + ToDb(val interface{}) (interface{}, error) + + // FromDb returns a CustomScanner appropriate for this type. This will be used + // to hold values returned from SELECT queries. + // + // In particular the CustomScanner returned should implement a Binder + // function appropriate for the Go type you wish to convert the db value to + // + // If bool==false, then no custom scanner will be used for this field. + FromDb(target interface{}) (CustomScanner, bool) +} + +// CustomScanner binds a database column value to a Go type +type CustomScanner struct { + // After a row is scanned, Holder will contain the value from the database column. + // Initialize the CustomScanner with the concrete Go type you wish the database + // driver to scan the raw column into. + Holder interface{} + // Target typically holds a pointer to the target struct field to bind the Holder + // value to. + Target interface{} + // Binder is a custom function that converts the holder value to the target type + // and sets target accordingly. This function should return error if a problem + // occurs converting the holder to the target. + Binder func(holder interface{}, target interface{}) error +} + +// Bind is called automatically by gorp after Scan() +func (me CustomScanner) Bind() error { + return me.Binder(me.Holder, me.Target) +} + +// DbMap is the root gorp mapping object. Create one of these for each +// database schema you wish to map. Each DbMap contains a list of +// mapped tables. +// +// Example: +// +// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} +// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} +// +type DbMap struct { + // Db handle to use with this map + Db *sql.DB + + // Dialect implementation to use with this map + Dialect Dialect + + TypeConverter TypeConverter + + tables []*TableMap + logger GorpLogger + logPrefix string +} + +// TableMap represents a mapping between a Go struct and a database table +// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these +type TableMap struct { + // Name of database table. + TableName string + SchemaName string + gotype reflect.Type + Columns []*ColumnMap + keys []*ColumnMap + uniqueTogether [][]string + version *ColumnMap + insertPlan bindPlan + updatePlan bindPlan + deletePlan bindPlan + getPlan bindPlan + dbmap *DbMap +} + +// ResetSql removes cached insert/update/select/delete SQL strings +// associated with this TableMap. Call this if you've modified +// any column names or the table name itself. 
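+//
+// For example (an illustrative sketch; Rename alone does not reset the
+// cached SQL, so regenerate it explicitly):
+//
+//	t := dbmap.AddTableWithName(Invoice{}, "invoice_test")
+//	t.ColMap("Memo").Rename("memo_text")
+//	t.ResetSql()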
+func (t *TableMap) ResetSql() { + t.insertPlan = bindPlan{} + t.updatePlan = bindPlan{} + t.deletePlan = bindPlan{} + t.getPlan = bindPlan{} +} + +// SetKeys lets you specify the fields on a struct that map to primary +// key columns on the table. If isAutoIncr is set, result.LastInsertId() +// will be used after INSERT to bind the generated id to the Go struct. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if isAutoIncr is true, and fieldNames length != 1 +// +func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap { + if isAutoIncr && len(fieldNames) != 1 { + panic(fmt.Sprintf( + "gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)", + len(fieldNames))) + } + t.keys = make([]*ColumnMap, 0) + for _, name := range fieldNames { + colmap := t.ColMap(name) + colmap.isPK = true + colmap.isAutoIncr = isAutoIncr + t.keys = append(t.keys, colmap) + } + t.ResetSql() + + return t +} + +// SetUniqueTogether lets you specify uniqueness constraints across multiple +// columns on the table. Each call adds an additional constraint for the +// specified columns. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if fieldNames length < 2. +// +func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap { + if len(fieldNames) < 2 { + panic(fmt.Sprintf( + "gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")) + } + + columns := make([]string, 0) + for _, name := range fieldNames { + columns = append(columns, name) + } + t.uniqueTogether = append(t.uniqueTogether, columns) + t.ResetSql() + + return t +} + +// ColMap returns the ColumnMap pointer matching the given struct field +// name. It panics if the struct does not contain a field matching this +// name. +func (t *TableMap) ColMap(field string) *ColumnMap { + col := colMapOrNil(t, field) + if col == nil { + e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s", + t.TableName, t.gotype.Name(), field) + + panic(e) + } + return col +} + +func colMapOrNil(t *TableMap, field string) *ColumnMap { + for _, col := range t.Columns { + if col.fieldName == field || col.ColumnName == field { + return col + } + } + return nil +} + +// SetVersionCol sets the column to use as the Version field. By default +// the "Version" field is used. Returns the column found, or panics +// if the struct does not contain a field matching this name. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. 
+func (t *TableMap) SetVersionCol(field string) *ColumnMap { + c := t.ColMap(field) + t.version = c + t.ResetSql() + return c +} + +type bindPlan struct { + query string + argFields []string + keyFields []string + versField string + autoIncrIdx int + autoIncrFieldName string + once sync.Once +} + +func (plan *bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { + bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} + if plan.versField != "" { + bi.existingVersion = elem.FieldByName(plan.versField).Int() + } + + var err error + + for i := 0; i < len(plan.argFields); i++ { + k := plan.argFields[i] + if k == versFieldConst { + newVer := bi.existingVersion + 1 + bi.args = append(bi.args, newVer) + if bi.existingVersion == 0 { + elem.FieldByName(plan.versField).SetInt(int64(newVer)) + } + } else { + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.args = append(bi.args, val) + } + } + + for i := 0; i < len(plan.keyFields); i++ { + k := plan.keyFields[i] + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.keys = append(bi.keys, val) + } + + return bi, nil +} + +type bindInstance struct { + query string + args []interface{} + keys []interface{} + existingVersion int64 + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { + plan := &t.insertPlan + plan.once.Do(func() { + plan.autoIncrIdx = -1 + + s := bytes.Buffer{} + s2 := bytes.Buffer{} + s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + first := true + for y := range t.Columns { + col := t.Columns[y] + if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == "") { + if !col.Transient { + if !first { + s.WriteString(",") + s2.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + + if col.isAutoIncr { + s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } else { + s2.WriteString(t.dbmap.Dialect.BindVar(x)) + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + + x++ + } + first = false + } + } else { + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } + } + s.WriteString(") values (") + s.WriteString(s2.String()) + s.WriteString(")") + if plan.autoIncrIdx > -1 { + s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindUpdate(elem reflect.Value) (bindInstance, error) { + plan := &t.updatePlan + plan.once.Do(func() { + + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + x := 0 + + for y := range t.Columns { + col := t.Columns[y] + if !col.isAutoIncr && !col.Transient { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + if col == 
t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } + } + + s.WriteString(" where ") + for y := range t.keys { + col := t.keys[y] + if y > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.argFields = append(plan.argFields, col.fieldName) + plan.keyFields = append(plan.keyFields, col.fieldName) + x++ + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { + plan := &t.deletePlan + plan.once.Do(func() { + + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + for y := range t.Columns { + col := t.Columns[y] + if !col.Transient { + if col == t.version { + plan.versField = col.fieldName + } + } + } + + s.WriteString(" where ") + for x := range t.keys { + k := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, k.fieldName) + plan.argFields = append(plan.argFields, k.fieldName) + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) + + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindGet() *bindPlan { + plan := &t.getPlan + plan.once.Do(func() { + + s := bytes.Buffer{} + s.WriteString("select ") + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + plan.argFields = append(plan.argFields, col.fieldName) + x++ + } + } + s.WriteString(" from ") + s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) + s.WriteString(" where ") + for x := range t.keys { + col := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, col.fieldName) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan +} + +// ColumnMap represents a mapping between a Go struct field and a single +// column in a table. +// Unique and MaxSize only inform the +// CreateTables() function and are not used by Insert/Update/Delete/Get. +type ColumnMap struct { + // Column name in db table + ColumnName string + + // If true, this column is skipped in generated SQL statements + Transient bool + + // If true, " unique" is added to create table statements. 
+ // Not used elsewhere + Unique bool + + // Passed to Dialect.ToSqlType() to assist in informing the + // correct column type to map to in CreateTables() + // Not used elsewhere + MaxSize int + + fieldName string + gotype reflect.Type + isPK bool + isAutoIncr bool + isNotNull bool +} + +// Rename allows you to specify the column name in the table +// +// Example: table.ColMap("Updated").Rename("date_updated") +// +func (c *ColumnMap) Rename(colname string) *ColumnMap { + c.ColumnName = colname + return c +} + +// SetTransient allows you to mark the column as transient. If true +// this column will be skipped when SQL statements are generated +func (c *ColumnMap) SetTransient(b bool) *ColumnMap { + c.Transient = b + return c +} + +// SetUnique adds "unique" to the create table statements for this +// column, if b is true. +func (c *ColumnMap) SetUnique(b bool) *ColumnMap { + c.Unique = b + return c +} + +// SetNotNull adds "not null" to the create table statements for this +// column, if nn is true. +func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { + c.isNotNull = nn + return c +} + +// SetMaxSize specifies the max length of values of this column. This is +// passed to the dialect.ToSqlType() function, which can use the value +// to alter the generated type for "create table" statements +func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { + c.MaxSize = size + return c +} + +// Transaction represents a database transaction. +// Insert/Update/Delete/Get/Exec operations will be run in the context +// of that transaction. Transactions should be terminated with +// a call to Commit() or Rollback() +type Transaction struct { + dbmap *DbMap + tx *sql.Tx + closed bool +} + +// SqlExecutor exposes gorp operations that can be run from Pre/Post +// hooks. This hides whether the current operation that triggered the +// hook is in a transaction. +// +// See the DbMap function docs for each of the functions below for more +// information. +type SqlExecutor interface { + Get(i interface{}, keys ...interface{}) (interface{}, error) + Insert(list ...interface{}) error + Update(list ...interface{}) (int64, error) + Delete(list ...interface{}) (int64, error) + Exec(query string, args ...interface{}) (sql.Result, error) + Select(i interface{}, query string, + args ...interface{}) ([]interface{}, error) + SelectInt(query string, args ...interface{}) (int64, error) + SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) + SelectFloat(query string, args ...interface{}) (float64, error) + SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) + SelectStr(query string, args ...interface{}) (string, error) + SelectNullStr(query string, args ...interface{}) (sql.NullString, error) + SelectOne(holder interface{}, query string, args ...interface{}) error + query(query string, args ...interface{}) (*sql.Rows, error) + queryRow(query string, args ...interface{}) *sql.Row +} + +// Compile-time check that DbMap and Transaction implement the SqlExecutor +// interface. +var _, _ SqlExecutor = &DbMap{}, &Transaction{} + +type GorpLogger interface { + Printf(format string, v ...interface{}) +} + +// TraceOn turns on SQL statement logging for this DbMap. After this is +// called, all SQL statements will be sent to the logger. If prefix is +// a non-empty string, it will be written to the front of all logged +// strings, which can aid in filtering log lines. +// +// Use TraceOn if you want to spy on the SQL statements that gorp +// generates. 
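+//
+// For example (illustrative):
+//
+//	dbmap.TraceOn("[gorp]", log.New(os.Stdout, "sql: ", log.Lmicroseconds))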
+// +// Note that the base log.Logger type satisfies GorpLogger, but adapters can +// easily be written for other logging packages (e.g., the golang-sanctioned +// glog framework). +func (m *DbMap) TraceOn(prefix string, logger GorpLogger) { + m.logger = logger + if prefix == "" { + m.logPrefix = prefix + } else { + m.logPrefix = fmt.Sprintf("%s ", prefix) + } +} + +// TraceOff turns off tracing. It is idempotent. +func (m *DbMap) TraceOff() { + m.logger = nil + m.logPrefix = "" +} + +// AddTable registers the given interface type with gorp. The table name +// will be given the name of the TypeOf(i). You must call this function, +// or AddTableWithName, for any struct type you wish to persist with +// the given DbMap. +// +// This operation is idempotent. If i's type is already mapped, the +// existing *TableMap is returned +func (m *DbMap) AddTable(i interface{}) *TableMap { + return m.AddTableWithName(i, "") +} + +// AddTableWithName has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap { + return m.AddTableWithNameAndSchema(i, "", name) +} + +// AddTableWithNameAndSchema has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap { + t := reflect.TypeOf(i) + if name == "" { + name = t.Name() + } + + // check if we have a table for this type already + // if so, update the name and return the existing pointer + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + table.TableName = name + return table + } + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + tmap.Columns, tmap.version = m.readStructColumns(t) + m.tables = append(m.tables, tmap) + + return tmap +} + +func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap, version *ColumnMap) { + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Anonymous && f.Type.Kind() == reflect.Struct { + // Recursively add nested fields in embedded structs. + subcols, subversion := m.readStructColumns(f.Type) + // Don't append nested fields that have the same field + // name as an already-mapped field. + for _, subcol := range subcols { + shouldAppend := true + for _, col := range cols { + if !subcol.Transient && subcol.fieldName == col.fieldName { + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, subcol) + } + } + if subversion != nil { + version = subversion + } + } else { + columnName := f.Tag.Get("db") + if columnName == "" { + columnName = f.Name + } + gotype := f.Type + if m.TypeConverter != nil { + // Make a new pointer to a value of type gotype and + // pass it to the TypeConverter's FromDb method to see + // if a different type should be used for the column + // type during table creation. + value := reflect.New(gotype).Interface() + scanner, useHolder := m.TypeConverter.FromDb(value) + if useHolder { + gotype = reflect.TypeOf(scanner.Holder) + } + } + cm := &ColumnMap{ + ColumnName: columnName, + Transient: columnName == "-", + fieldName: f.Name, + gotype: gotype, + } + // Check for nested fields of the same field name and + // override them. 
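+			// (An explicit field on the outer struct shadows a field of the
+			// same name promoted from an embedded struct, mirroring Go's own
+			// field-promotion rules.)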
+ shouldAppend := true + for index, col := range cols { + if !col.Transient && col.fieldName == cm.fieldName { + cols[index] = cm + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, cm) + } + if cm.fieldName == "Version" { + log.New(os.Stderr, "", log.LstdFlags).Println("Warning: Automatic mapping of Version struct members to version columns (see optimistic locking) will be deprecated in next version (V2) See: https://github.com/go-gorp/gorp/pull/214") + version = cm + } + } + } + return +} + +// CreateTables iterates through TableMaps registered to this DbMap and +// executes "create table" statements against the database for each. +// +// This is particularly useful in unit tests where you want to create +// and destroy the schema automatically. +func (m *DbMap) CreateTables() error { + return m.createTables(false) +} + +// CreateTablesIfNotExists is similar to CreateTables, but starts +// each statement with "create table if not exists" so that existing +// tables do not raise errors +func (m *DbMap) CreateTablesIfNotExists() error { + return m.createTables(true) +} + +func (m *DbMap) createTables(ifNotExists bool) error { + var err error + for i := range m.tables { + table := m.tables[i] + + s := bytes.Buffer{} + + if strings.TrimSpace(table.SchemaName) != "" { + schemaCreate := "create schema" + if ifNotExists { + s.WriteString(m.Dialect.IfSchemaNotExists(schemaCreate, table.SchemaName)) + } else { + s.WriteString(schemaCreate) + } + s.WriteString(fmt.Sprintf(" %s;", table.SchemaName)) + } + + tableCreate := "create table" + if ifNotExists { + s.WriteString(m.Dialect.IfTableNotExists(tableCreate, table.SchemaName, table.TableName)) + } else { + s.WriteString(tableCreate) + } + s.WriteString(fmt.Sprintf(" %s (", m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + + x := 0 + for _, col := range table.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(", ") + } + stype := m.Dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr) + s.WriteString(fmt.Sprintf("%s %s", m.Dialect.QuoteField(col.ColumnName), stype)) + + if col.isPK || col.isNotNull { + s.WriteString(" not null") + } + if col.isPK && len(table.keys) == 1 { + s.WriteString(" primary key") + } + if col.Unique { + s.WriteString(" unique") + } + if col.isAutoIncr { + s.WriteString(fmt.Sprintf(" %s", m.Dialect.AutoIncrStr())) + } + + x++ + } + } + if len(table.keys) > 1 { + s.WriteString(", primary key (") + for x := range table.keys { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(m.Dialect.QuoteField(table.keys[x].ColumnName)) + } + s.WriteString(")") + } + if len(table.uniqueTogether) > 0 { + for _, columns := range table.uniqueTogether { + s.WriteString(", unique (") + for i, column := range columns { + if i > 0 { + s.WriteString(", ") + } + s.WriteString(m.Dialect.QuoteField(column)) + } + s.WriteString(")") + } + } + s.WriteString(") ") + s.WriteString(m.Dialect.CreateTableSuffix()) + s.WriteString(m.Dialect.QuerySuffix()) + _, err = m.Exec(s.String()) + if err != nil { + break + } + } + return err +} + +// DropTable drops an individual table. Will throw an error +// if the table does not exist. +func (m *DbMap) DropTable(table interface{}) error { + t := reflect.TypeOf(table) + return m.dropTable(t, false) +} + +// DropTable drops an individual table. Will NOT throw an error +// if the table does not exist. 
+func (m *DbMap) DropTableIfExists(table interface{}) error { + t := reflect.TypeOf(table) + return m.dropTable(t, true) +} + +// DropTables iterates through TableMaps registered to this DbMap and +// executes "drop table" statements against the database for each. +func (m *DbMap) DropTables() error { + return m.dropTables(false) +} + +// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to +// avoid errors for tables that do not exist. +func (m *DbMap) DropTablesIfExists() error { + return m.dropTables(true) +} + +// Goes through all the registered tables, dropping them one by one. +// If an error is encountered, then it is returned and the rest of +// the tables are not dropped. +func (m *DbMap) dropTables(addIfExists bool) (err error) { + for _, table := range m.tables { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return + } + } + return err +} + +// Implementation of dropping a single table. +func (m *DbMap) dropTable(t reflect.Type, addIfExists bool) error { + table := tableOrNil(m, t) + if table == nil { + return errors.New(fmt.Sprintf("table %s was not registered!", table.TableName)) + } + + return m.dropTableImpl(table, addIfExists) +} + +func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) { + tableDrop := "drop table" + if ifExists { + tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName) + } + _, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + return err +} + +// TruncateTables iterates through TableMaps registered to this DbMap and +// executes "truncate table" statements against the database for each, or in the case of +// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization +// (http://www.sqlite.org/lang_delete.html) +func (m *DbMap) TruncateTables() error { + var err error + for i := range m.tables { + table := m.tables[i] + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + return err +} + +// Insert runs a SQL INSERT statement for each element in list. List +// items must be pointers. +// +// Any interface whose TableMap has an auto-increment primary key will +// have its last insert id bound to the PK field on the struct. +// +// The hook functions PreInsert() and/or PostInsert() will be executed +// before/after the INSERT statement if the interface defines them. +// +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Insert(list ...interface{}) error { + return insert(m, m, list...) +} + +// Update runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. +// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Update(list ...interface{}) (int64, error) { + return update(m, m, list...) +} + +// Delete runs a SQL DELETE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreDelete() and/or PostDelete() will be executed +// before/after the DELETE statement if the interface defines them. +// +// Returns the number of rows deleted. 
+// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Delete(list ...interface{}) (int64, error) { + return delete(m, m, list...) +} + +// Get runs a SQL SELECT to fetch a single row from the table based on the +// primary key(s) +// +// i should be an empty value for the struct to load. keys should be +// the primary key value(s) for the row to load. If multiple keys +// exist on the table, the order should match the column order +// specified in SetKeys() when the table mapping was defined. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Returns a pointer to a struct that matches or nil if no row is found. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(m, m, i, keys...) +} + +// Select runs an arbitrary SQL query, binding the columns in the result +// to fields on the struct specified by i. args represent the bind +// parameters for the SQL statement. +// +// Column names on the SELECT statement should be aliased to the field names +// on the struct i. Returns an error if one or more columns in the result +// do not match. It is OK if fields on i are not part of the SQL +// statement. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Values are returned in one of two ways: +// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to +// matching rows of type i. +// 2. If i is a pointer to a slice, the results will be appended to that slice +// and nil returned. +// +// i does NOT need to be registered with AddTable() +func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + return hookedselect(m, m, i, query, args...) +} + +// Exec runs an arbitrary SQL statement. args represent the bind parameters. +// This is equivalent to running: Exec() using database/sql +func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) { + m.trace(query, args...) + return m.Db.Exec(query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function +func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) { + return SelectInt(m, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function +func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + return SelectNullInt(m, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFlot function +func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(m, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function +func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(m, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function +func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(m, query, args...) 
+}
+
+// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function
+func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) {
+	return SelectNullStr(m, query, args...)
+}
+
+// SelectOne is a convenience wrapper around the gorp.SelectOne function
+func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error {
+	return SelectOne(m, m, holder, query, args...)
+}
+
+// Begin starts a gorp Transaction
+func (m *DbMap) Begin() (*Transaction, error) {
+	m.trace("begin;")
+	tx, err := m.Db.Begin()
+	if err != nil {
+		return nil, err
+	}
+	return &Transaction{m, tx, false}, nil
+}
+
+// TableFor returns the *TableMap corresponding to the given Go Type
+// If no table is mapped to that type an error is returned.
+// If checkPK is true and the mapped table has no registered PKs, an error is returned.
+func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) {
+	table := tableOrNil(m, t)
+	if table == nil {
+		return nil, fmt.Errorf("gorp: no table found for type: %v", t.Name())
+	}
+
+	if checkPK && len(table.keys) < 1 {
+		e := fmt.Sprintf("gorp: no keys defined for table: %s",
+			table.TableName)
+		return nil, errors.New(e)
+	}
+
+	return table, nil
+}
+
+// Prepare creates a prepared statement for later queries or executions.
+// Multiple queries or executions may be run concurrently from the returned statement.
+// This is equivalent to running: Prepare() using database/sql
+func (m *DbMap) Prepare(query string) (*sql.Stmt, error) {
+	m.trace(query, nil)
+	return m.Db.Prepare(query)
+}
+
+func tableOrNil(m *DbMap, t reflect.Type) *TableMap {
+	for i := range m.tables {
+		table := m.tables[i]
+		if table.gotype == t {
+			return table
+		}
+	}
+	return nil
+}
+
+func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) {
+	ptrv := reflect.ValueOf(ptr)
+	if ptrv.Kind() != reflect.Ptr {
+		e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr,
+			ptrv.Kind())
+		return nil, reflect.Value{}, errors.New(e)
+	}
+	elem := ptrv.Elem()
+	etype := reflect.TypeOf(elem.Interface())
+	t, err := m.TableFor(etype, checkPK)
+	if err != nil {
+		return nil, reflect.Value{}, err
+	}
+
+	return t, elem, nil
+}
+
+func (m *DbMap) queryRow(query string, args ...interface{}) *sql.Row {
+	m.trace(query, args...)
+	return m.Db.QueryRow(query, args...)
+}
+
+func (m *DbMap) query(query string, args ...interface{}) (*sql.Rows, error) {
+	m.trace(query, args...)
+	return m.Db.Query(query, args...)
+}
+
+func (m *DbMap) trace(query string, args ...interface{}) {
+	if m.logger != nil {
+		var margs = argsString(args...)
+		m.logger.Printf("%s%s [%s]", m.logPrefix, query, margs)
+	}
+}
+
+func argsString(args ...interface{}) string {
+	var margs string
+	for i, a := range args {
+		var v interface{} = a
+		if x, ok := v.(driver.Valuer); ok {
+			y, err := x.Value()
+			if err == nil {
+				v = y
+			}
+		}
+		switch v.(type) {
+		case string:
+			v = fmt.Sprintf("%q", v)
+		default:
+			v = fmt.Sprintf("%v", v)
+		}
+		margs += fmt.Sprintf("%d:%s", i+1, v)
+		if i+1 < len(args) {
+			margs += " "
+		}
+	}
+	return margs
+}
+
+///////////////
+
+// Insert has the same behavior as DbMap.Insert(), but runs in a transaction.
+func (t *Transaction) Insert(list ...interface{}) error {
+	return insert(t.dbmap, t, list...)
+}
+
+// Update has the same behavior as DbMap.Update(), but runs in a transaction.
+func (t *Transaction) Update(list ...interface{}) (int64, error) {
+	return update(t.dbmap, t, list...)
+} + +// Delete has the same behavior as DbMap.Delete(), but runs in a transaction. +func (t *Transaction) Delete(list ...interface{}) (int64, error) { + return delete(t.dbmap, t, list...) +} + +// Get has the same behavior as DbMap.Get(), but runs in a transaction. +func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(t.dbmap, t, i, keys...) +} + +// Select has the same behavior as DbMap.Select(), but runs in a transaction. +func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + return hookedselect(t.dbmap, t, i, query, args...) +} + +// Exec has the same behavior as DbMap.Exec(), but runs in a transaction. +func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) { + t.dbmap.trace(query, args...) + return t.tx.Exec(query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function. +func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) { + return SelectInt(t, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function. +func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + return SelectNullInt(t, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFloat function. +func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(t, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function. +func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(t, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function. +func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(t, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function. +func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + return SelectNullStr(t, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function. +func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error { + return SelectOne(t.dbmap, t, holder, query, args...) +} + +// Commit commits the underlying database transaction. +func (t *Transaction) Commit() error { + if !t.closed { + t.closed = true + t.dbmap.trace("commit;") + return t.tx.Commit() + } + + return sql.ErrTxDone +} + +// Rollback rolls back the underlying database transaction. +func (t *Transaction) Rollback() error { + if !t.closed { + t.closed = true + t.dbmap.trace("rollback;") + return t.tx.Rollback() + } + + return sql.ErrTxDone +} + +// Savepoint creates a savepoint with the given name. The name is interpolated +// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is +// derived from user input. +func (t *Transaction) Savepoint(name string) error { + query := "savepoint " + t.dbmap.Dialect.QuoteField(name) + t.dbmap.trace(query, nil) + _, err := t.tx.Exec(query) + return err +} + +// RollbackToSavepoint rolls back to the savepoint with the given name. The +// name is interpolated directly into the SQL SAVEPOINT statement, so you must +// sanitize it if it is derived from user input. 
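+//
+// A minimal sketch of pairing Savepoint with RollbackToSavepoint (the tx
+// variable, the rows slice, and the "before_bulk_insert" name are
+// hypothetical):
+//
+//     // assumes tx, _ := dbmap.Begin()
+//     if err := tx.Savepoint("before_bulk_insert"); err == nil {
+//         if insertErr := tx.Insert(rows...); insertErr != nil {
+//             tx.RollbackToSavepoint("before_bulk_insert")
+//         }
+//     }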
+func (t *Transaction) RollbackToSavepoint(savepoint string) error {
+	query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
+	t.dbmap.trace(query, nil)
+	_, err := t.tx.Exec(query)
+	return err
+}
+
+// ReleaseSavepoint releases the savepoint with the given name. The name is
+// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize
+// it if it is derived from user input.
+func (t *Transaction) ReleaseSavepoint(savepoint string) error {
+	query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
+	t.dbmap.trace(query, nil)
+	_, err := t.tx.Exec(query)
+	return err
+}
+
+// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction.
+func (t *Transaction) Prepare(query string) (*sql.Stmt, error) {
+	t.dbmap.trace(query, nil)
+	return t.tx.Prepare(query)
+}
+
+func (t *Transaction) queryRow(query string, args ...interface{}) *sql.Row {
+	t.dbmap.trace(query, args...)
+	return t.tx.QueryRow(query, args...)
+}
+
+func (t *Transaction) query(query string, args ...interface{}) (*sql.Rows, error) {
+	t.dbmap.trace(query, args...)
+	return t.tx.Query(query, args...)
+}
+
+///////////////
+
+// SelectInt executes the given query, which should be a SELECT statement for a single
+// integer column, and returns the value of the first row returned. If no rows are
+// found, zero is returned.
+func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) {
+	var h int64
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return 0, err
+	}
+	return h, nil
+}
+
+// SelectNullInt executes the given query, which should be a SELECT statement for a single
+// integer column, and returns the value of the first row returned. If no rows are
+// found, the empty sql.NullInt64 value is returned.
+func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) {
+	var h sql.NullInt64
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return h, err
+	}
+	return h, nil
+}
+
+// SelectFloat executes the given query, which should be a SELECT statement for a single
+// float column, and returns the value of the first row returned. If no rows are
+// found, zero is returned.
+func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) {
+	var h float64
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return 0, err
+	}
+	return h, nil
+}
+
+// SelectNullFloat executes the given query, which should be a SELECT statement for a single
+// float column, and returns the value of the first row returned. If no rows are
+// found, the empty sql.NullFloat64 value is returned.
+func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) {
+	var h sql.NullFloat64
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return h, err
+	}
+	return h, nil
+}
+
+// SelectStr executes the given query, which should be a SELECT statement for a single
+// char/varchar column, and returns the value of the first row returned. If no rows are
+// found, an empty string is returned.
+func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) {
+	var h string
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return "", err
+	}
+	return h, nil
+}
+
+// SelectNullStr executes the given query, which should be a SELECT
+// statement for a single char/varchar column, and returns the value
+// of the first row returned. If no rows are found, the empty
+// sql.NullString is returned.
+func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) {
+	var h sql.NullString
+	err := selectVal(e, &h, query, args...)
+	if err != nil && err != sql.ErrNoRows {
+		return h, err
+	}
+	return h, nil
+}
+
+// SelectOne executes the given query (which should be a SELECT statement)
+// and binds the result to holder, which must be a pointer.
+//
+// If no row is found, an error (sql.ErrNoRows specifically) will be returned
+//
+// If more than one row is found, an error will be returned.
+func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
+	t := reflect.TypeOf(holder)
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	} else {
+		// %T prints the holder's Go type; %t is the boolean verb.
+		return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %T", holder)
+	}
+
+	// Handle pointer to pointer
+	isptr := false
+	if t.Kind() == reflect.Ptr {
+		isptr = true
+		t = t.Elem()
+	}
+
+	if t.Kind() == reflect.Struct {
+		var nonFatalErr error
+
+		list, err := hookedselect(m, e, holder, query, args...)
+		if err != nil {
+			if !NonFatalError(err) {
+				return err
+			}
+			nonFatalErr = err
+		}
+
+		dest := reflect.ValueOf(holder)
+		if isptr {
+			dest = dest.Elem()
+		}
+
+		if len(list) > 0 {
+			// check for multiple rows
+			if len(list) > 1 {
+				return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args)
+			}
+
+			// Initialize if nil
+			if dest.IsNil() {
+				dest.Set(reflect.New(t))
+			}
+
+			// only one row found
+			src := reflect.ValueOf(list[0])
+			dest.Elem().Set(src.Elem())
+		} else {
+			// No rows found, return a proper error.
+			return sql.ErrNoRows
+		}
+
+		return nonFatalErr
+	}
+
+	return selectVal(e, holder, query, args...)
+}
+
+func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
+	if len(args) == 1 {
+		switch m := e.(type) {
+		case *DbMap:
+			query, args = maybeExpandNamedQuery(m, query, args)
+		case *Transaction:
+			query, args = maybeExpandNamedQuery(m.dbmap, query, args)
+		}
+	}
+	rows, err := e.query(query, args...)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	if !rows.Next() {
+		return sql.ErrNoRows
+	}
+
+	return rows.Scan(holder)
+}
+
+///////////////
+
+func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
+	args ...interface{}) ([]interface{}, error) {
+
+	var nonFatalErr error
+
+	list, err := rawselect(m, exec, i, query, args...)
+	if err != nil {
+		if !NonFatalError(err) {
+			return nil, err
+		}
+		nonFatalErr = err
+	}
+
+	// Determine where the results are: written to i, or returned in list
+	if t, _ := toSliceType(i); t == nil {
+		for _, v := range list {
+			if v, ok := v.(HasPostGet); ok {
+				err := v.PostGet(exec)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	} else {
+		resultsValue := reflect.Indirect(reflect.ValueOf(i))
+		for i := 0; i < resultsValue.Len(); i++ {
+			if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok {
+				err := v.PostGet(exec)
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+	return list, nonFatalErr
+}
+
+func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
+	args ...interface{}) ([]interface{}, error) {
+	var (
+		appendToSlice   = false // Write results to i directly?
+		intoStruct      = true  // Selecting into a struct?
+		pointerElements = true  // Are the slice elements pointers (vs values)?
+	)
+
+	var nonFatalErr error
+
+	// get type for i, verifying it's a supported destination
+	t, err := toType(i)
+	if err != nil {
+		var err2 error
+		if t, err2 = toSliceType(i); t == nil {
+			if err2 != nil {
+				return nil, err2
+			}
+			return nil, err
+		}
+		pointerElements = t.Kind() == reflect.Ptr
+		if pointerElements {
+			t = t.Elem()
+		}
+		appendToSlice = true
+		intoStruct = t.Kind() == reflect.Struct
+	}
+
+	// If the caller supplied a single struct/map argument, assume a "named
+	// parameter" query. Extract the named arguments from the struct/map, create
+	// the flat arg slice, and rewrite the query to use the dialect's placeholder.
+	if len(args) == 1 {
+		query, args = maybeExpandNamedQuery(m, query, args)
+	}
+
+	// Run the query
+	rows, err := exec.query(query, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	// Fetch the column names as returned from db
+	cols, err := rows.Columns()
+	if err != nil {
+		return nil, err
+	}
+
+	if !intoStruct && len(cols) > 1 {
+		return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols))
+	}
+
+	var colToFieldIndex [][]int
+	if intoStruct {
+		if colToFieldIndex, err = columnToFieldIndex(m, t, cols); err != nil {
+			if !NonFatalError(err) {
+				return nil, err
+			}
+			nonFatalErr = err
+		}
+	}
+
+	conv := m.TypeConverter
+
+	// Add results to one of these two slices.
+	var (
+		list       = make([]interface{}, 0)
+		sliceValue = reflect.Indirect(reflect.ValueOf(i))
+	)
+
+	for {
+		if !rows.Next() {
+			// if an error occurred, return it
+			if rows.Err() != nil {
+				return nil, rows.Err()
+			}
+			// time to exit from outer "for" loop
+			break
+		}
+		v := reflect.New(t)
+		dest := make([]interface{}, len(cols))
+
+		custScan := make([]CustomScanner, 0)
+
+		for x := range cols {
+			f := v.Elem()
+			if intoStruct {
+				index := colToFieldIndex[x]
+				if index == nil {
+					// this field is not present in the struct, so create a dummy
+					// value for rows.Scan to scan into
+					var dummy sql.RawBytes
+					dest[x] = &dummy
+					continue
+				}
+				f = f.FieldByIndex(index)
+			}
+			target := f.Addr().Interface()
+			if conv != nil {
+				scanner, ok := conv.FromDb(target)
+				if ok {
+					target = scanner.Holder
+					custScan = append(custScan, scanner)
+				}
+			}
+			dest[x] = target
+		}
+
+		err = rows.Scan(dest...)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, c := range custScan {
+			err = c.Bind()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		if appendToSlice {
+			if !pointerElements {
+				v = v.Elem()
+			}
+			sliceValue.Set(reflect.Append(sliceValue, v))
+		} else {
+			list = append(list, v.Interface())
+		}
+	}
+
+	if appendToSlice && sliceValue.IsNil() {
+		sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0))
+	}
+
+	return list, nonFatalErr
+}
+
+// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used
+// as input to a named query. If so, it rewrites the query to use
+// dialect-dependent bindvars and instantiates the corresponding slice of
+// parameters by extracting data from the map / struct.
+// If not, returns the input values unchanged.
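+//
+// An illustrative sketch (the query text, the invoice table, and the list
+// destination are hypothetical): a call such as
+//
+//     m.Select(&list, "select * from invoice where PaymentStatus = :status",
+//         map[string]interface{}{"status": "unpaid"})
+//
+// is rewritten to use the dialect's bind variable (e.g. $1 for postgres)
+// with args == []interface{}{"unpaid"}.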
+func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) { + arg := reflect.ValueOf(args[0]) + for arg.Kind() == reflect.Ptr { + arg = arg.Elem() + } + switch { + case arg.Kind() == reflect.Map && arg.Type().Key().Kind() == reflect.String: + return expandNamedQuery(m, query, func(key string) reflect.Value { + return arg.MapIndex(reflect.ValueOf(key)) + }) + // #84 - ignore time.Time structs here - there may be a cleaner way to do this + case arg.Kind() == reflect.Struct && !(arg.Type().PkgPath() == "time" && arg.Type().Name() == "Time"): + return expandNamedQuery(m, query, arg.FieldByName) + } + return query, args +} + +var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) + +// expandNamedQuery accepts a query with placeholders of the form ":key", and a +// single arg of Kind Struct or Map[string]. It returns the query with the +// dialect's placeholders, and a slice of args ready for positional insertion +// into the query. +func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { + var ( + n int + args []interface{} + ) + return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { + val := keyGetter(key[1:]) + if !val.IsValid() { + return key + } + args = append(args, val.Interface()) + newVar := m.Dialect.BindVar(n) + n++ + return newVar + }), args +} + +func columnToFieldIndex(m *DbMap, t reflect.Type, cols []string) ([][]int, error) { + colToFieldIndex := make([][]int, len(cols)) + + // check if type t is a mapped table - if so we'll + // check the table for column aliasing below + tableMapped := false + table := tableOrNil(m, t) + if table != nil { + tableMapped = true + } + + // Loop over column names and find field in i to bind to + // based on column name. all returned columns must match + // a field in the i struct + missingColNames := []string{} + for x := range cols { + colName := strings.ToLower(cols[x]) + field, found := t.FieldByNameFunc(func(fieldName string) bool { + field, _ := t.FieldByName(fieldName) + fieldName = field.Tag.Get("db") + + if fieldName == "-" { + return false + } else if fieldName == "" { + fieldName = field.Name + } + if tableMapped { + colMap := colMapOrNil(table, fieldName) + if colMap != nil { + fieldName = colMap.ColumnName + } + } + return colName == strings.ToLower(fieldName) + }) + if found { + colToFieldIndex[x] = field.Index + } + if colToFieldIndex[x] == nil { + missingColNames = append(missingColNames, colName) + } + } + if len(missingColNames) > 0 { + return colToFieldIndex, &NoFieldInTypeError{ + TypeName: t.Name(), + MissingColNames: missingColNames, + } + } + return colToFieldIndex, nil +} + +func fieldByName(val reflect.Value, fieldName string) *reflect.Value { + // try to find field by exact match + f := val.FieldByName(fieldName) + + if f != zeroVal { + return &f + } + + // try to find by case insensitive match - only the Postgres driver + // seems to require this - in the case where columns are aliased in the sql + fieldNameL := strings.ToLower(fieldName) + fieldCount := val.NumField() + t := val.Type() + for i := 0; i < fieldCount; i++ { + sf := t.Field(i) + if strings.ToLower(sf.Name) == fieldNameL { + f := val.Field(i) + return &f + } + } + + return nil +} + +// toSliceType returns the element type of the given object, if the object is a +// "*[]*Element" or "*[]Element". If not, returns nil. +// err is returned if the user was trying to pass a pointer-to-slice but failed. 
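+//
+// For example (illustrative, with a hypothetical Invoice struct):
+// toSliceType(&[]*Invoice{}) returns the *Invoice type and
+// toSliceType(&[]Invoice{}) returns the Invoice type, while a non-pointer
+// slice such as []Invoice{} produces an error.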
+func toSliceType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + if t.Kind() != reflect.Ptr { + // If it's a slice, return a more helpful error message + if t.Kind() == reflect.Slice { + return nil, fmt.Errorf("gorp: Cannot SELECT into a non-pointer slice: %v", t) + } + return nil, nil + } + if t = t.Elem(); t.Kind() != reflect.Slice { + return nil, nil + } + return t.Elem(), nil +} + +func toType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + + // If a Pointer to a type, follow + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("gorp: Cannot SELECT into this type: %v", reflect.TypeOf(i)) + } + return t, nil +} + +func get(m *DbMap, exec SqlExecutor, i interface{}, + keys ...interface{}) (interface{}, error) { + + t, err := toType(i) + if err != nil { + return nil, err + } + + table, err := m.TableFor(t, true) + if err != nil { + return nil, err + } + + plan := table.bindGet() + + v := reflect.New(t) + dest := make([]interface{}, len(plan.argFields)) + + conv := m.TypeConverter + custScan := make([]CustomScanner, 0) + + for x, fieldName := range plan.argFields { + f := v.Elem().FieldByName(fieldName) + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + row := exec.queryRow(plan.query, keys...) + err = row.Scan(dest...) + if err != nil { + if err == sql.ErrNoRows { + err = nil + } + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if v, ok := v.Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + + return v.Interface(), nil +} + +func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreDelete); ok { + err = v.PreDelete(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindDelete(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + count += rows + + if v, ok := eval.(HasPostDelete); ok { + err := v.PostDelete(exec) + if err != nil { + return -1, err + } + } + } + + return count, nil +} + +func update(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreUpdate); ok { + err = v.PreUpdate(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindUpdate(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) 
+		}
+
+		if bi.versField != "" {
+			elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1)
+		}
+
+		count += rows
+
+		if v, ok := eval.(HasPostUpdate); ok {
+			err = v.PostUpdate(exec)
+			if err != nil {
+				return -1, err
+			}
+		}
+	}
+	return count, nil
+}
+
+func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error {
+	for _, ptr := range list {
+		table, elem, err := m.tableForPointer(ptr, false)
+		if err != nil {
+			return err
+		}
+
+		eval := elem.Addr().Interface()
+		if v, ok := eval.(HasPreInsert); ok {
+			err := v.PreInsert(exec)
+			if err != nil {
+				return err
+			}
+		}
+
+		bi, err := table.bindInsert(elem)
+		if err != nil {
+			return err
+		}
+
+		if bi.autoIncrIdx > -1 {
+			f := elem.FieldByName(bi.autoIncrFieldName)
+			switch inserter := m.Dialect.(type) {
+			case IntegerAutoIncrInserter:
+				id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...)
+				if err != nil {
+					return err
+				}
+				k := f.Kind()
+				if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) {
+					f.SetInt(id)
+				} else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) {
+					f.SetUint(uint64(id))
+				} else {
+					return fmt.Errorf("gorp: Cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName)
+				}
+			case TargetedAutoIncrInserter:
+				err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...)
+				if err != nil {
+					return err
+				}
+			default:
+				return fmt.Errorf("gorp: Cannot use autoincrement fields on dialects that do not implement an autoincrementing interface")
+			}
+		} else {
+			_, err := exec.Exec(bi.query, bi.args...)
+			if err != nil {
+				return err
+			}
+		}
+
+		if v, ok := eval.(HasPostInsert); ok {
+			err := v.PostInsert(exec)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func lockError(m *DbMap, exec SqlExecutor, tableName string,
+	existingVer int64, elem reflect.Value,
+	keys ...interface{}) (int64, error) {
+
+	existing, err := get(m, exec, elem.Interface(), keys...)
+	if err != nil {
+		return -1, err
+	}
+
+	ole := OptimisticLockError{tableName, keys, true, existingVer}
+	if existing == nil {
+		ole.RowExists = false
+	}
+	return -1, ole
+}
+
+// PostGet() will be executed after the SELECT statement.
+type HasPostGet interface {
+	PostGet(SqlExecutor) error
+}
+
+// PostDelete() will be executed after the DELETE statement.
+type HasPostDelete interface {
+	PostDelete(SqlExecutor) error
+}
+
+// PostUpdate() will be executed after the UPDATE statement.
+type HasPostUpdate interface {
+	PostUpdate(SqlExecutor) error
+}
+
+// PostInsert() will be executed after the INSERT statement.
+type HasPostInsert interface {
+	PostInsert(SqlExecutor) error
+}
+
+// PreDelete() will be executed before the DELETE statement.
+type HasPreDelete interface {
+	PreDelete(SqlExecutor) error
+}
+
+// PreUpdate() will be executed before the UPDATE statement.
+type HasPreUpdate interface {
+	PreUpdate(SqlExecutor) error
+}
+
+// PreInsert() will be executed before the INSERT statement.
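+//
+// A minimal sketch of a type implementing this hook (the Invoice struct and
+// its Created field are hypothetical, not part of gorp):
+//
+//     func (i *Invoice) PreInsert(s gorp.SqlExecutor) error {
+//         i.Created = time.Now().UnixNano()
+//         return nil
+//     }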
+type HasPreInsert interface { + PreInsert(SqlExecutor) error +} diff --git a/vendor/gopkg.in/gorp.v1/test_all.sh b/vendor/gopkg.in/gorp.v1/test_all.sh new file mode 100644 index 0000000000..f870b39a39 --- /dev/null +++ b/vendor/gopkg.in/gorp.v1/test_all.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# on macs, you may need to: +# export GOBUILDFLAG=-ldflags -linkmode=external + +set -e + +export GORP_TEST_DSN=gorptest/gorptest/gorptest +export GORP_TEST_DIALECT=mysql +go test $GOBUILDFLAG . + +export GORP_TEST_DSN=gorptest:gorptest@/gorptest +export GORP_TEST_DIALECT=gomysql +go test $GOBUILDFLAG . + +export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable" +export GORP_TEST_DIALECT=postgres +go test $GOBUILDFLAG . + +export GORP_TEST_DSN=/tmp/gorptest.bin +export GORP_TEST_DIALECT=sqlite +go test $GOBUILDFLAG . diff --git a/vendor/helm.sh/helm/v3/internal/resolver/resolver.go b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go new file mode 100644 index 0000000000..9c65d29650 --- /dev/null +++ b/vendor/helm.sh/helm/v3/internal/resolver/resolver.go @@ -0,0 +1,234 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "bytes" + "encoding/json" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/gates" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" + "helm.sh/helm/v3/pkg/repo" +) + +const FeatureGateOCI = gates.Gate("HELM_EXPERIMENTAL_OCI") + +// Resolver resolves dependencies from semantic version ranges to a particular version. +type Resolver struct { + chartpath string + cachepath string +} + +// New creates a new resolver for a given chart and a given helm home. +func New(chartpath, cachepath string) *Resolver { + return &Resolver{ + chartpath: chartpath, + cachepath: cachepath, + } +} + +// Resolve resolves dependencies and returns a lock file with the resolution. +func (r *Resolver) Resolve(reqs []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) { + + // Now we clone the dependencies, locking as we go. 
+	locked := make([]*chart.Dependency, len(reqs))
+	missing := []string{}
+	for i, d := range reqs {
+		constraint, err := semver.NewConstraint(d.Version)
+		if err != nil {
+			return nil, errors.Wrapf(err, "dependency %q has an invalid version/constraint format", d.Name)
+		}
+
+		if d.Repository == "" {
+			// Local chart subfolder
+			if _, err := GetLocalPath(filepath.Join("charts", d.Name), r.chartpath); err != nil {
+				return nil, err
+			}
+
+			locked[i] = &chart.Dependency{
+				Name:       d.Name,
+				Repository: "",
+				Version:    d.Version,
+			}
+			continue
+		}
+		if strings.HasPrefix(d.Repository, "file://") {
+
+			chartpath, err := GetLocalPath(d.Repository, r.chartpath)
+			if err != nil {
+				return nil, err
+			}
+
+			ch, err := loader.LoadDir(chartpath)
+			if err != nil {
+				return nil, err
+			}
+
+			v, err := semver.NewVersion(ch.Metadata.Version)
+			if err != nil {
+				// Not a legit entry.
+				continue
+			}
+
+			if !constraint.Check(v) {
+				missing = append(missing, d.Name)
+				continue
+			}
+
+			locked[i] = &chart.Dependency{
+				Name:       d.Name,
+				Repository: d.Repository,
+				Version:    ch.Metadata.Version,
+			}
+			continue
+		}
+
+		repoName := repoNames[d.Name]
+		// if the repository was not defined, but the dependency defines a repository url, bypass the cache
+		if repoName == "" && d.Repository != "" {
+			locked[i] = &chart.Dependency{
+				Name:       d.Name,
+				Repository: d.Repository,
+				Version:    d.Version,
+			}
+			continue
+		}
+
+		var vs repo.ChartVersions
+		var version string
+		var ok bool
+		found := true
+		if !strings.HasPrefix(d.Repository, "oci://") {
+			repoIndex, err := repo.LoadIndexFile(filepath.Join(r.cachepath, helmpath.CacheIndexFile(repoName)))
+			if err != nil {
+				return nil, errors.Wrapf(err, "no cached repository for %s found. (try 'helm repo update')", repoName)
+			}
+
+			vs, ok = repoIndex.Entries[d.Name]
+			if !ok {
+				return nil, errors.Errorf("%s chart not found in repo %s", d.Name, d.Repository)
+			}
+			found = false
+		} else {
+			version = d.Version
+			if !FeatureGateOCI.IsEnabled() {
+				return nil, errors.Wrapf(FeatureGateOCI.Error(),
+					"repository %s is an OCI registry", d.Repository)
+			}
+		}
+
+		locked[i] = &chart.Dependency{
+			Name:       d.Name,
+			Repository: d.Repository,
+			Version:    version,
+		}
+		// The versions are already sorted, hence the first one to satisfy the constraint is used
+		for _, ver := range vs {
+			v, err := semver.NewVersion(ver.Version)
+			if err != nil || len(ver.URLs) == 0 {
+				// Not a legit entry.
+				continue
+			}
+			if constraint.Check(v) {
+				found = true
+				locked[i].Version = v.Original()
+				break
+			}
+		}
+
+		if !found {
+			missing = append(missing, d.Name)
+		}
+	}
+	if len(missing) > 0 {
+		return nil, errors.Errorf("can't get a valid version for repositories %s. Try changing the version constraint in Chart.yaml", strings.Join(missing, ", "))
+	}
+
+	digest, err := HashReq(reqs, locked)
+	if err != nil {
+		return nil, err
+	}
+
+	return &chart.Lock{
+		Generated:    time.Now(),
+		Digest:       digest,
+		Dependencies: locked,
+	}, nil
+}
+
+// HashReq generates a hash of the dependencies.
+//
+// This should be used only to compare against another hash generated by this
+// function.
+func HashReq(req, lock []*chart.Dependency) (string, error) {
+	data, err := json.Marshal([2][]*chart.Dependency{req, lock})
+	if err != nil {
+		return "", err
+	}
+	s, err := provenance.Digest(bytes.NewBuffer(data))
+	return "sha256:" + s, err
+}
+
+// HashV2Req generates a hash of requirements generated in Helm v2.
+//
+// This should be used only to compare against another hash generated by the
+// Helm v2 hash function. It exists to handle issue
+// https://github.com/helm/helm/issues/7233.
+func HashV2Req(req []*chart.Dependency) (string, error) {
+	dep := make(map[string][]*chart.Dependency)
+	dep["dependencies"] = req
+	data, err := json.Marshal(dep)
+	if err != nil {
+		return "", err
+	}
+	s, err := provenance.Digest(bytes.NewBuffer(data))
+	return "sha256:" + s, err
+}
+
+// GetLocalPath generates the absolute local path for a dependency whose
+// repository uses the "file://" scheme.
+func GetLocalPath(repo, chartpath string) (string, error) {
+	var depPath string
+	var err error
+	p := strings.TrimPrefix(repo, "file://")
+
+	// root path is absolute
+	if strings.HasPrefix(p, "/") {
+		if depPath, err = filepath.Abs(p); err != nil {
+			return "", err
+		}
+	} else {
+		depPath = filepath.Join(chartpath, p)
+	}
+
+	if _, err = os.Stat(depPath); os.IsNotExist(err) {
+		return "", errors.Errorf("directory %s not found", depPath)
+	} else if err != nil {
+		return "", err
+	}
+
+	return depPath, nil
+}
diff --git a/vendor/helm.sh/helm/v3/internal/version/version.go b/vendor/helm.sh/helm/v3/internal/version/version.go
index 15822e9145..4db8c05498 100644
--- a/vendor/helm.sh/helm/v3/internal/version/version.go
+++ b/vendor/helm.sh/helm/v3/internal/version/version.go
@@ -29,7 +29,7 @@ var (
 	//
 	// Increment major number for new feature additions and behavioral changes.
 	// Increment minor number for bug fixes and performance enhancements.
-	version = "v3.5"
+	version = "v3.6"
 
 	// metadata is extra build time data
 	metadata = ""
diff --git a/vendor/helm.sh/helm/v3/pkg/action/action.go b/vendor/helm.sh/helm/v3/pkg/action/action.go
new file mode 100644
index 0000000000..38ba638e4c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/action.go
@@ -0,0 +1,420 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+
+	"helm.sh/helm/v3/internal/experimental/registry"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/engine"
+	"helm.sh/helm/v3/pkg/kube"
+	"helm.sh/helm/v3/pkg/postrender"
+	"helm.sh/helm/v3/pkg/release"
+	"helm.sh/helm/v3/pkg/releaseutil"
+	"helm.sh/helm/v3/pkg/storage"
+	"helm.sh/helm/v3/pkg/storage/driver"
+	"helm.sh/helm/v3/pkg/time"
+)
+
+// Timestamper is a function capable of producing a timestamp.
+//
+// By default, this is a time.Now function from the Helm time package. This can
+// be overridden for testing though, so that timestamps are predictable.
+var Timestamper = time.Now
+
+var (
+	// errMissingChart indicates that a chart was not provided.
+	errMissingChart = errors.New("no chart provided")
+	// errMissingRelease indicates that a release (name) was not provided.
+ errMissingRelease = errors.New("no release provided") + // errInvalidRevision indicates that an invalid release revision number was provided. + errInvalidRevision = errors.New("invalid release revision") + // errPending indicates that another instance of Helm is already applying an operation on a release. + errPending = errors.New("another operation (install/upgrade/rollback) is in progress") +) + +// ValidName is a regular expression for resource names. +// +// DEPRECATED: This will be removed in Helm 4, and is no longer used here. See +// pkg/lint/rules.validateMetadataNameFunc for the replacement. +// +// According to the Kubernetes help text, the regular expression it uses is: +// +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// +// This follows the above regular expression (but requires a full string match, not partial). +// +// The Kubernetes documentation is here, though it is not entirely correct: +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +var ValidName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`) + +// Configuration injects the dependencies that all actions share. +type Configuration struct { + // RESTClientGetter is an interface that loads Kubernetes clients. + RESTClientGetter RESTClientGetter + + // Releases stores records of releases. + Releases *storage.Storage + + // KubeClient is a Kubernetes API client. + KubeClient kube.Interface + + // RegistryClient is a client for working with registries + RegistryClient *registry.Client + + // Capabilities describes the capabilities of the Kubernetes cluster. + Capabilities *chartutil.Capabilities + + Log func(string, ...interface{}) +} + +// renderResources renders the templates in a chart +// +// TODO: This function is badly in need of a refactor. +// TODO: As part of the refactor the duplicate code in cmd/helm/template.go should be removed +// This code has to do with writing files to disk. +func (c *Configuration) renderResources(ch *chart.Chart, values chartutil.Values, releaseName, outputDir string, subNotes, useReleaseName, includeCrds bool, pr postrender.PostRenderer, dryRun bool) ([]*release.Hook, *bytes.Buffer, string, error) { + hs := []*release.Hook{} + b := bytes.NewBuffer(nil) + + caps, err := c.getCapabilities() + if err != nil { + return hs, b, "", err + } + + if ch.Metadata.KubeVersion != "" { + if !chartutil.IsCompatibleRange(ch.Metadata.KubeVersion, caps.KubeVersion.String()) { + return hs, b, "", errors.Errorf("chart requires kubeVersion: %s which is incompatible with Kubernetes %s", ch.Metadata.KubeVersion, caps.KubeVersion.String()) + } + } + + var files map[string]string + var err2 error + + // A `helm template` or `helm install --dry-run` should not talk to the remote cluster. + // It will break in interesting and exotic ways because other data (e.g. discovery) + // is mocked. It is not up to the template author to decide when the user wants to + // connect to the cluster. So when the user says to dry run, respect the user's + // wishes and do not connect to the cluster. 
+	if !dryRun && c.RESTClientGetter != nil {
+		rest, err := c.RESTClientGetter.ToRESTConfig()
+		if err != nil {
+			return hs, b, "", err
+		}
+		files, err2 = engine.RenderWithClient(ch, values, rest)
+	} else {
+		files, err2 = engine.Render(ch, values)
+	}
+
+	if err2 != nil {
+		return hs, b, "", err2
+	}
+
+	// NOTES.txt gets rendered like all the other files, but because it's neither a hook nor a resource,
+	// pull it out of here into a separate file so that we can actually use the output of the rendered
+	// text file. We have to spin through this map because the file contains path information, so we
+	// look for terminating NOTES.txt. We also remove it from the files so that we don't have to skip
+	// it in the sortHooks.
+	var notesBuffer bytes.Buffer
+	for k, v := range files {
+		if strings.HasSuffix(k, notesFileSuffix) {
+			if subNotes || (k == path.Join(ch.Name(), "templates", notesFileSuffix)) {
+				// If buffer contains data, add newline before adding more
+				if notesBuffer.Len() > 0 {
+					notesBuffer.WriteString("\n")
+				}
+				notesBuffer.WriteString(v)
+			}
+			delete(files, k)
+		}
+	}
+	notes := notesBuffer.String()
+
+	// Sort hooks, manifests, and partials. Only hooks and manifests are returned,
+	// as partials are not used after renderer.Render. Empty manifests are also
+	// removed here.
+	hs, manifests, err := releaseutil.SortManifests(files, caps.APIVersions, releaseutil.InstallOrder)
+	if err != nil {
+		// By catching parse errors here, we can prevent bogus releases from going
+		// to Kubernetes.
+		//
+		// We return the files as a big blob of data to help the user debug parser
+		// errors.
+		for name, content := range files {
+			if strings.TrimSpace(content) == "" {
+				continue
+			}
+			fmt.Fprintf(b, "---\n# Source: %s\n%s\n", name, content)
+		}
+		return hs, b, "", err
+	}
+
+	// Aggregate all valid manifests into one big doc.
+	fileWritten := make(map[string]bool)
+
+	if includeCrds {
+		for _, crd := range ch.CRDObjects() {
+			if outputDir == "" {
+				fmt.Fprintf(b, "---\n# Source: %s\n%s\n", crd.Name, string(crd.File.Data[:]))
+			} else {
+				err = writeToFile(outputDir, crd.Filename, string(crd.File.Data[:]), fileWritten[crd.Name])
+				if err != nil {
+					return hs, b, "", err
+				}
+				fileWritten[crd.Name] = true
+			}
+		}
+	}
+
+	for _, m := range manifests {
+		if outputDir == "" {
+			fmt.Fprintf(b, "---\n# Source: %s\n%s\n", m.Name, m.Content)
+		} else {
+			newDir := outputDir
+			if useReleaseName {
+				newDir = filepath.Join(outputDir, releaseName)
+			}
+			// NOTE: We do not have to worry about the post-renderer because
+			// output dir is only used by `helm template`. In the next major
+			// release, we should move this logic to template only as it is not
+			// used by install or upgrade
+			err = writeToFile(newDir, m.Name, m.Content, fileWritten[m.Name])
+			if err != nil {
+				return hs, b, "", err
+			}
+			fileWritten[m.Name] = true
+		}
+	}
+
+	if pr != nil {
+		b, err = pr.Run(b)
+		if err != nil {
+			return hs, b, notes, errors.Wrap(err, "error while running post render on files")
+		}
+	}
+
+	return hs, b, notes, nil
+}
+
+// RESTClientGetter gets the rest client
+type RESTClientGetter interface {
+	ToRESTConfig() (*rest.Config, error)
+	ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
+	ToRESTMapper() (meta.RESTMapper, error)
+}
+
+// DebugLog sets the logger that writes debug strings
+type DebugLog func(format string, v ...interface{})
+
+// getCapabilities builds a Capabilities from discovery information.
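+//
+// The result is cached on the Configuration: once c.Capabilities has been
+// populated, subsequent calls return it without another round-trip to the
+// cluster.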
+func (c *Configuration) getCapabilities() (*chartutil.Capabilities, error) {
+	if c.Capabilities != nil {
+		return c.Capabilities, nil
+	}
+	dc, err := c.RESTClientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get Kubernetes discovery client")
+	}
+	// force a discovery cache invalidation to always fetch the latest server version/capabilities.
+	dc.Invalidate()
+	kubeVersion, err := dc.ServerVersion()
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get server version from Kubernetes")
+	}
+	// Issue #6361:
+	// Client-Go emits an error when an API service is registered but unimplemented.
+	// We trap that error here and print a warning. But since the discovery client continues
+	// building the API object, it is correctly populated with all valid APIs.
+	// See https://github.com/kubernetes/kubernetes/issues/72051#issuecomment-521157642
+	apiVersions, err := GetVersionSet(dc)
+	if err != nil {
+		if discovery.IsGroupDiscoveryFailedError(err) {
+			c.Log("WARNING: The Kubernetes server has an orphaned API service. Server reports: %s", err)
+			c.Log("WARNING: To fix this, kubectl delete apiservice <service-name>")
+		} else {
+			return nil, errors.Wrap(err, "could not get apiVersions from Kubernetes")
+		}
+	}
+
+	c.Capabilities = &chartutil.Capabilities{
+		APIVersions: apiVersions,
+		KubeVersion: chartutil.KubeVersion{
+			Version: kubeVersion.GitVersion,
+			Major:   kubeVersion.Major,
+			Minor:   kubeVersion.Minor,
+		},
+	}
+	return c.Capabilities, nil
+}
+
+// KubernetesClientSet creates a new kubernetes ClientSet based on the configuration
+func (c *Configuration) KubernetesClientSet() (kubernetes.Interface, error) {
+	conf, err := c.RESTClientGetter.ToRESTConfig()
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to generate config for kubernetes client")
+	}
+
+	return kubernetes.NewForConfig(conf)
+}
+
+// Now generates a timestamp
+//
+// If the configuration has a Timestamper on it, that will be used.
+// Otherwise, this will use time.Now().
+func (c *Configuration) Now() time.Time {
+	return Timestamper()
+}
+
+func (c *Configuration) releaseContent(name string, version int) (*release.Release, error) {
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("releaseContent: Release name is invalid: %s", name)
+	}
+
+	if version <= 0 {
+		return c.Releases.Last(name)
+	}
+
+	return c.Releases.Get(name, version)
+}
+
+// GetVersionSet retrieves a set of available k8s API versions
+func GetVersionSet(client discovery.ServerResourcesInterface) (chartutil.VersionSet, error) {
+	groups, resources, err := client.ServerGroupsAndResources()
+	if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
+		return chartutil.DefaultVersionSet, errors.Wrap(err, "could not get apiVersions from Kubernetes")
+	}
+
+	// FIXME: The Kubernetes test fixture for cli appears to always return nil
+	// for calls to Discovery().ServerGroupsAndResources(). So in this case, we
+	// return the default API list. This is also a safe value to return in any
+	// other odd-ball case.
+	if len(groups) == 0 && len(resources) == 0 {
+		return chartutil.DefaultVersionSet, nil
+	}
+
+	versionMap := make(map[string]interface{})
+	versions := []string{}
+
+	// Extract the groups
+	for _, g := range groups {
+		for _, gv := range g.Versions {
+			versionMap[gv.GroupVersion] = struct{}{}
+		}
+	}
+
+	// Extract the resources
+	var id string
+	var ok bool
+	for _, r := range resources {
+		for _, rl := range r.APIResources {
+
+			// A Kind at a GroupVersion can show up more than once. We only want
+			// it displayed once in the final output.
+			id = path.Join(r.GroupVersion, rl.Kind)
+			if _, ok = versionMap[id]; !ok {
+				versionMap[id] = struct{}{}
+			}
+		}
+	}
+
+	// Convert to a form that NewVersionSet can use
+	for k := range versionMap {
+		versions = append(versions, k)
+	}
+
+	return chartutil.VersionSet(versions), nil
+}
+
+// recordRelease records the release with an update operation in case reuse has been set.
+func (c *Configuration) recordRelease(r *release.Release) {
+	if err := c.Releases.Update(r); err != nil {
+		c.Log("warning: Failed to update release %s: %s", r.Name, err)
+	}
+}
+
+// Init initializes the action configuration
+func (c *Configuration) Init(getter genericclioptions.RESTClientGetter, namespace, helmDriver string, log DebugLog) error {
+	kc := kube.New(getter)
+	kc.Log = log
+
+	lazyClient := &lazyClient{
+		namespace: namespace,
+		clientFn:  kc.Factory.KubernetesClientSet,
+	}
+
+	var store *storage.Storage
+	switch helmDriver {
+	case "secret", "secrets", "":
+		d := driver.NewSecrets(newSecretClient(lazyClient))
+		d.Log = log
+		store = storage.Init(d)
+	case "configmap", "configmaps":
+		d := driver.NewConfigMaps(newConfigMapClient(lazyClient))
+		d.Log = log
+		store = storage.Init(d)
+	case "memory":
+		var d *driver.Memory
+		if c.Releases != nil {
+			if mem, ok := c.Releases.Driver.(*driver.Memory); ok {
+				// This function can be called more than once (e.g., helm list --all-namespaces).
+				// If a memory driver was already initialized, re-use it but set the possibly new namespace.
+				// We re-use it in case some releases were already created in the existing memory driver.
+				d = mem
+			}
+		}
+		if d == nil {
+			d = driver.NewMemory()
+		}
+		d.SetNamespace(namespace)
+		store = storage.Init(d)
+	case "sql":
+		d, err := driver.NewSQL(
+			os.Getenv("HELM_DRIVER_SQL_CONNECTION_STRING"),
+			log,
+			namespace,
+		)
+		if err != nil {
+			panic(fmt.Sprintf("Unable to instantiate SQL driver: %v", err))
+		}
+		store = storage.Init(d)
+	default:
+		// Not sure what to do here.
+		panic("Unknown driver in HELM_DRIVER: " + helmDriver)
+	}
+
+	c.RESTClientGetter = getter
+	c.KubeClient = kc
+	c.Releases = store
+	c.Log = log
+
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/chart_export.go b/vendor/helm.sh/helm/v3/pkg/action/chart_export.go
new file mode 100644
index 0000000000..75840d8bce
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/chart_export.go
@@ -0,0 +1,63 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package action + +import ( + "fmt" + "io" + "path/filepath" + + "helm.sh/helm/v3/internal/experimental/registry" + "helm.sh/helm/v3/pkg/chartutil" +) + +// ChartExport performs a chart export operation. +type ChartExport struct { + cfg *Configuration + + Destination string +} + +// NewChartExport creates a new ChartExport object with the given configuration. +func NewChartExport(cfg *Configuration) *ChartExport { + return &ChartExport{ + cfg: cfg, + } +} + +// Run executes the chart export operation +func (a *ChartExport) Run(out io.Writer, ref string) error { + r, err := registry.ParseReference(ref) + if err != nil { + return err + } + + ch, err := a.cfg.RegistryClient.LoadChart(r) + if err != nil { + return err + } + + // Save the chart to local destination directory + err = chartutil.SaveDir(ch, a.Destination) + if err != nil { + return err + } + + d := filepath.Join(a.Destination, ch.Metadata.Name) + fmt.Fprintf(out, "Exported chart to %s/\n", d) + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/chart_list.go b/vendor/helm.sh/helm/v3/pkg/action/chart_list.go new file mode 100644 index 0000000000..db764b3a31 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/chart_list.go @@ -0,0 +1,38 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "io" +) + +// ChartList performs a chart list operation. +type ChartList struct { + cfg *Configuration +} + +// NewChartList creates a new ChartList object with the given configuration. +func NewChartList(cfg *Configuration) *ChartList { + return &ChartList{ + cfg: cfg, + } +} + +// Run executes the chart list operation +func (a *ChartList) Run(out io.Writer) error { + return a.cfg.RegistryClient.PrintChartTable() +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/chart_pull.go b/vendor/helm.sh/helm/v3/pkg/action/chart_pull.go new file mode 100644 index 0000000000..8967552013 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/chart_pull.go @@ -0,0 +1,44 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "io" + + "helm.sh/helm/v3/internal/experimental/registry" +) + +// ChartPull performs a chart pull operation. +type ChartPull struct { + cfg *Configuration +} + +// NewChartPull creates a new ChartPull object with the given configuration. 
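+//
+// A minimal usage sketch (the cfg variable and the chart reference are
+// hypothetical; this action requires the HELM_EXPERIMENTAL_OCI registry
+// support to be enabled):
+//
+//     pull := action.NewChartPull(cfg)
+//     err := pull.Run(os.Stdout, "localhost:5000/myrepo/mychart:0.1.0")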
+func NewChartPull(cfg *Configuration) *ChartPull { + return &ChartPull{ + cfg: cfg, + } +} + +// Run executes the chart pull operation +func (a *ChartPull) Run(out io.Writer, ref string) error { + r, err := registry.ParseReference(ref) + if err != nil { + return err + } + return a.cfg.RegistryClient.PullChartToCache(r) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/chart_push.go b/vendor/helm.sh/helm/v3/pkg/action/chart_push.go new file mode 100644 index 0000000000..91ec49d388 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/chart_push.go @@ -0,0 +1,44 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "io" + + "helm.sh/helm/v3/internal/experimental/registry" +) + +// ChartPush performs a chart push operation. +type ChartPush struct { + cfg *Configuration +} + +// NewChartPush creates a new ChartPush object with the given configuration. +func NewChartPush(cfg *Configuration) *ChartPush { + return &ChartPush{ + cfg: cfg, + } +} + +// Run executes the chart push operation +func (a *ChartPush) Run(out io.Writer, ref string) error { + r, err := registry.ParseReference(ref) + if err != nil { + return err + } + return a.cfg.RegistryClient.PushChart(r) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/chart_remove.go b/vendor/helm.sh/helm/v3/pkg/action/chart_remove.go new file mode 100644 index 0000000000..3c0fc2ed75 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/chart_remove.go @@ -0,0 +1,44 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "io" + + "helm.sh/helm/v3/internal/experimental/registry" +) + +// ChartRemove performs a chart remove operation. +type ChartRemove struct { + cfg *Configuration +} + +// NewChartRemove creates a new ChartRemove object with the given configuration. +func NewChartRemove(cfg *Configuration) *ChartRemove { + return &ChartRemove{ + cfg: cfg, + } +} + +// Run executes the chart remove operation +func (a *ChartRemove) Run(out io.Writer, ref string) error { + r, err := registry.ParseReference(ref) + if err != nil { + return err + } + return a.cfg.RegistryClient.RemoveChart(r) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/chart_save.go b/vendor/helm.sh/helm/v3/pkg/action/chart_save.go new file mode 100644 index 0000000000..14a2d7c3c1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/chart_save.go @@ -0,0 +1,51 @@ +/* +Copyright The Helm Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"io"
+
+	"helm.sh/helm/v3/internal/experimental/registry"
+	"helm.sh/helm/v3/pkg/chart"
+)
+
+// ChartSave performs a chart save operation.
+type ChartSave struct {
+	cfg *Configuration
+}
+
+// NewChartSave creates a new ChartSave object with the given configuration.
+func NewChartSave(cfg *Configuration) *ChartSave {
+	return &ChartSave{
+		cfg: cfg,
+	}
+}
+
+// Run executes the chart save operation
+func (a *ChartSave) Run(out io.Writer, ch *chart.Chart, ref string) error {
+	r, err := registry.ParseReference(ref)
+	if err != nil {
+		return err
+	}
+
+	// If no tag is present, use the chart version
+	if r.Tag == "" {
+		r.Tag = ch.Metadata.Version
+	}
+
+	return a.cfg.RegistryClient.SaveChart(ch, r)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/dependency.go b/vendor/helm.sh/helm/v3/pkg/action/dependency.go
new file mode 100644
index 0000000000..578a46aecb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/dependency.go
@@ -0,0 +1,227 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Masterminds/semver/v3"
+	"github.com/gosuri/uitable"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// Dependency is the action for building a given chart's dependency tree.
+//
+// It provides the implementation of 'helm dependency' and its respective subcommands.
+type Dependency struct {
+	Verify      bool
+	Keyring     string
+	SkipRefresh bool
+}
+
+// NewDependency creates a new Dependency object with the given configuration.
+func NewDependency() *Dependency {
+	return &Dependency{}
+}
+
+// List executes 'helm dependency list'.
+func (d *Dependency) List(chartpath string, out io.Writer) error {
+	c, err := loader.Load(chartpath)
+	if err != nil {
+		return err
+	}
+
+	if c.Metadata.Dependencies == nil {
+		fmt.Fprintf(out, "WARNING: no dependencies at %s\n", filepath.Join(chartpath, "charts"))
+		return nil
+	}
+
+	d.printDependencies(chartpath, out, c)
+	fmt.Fprintln(out)
+	d.printMissing(chartpath, out, c.Metadata.Dependencies)
+	return nil
+}
+
+// dependencyStatus returns a string describing the status of a dependency vis-à-vis the parent chart.
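+//
+// Typical return values, derived from the function body below, include "ok",
+// "unpacked", "missing", "wrong version", "invalid version", "corrupt",
+// "misnamed", "bad pattern", and "too many matches" (descriptive, not an
+// exhaustive contract).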
+// dependencyStatus returns a string describing the status of a dependency vis-à-vis the parent chart.
+func (d *Dependency) dependencyStatus(chartpath string, dep *chart.Dependency, parent *chart.Chart) string {
+	filename := fmt.Sprintf("%s-%s.tgz", dep.Name, "*")
+
+	// If a chart is unpacked, this will check the unpacked chart's `charts/` directory for tarballs.
+	// Technically, this is COMPLETELY unnecessary, and should be removed in Helm 4. It is here
+	// to preserve backward compatibility. In Helm 2/3, there is a "difference" between
+	// the tgz version (which outputs "ok" if it unpacks) and the loaded version (which outputs
+	// "unpacked"). Early in Helm 2's history, this would have made a difference. But it no
+	// longer does. However, since this code shipped with Helm 3, the output must remain stable
+	// until Helm 4.
+	switch archives, err := filepath.Glob(filepath.Join(chartpath, "charts", filename)); {
+	case err != nil:
+		return "bad pattern"
+	case len(archives) > 1:
+		// See if the second part is a SemVer
+		found := []string{}
+		for _, arc := range archives {
+			// we need to strip the prefix dirs and the extension off.
+			filename = strings.TrimSuffix(filepath.Base(arc), ".tgz")
+			maybeVersion := strings.TrimPrefix(filename, fmt.Sprintf("%s-", dep.Name))
+
+			if _, err := semver.StrictNewVersion(maybeVersion); err == nil {
+				// If the version parsed without an error, it is possibly a valid
+				// version.
+				found = append(found, arc)
+			}
+		}
+
+		if l := len(found); l == 1 {
+			// If we get here, we do the same thing as in len(archives) == 1.
+			if r := statArchiveForStatus(found[0], dep); r != "" {
+				return r
+			}
+
+			// Fall through and look for directories
+		} else if l > 1 {
+			return "too many matches"
+		}
+
+		// The sanest thing to do here is to fall through and see if we have any directory
+		// matches.
+
+	case len(archives) == 1:
+		archive := archives[0]
+		if r := statArchiveForStatus(archive, dep); r != "" {
+			return r
+		}
+
+	}
+	// End unnecessary code.
+
+	var depChart *chart.Chart
+	for _, item := range parent.Dependencies() {
+		if item.Name() == dep.Name {
+			depChart = item
+		}
+	}
+
+	if depChart == nil {
+		return "missing"
+	}
+
+	if depChart.Metadata.Version != dep.Version {
+		constraint, err := semver.NewConstraint(dep.Version)
+		if err != nil {
+			return "invalid version"
+		}
+
+		v, err := semver.NewVersion(depChart.Metadata.Version)
+		if err != nil {
+			return "invalid version"
+		}
+
+		if !constraint.Check(v) {
+			return "wrong version"
+		}
+	}
+
+	return "unpacked"
+}
+
+// stat an archive and return a message if the stat is successful
+//
+// This is a refactor of the code originally in dependencyStatus. It is here to
+// support legacy behavior, and should be removed in Helm 4.
+func statArchiveForStatus(archive string, dep *chart.Dependency) string {
+	if _, err := os.Stat(archive); err == nil {
+		c, err := loader.Load(archive)
+		if err != nil {
+			return "corrupt"
+		}
+		if c.Name() != dep.Name {
+			return "misnamed"
+		}
+
+		if c.Metadata.Version != dep.Version {
+			constraint, err := semver.NewConstraint(dep.Version)
+			if err != nil {
+				return "invalid version"
+			}
+
+			v, err := semver.NewVersion(c.Metadata.Version)
+			if err != nil {
+				return "invalid version"
+			}
+
+			if !constraint.Check(v) {
+				return "wrong version"
+			}
+		}
+		return "ok"
+	}
+	return ""
+}
+
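The status checks above lean on Masterminds/semver constraint matching; a small standalone illustration of that pattern (the version values are made up):

	package main

	import (
		"fmt"

		"github.com/Masterminds/semver/v3"
	)

	func main() {
		// dep.Version in Chart.yaml may be a range rather than an exact version.
		constraint, err := semver.NewConstraint("~1.2.0")
		if err != nil {
			panic(err)
		}
		v := semver.MustParse("1.2.7")
		fmt.Println(constraint.Check(v)) // true: 1.2.7 satisfies ~1.2.0
	}
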
+// printDependencies prints all of the dependencies in the yaml file.
+func (d *Dependency) printDependencies(chartpath string, out io.Writer, c *chart.Chart) {
+	table := uitable.New()
+	table.MaxColWidth = 80
+	table.AddRow("NAME", "VERSION", "REPOSITORY", "STATUS")
+	for _, row := range c.Metadata.Dependencies {
+		table.AddRow(row.Name, row.Version, row.Repository, d.dependencyStatus(chartpath, row, c))
+	}
+	fmt.Fprintln(out, table)
+}
+
+// printMissing prints warnings about charts that are present on disk, but are
+// not in Chart.yaml.
+func (d *Dependency) printMissing(chartpath string, out io.Writer, reqs []*chart.Dependency) {
+	folder := filepath.Join(chartpath, "charts/*")
+	files, err := filepath.Glob(folder)
+	if err != nil {
+		fmt.Fprintln(out, err)
+		return
+	}
+
+	for _, f := range files {
+		fi, err := os.Stat(f)
+		if err != nil {
+			fmt.Fprintf(out, "Warning: %s\n", err)
+			// Skip this entry: fi is nil when Stat fails, so it must not be used below.
+			continue
+		}
+		// Skip anything that is not a directory and not a tgz file.
+		if !fi.IsDir() && filepath.Ext(f) != ".tgz" {
+			continue
+		}
+		c, err := loader.Load(f)
+		if err != nil {
+			fmt.Fprintf(out, "WARNING: %q is not a chart.\n", f)
+			continue
+		}
+		found := false
+		for _, d := range reqs {
+			if d.Name == c.Name() {
+				found = true
+				break
+			}
+		}
+		if !found {
+			fmt.Fprintf(out, "WARNING: %q is not in Chart.yaml.\n", f)
+		}
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/doc.go b/vendor/helm.sh/helm/v3/pkg/action/doc.go
new file mode 100644
index 0000000000..3c91bd6184
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package action contains the logic for each action that Helm can perform.
+//
+// This is a library for calling top-level Helm actions like 'install',
+// 'upgrade', or 'list'. Actions approximately match the command line
+// invocations that the Helm client uses.
+package action
diff --git a/vendor/helm.sh/helm/v3/pkg/action/get.go b/vendor/helm.sh/helm/v3/pkg/action/get.go
new file mode 100644
index 0000000000..f44b53307f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/get.go
@@ -0,0 +1,47 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"helm.sh/helm/v3/pkg/release"
+)
+
+// Get is the action for checking a given release's information.
+//
+// It provides the implementation of 'helm get' and its respective subcommands (except `helm get values`).
+type Get struct {
+	cfg *Configuration
+
+	// Initializing Version to 0 will get the latest revision of the release.
+ Version int +} + +// NewGet creates a new Get object with the given configuration. +func NewGet(cfg *Configuration) *Get { + return &Get{ + cfg: cfg, + } +} + +// Run executes 'helm get' against the given release. +func (g *Get) Run(name string) (*release.Release, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + return g.cfg.releaseContent(name, g.Version) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/get_values.go b/vendor/helm.sh/helm/v3/pkg/action/get_values.go new file mode 100644 index 0000000000..9c32db2132 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/get_values.go @@ -0,0 +1,60 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "helm.sh/helm/v3/pkg/chartutil" +) + +// GetValues is the action for checking a given release's values. +// +// It provides the implementation of 'helm get values'. +type GetValues struct { + cfg *Configuration + + Version int + AllValues bool +} + +// NewGetValues creates a new GetValues object with the given configuration. +func NewGetValues(cfg *Configuration) *GetValues { + return &GetValues{ + cfg: cfg, + } +} + +// Run executes 'helm get values' against the given release. +func (g *GetValues) Run(name string) (map[string]interface{}, error) { + if err := g.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + rel, err := g.cfg.releaseContent(name, g.Version) + if err != nil { + return nil, err + } + + // If the user wants all values, compute the values and return. + if g.AllValues { + cfg, err := chartutil.CoalesceValues(rel.Chart, rel.Config) + if err != nil { + return nil, err + } + return cfg, nil + } + return rel.Config, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/history.go b/vendor/helm.sh/helm/v3/pkg/action/history.go new file mode 100644 index 0000000000..0430aaf7a5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/history.go @@ -0,0 +1,58 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/release" +) + +// History is the action for checking the release's ledger. +// +// It provides the implementation of 'helm history'. +// It returns all the revisions for a specific release. +// To list up to one revision of every release in one specific, or in all, +// namespaces, see the List action. 
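A sketch of reading back a release's values through the GetValues action shown above; cfg must be an initialized *action.Configuration, whose wiring depends on the caller's environment and is not shown:

	package helmexample

	import (
		"fmt"

		"helm.sh/helm/v3/pkg/action"
	)

	// printValues prints the values of the named release. With AllValues set,
	// chart defaults are coalesced with the user-supplied overrides.
	func printValues(cfg *action.Configuration, releaseName string) error {
		get := action.NewGetValues(cfg)
		get.AllValues = true
		vals, err := get.Run(releaseName)
		if err != nil {
			return err
		}
		fmt.Println(vals)
		return nil
	}
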
+type History struct {
+	cfg *Configuration
+
+	Max     int
+	Version int
+}
+
+// NewHistory creates a new History object with the given configuration.
+func NewHistory(cfg *Configuration) *History {
+	return &History{
+		cfg: cfg,
+	}
+}
+
+// Run executes 'helm history' against the given release.
+func (h *History) Run(name string) ([]*release.Release, error) {
+	if err := h.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("release name is invalid: %s", name)
+	}
+
+	h.cfg.Log("getting history for release %s", name)
+	return h.cfg.Releases.History(name)
+}
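Before the hook machinery below, a short sketch of listing revisions via the History action; as with the previous sketch, the *action.Configuration setup (kubeconfig, namespace, storage driver) is assumed to be done by the caller:

	package helmexample

	import (
		"fmt"

		"helm.sh/helm/v3/pkg/action"
	)

	// printHistory lists every stored revision of a release.
	func printHistory(cfg *action.Configuration, name string) error {
		hist := action.NewHistory(cfg)
		revs, err := hist.Run(name)
		if err != nil {
			return err
		}
		for _, r := range revs {
			fmt.Printf("revision %d: %s\n", r.Version, r.Info.Status)
		}
		return nil
	}
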
diff --git a/vendor/helm.sh/helm/v3/pkg/action/hooks.go b/vendor/helm.sh/helm/v3/pkg/action/hooks.go
new file mode 100644
index 0000000000..40c1ffdb6d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/hooks.go
@@ -0,0 +1,151 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"bytes"
+	"sort"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/release"
+	helmtime "helm.sh/helm/v3/pkg/time"
+)
+
+// execHook executes all of the hooks for the given hook event.
+func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, timeout time.Duration) error {
+	executingHooks := []*release.Hook{}
+
+	for _, h := range rl.Hooks {
+		for _, e := range h.Events {
+			if e == hook {
+				executingHooks = append(executingHooks, h)
+			}
+		}
+	}
+
+	// hooks are pre-ordered by kind, so keep the order stable
+	sort.Stable(hookByWeight(executingHooks))
+
+	for _, h := range executingHooks {
+		// Set default delete policy to before-hook-creation
+		if h.DeletePolicies == nil || len(h.DeletePolicies) == 0 {
+			// TODO(jlegrone): Only apply before-hook-creation delete policy to run-to-completion
+			// resources. For all other resource types update in place if a
+			// resource with the same name already exists and is owned by the
+			// current release.
+			h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation}
+		}
+
+		if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil {
+			return err
+		}
+
+		resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), true)
+		if err != nil {
+			return errors.Wrapf(err, "unable to build kubernetes object for %s hook %s", hook, h.Path)
+		}
+
+		// Record the time at which the hook was applied to the cluster
+		h.LastRun = release.HookExecution{
+			StartedAt: helmtime.Now(),
+			Phase:     release.HookPhaseRunning,
+		}
+		cfg.recordRelease(rl)
+
+		// As long as the implementation of WatchUntilReady does not panic, HookPhaseFailed or HookPhaseSucceeded
+		// should always be set by this function. If we fail to do that for any reason, then HookPhaseUnknown is
+		// the most appropriate value to surface.
+		h.LastRun.Phase = release.HookPhaseUnknown
+
+		// Create hook resources
+		if _, err := cfg.KubeClient.Create(resources); err != nil {
+			h.LastRun.CompletedAt = helmtime.Now()
+			h.LastRun.Phase = release.HookPhaseFailed
+			return errors.Wrapf(err, "warning: Hook %s %s failed", hook, h.Path)
+		}
+
+		// Watch hook resources until they have completed
+		err = cfg.KubeClient.WatchUntilReady(resources, timeout)
+		// Note the time of success/failure
+		h.LastRun.CompletedAt = helmtime.Now()
+		// Mark hook as succeeded or failed
+		if err != nil {
+			h.LastRun.Phase = release.HookPhaseFailed
+			// If a hook failed, check the annotation of the hook to determine whether the hook should be deleted
+			// under the failed condition. If so, then clear the corresponding resource object in the hook
+			if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil {
+				return err
+			}
+			return err
+		}
+		h.LastRun.Phase = release.HookPhaseSucceeded
+	}
+
+	// If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted
+	// under the succeeded condition. If so, then clear the corresponding resource object in each hook
+	for _, h := range executingHooks {
+		if err := cfg.deleteHookByPolicy(h, release.HookSucceeded); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// hookByWeight is a sorter for hooks
+type hookByWeight []*release.Hook

+func (x hookByWeight) Len() int      { return len(x) }
+func (x hookByWeight) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x hookByWeight) Less(i, j int) bool {
+	if x[i].Weight == x[j].Weight {
+		return x[i].Name < x[j].Name
+	}
+	return x[i].Weight < x[j].Weight
+}
+
+// deleteHookByPolicy deletes a hook if the hook policy instructs it to
+func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy) error {
+	// Never delete CustomResourceDefinitions; this could cause lots of
+	// cascading garbage collection.
+	if h.Kind == "CustomResourceDefinition" {
+		return nil
+	}
+	if hookHasDeletePolicy(h, policy) {
+		resources, err := cfg.KubeClient.Build(bytes.NewBufferString(h.Manifest), false)
+		if err != nil {
+			return errors.Wrapf(err, "unable to build kubernetes object for deleting hook %s", h.Path)
+		}
+		_, errs := cfg.KubeClient.Delete(resources)
+		if len(errs) > 0 {
+			return errors.New(joinErrors(errs))
+		}
+	}
+	return nil
+}
+
+// hookHasDeletePolicy determines whether the defined hook deletion policy matches the hook deletion policies
+// supported by helm. If so, mark the hook as one that should be deleted.
+func hookHasDeletePolicy(h *release.Hook, policy release.HookDeletePolicy) bool {
+	for _, v := range h.DeletePolicies {
+		if policy == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go
new file mode 100644
index 0000000000..25274fcd26
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/install.go
@@ -0,0 +1,720 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package action + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "text/template" + "time" + + "github.com/Masterminds/sprig/v3" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/resource" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/downloader" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/kube" + kubefake "helm.sh/helm/v3/pkg/kube/fake" + "helm.sh/helm/v3/pkg/postrender" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/repo" + "helm.sh/helm/v3/pkg/storage" + "helm.sh/helm/v3/pkg/storage/driver" +) + +// releaseNameMaxLen is the maximum length of a release name. +// +// As of Kubernetes 1.4, the max limit on a name is 63 chars. We reserve 10 for +// charts to add data. Effectively, that gives us 53 chars. +// See https://github.com/helm/helm/issues/1528 +const releaseNameMaxLen = 53 + +// NOTESFILE_SUFFIX that we want to treat special. It goes through the templating engine +// but it's not a yaml file (resource) hence can't have hooks, etc. And the user actually +// wants to see this file after rendering in the status command. However, it must be a suffix +// since there can be filepath in front of it. +const notesFileSuffix = "NOTES.txt" + +const defaultDirectoryPermission = 0755 + +// Install performs an installation operation. +type Install struct { + cfg *Configuration + + ChartPathOptions + + ClientOnly bool + CreateNamespace bool + DryRun bool + DisableHooks bool + Replace bool + Wait bool + WaitForJobs bool + Devel bool + DependencyUpdate bool + Timeout time.Duration + Namespace string + ReleaseName string + GenerateName bool + NameTemplate string + Description string + OutputDir string + Atomic bool + SkipCRDs bool + SubNotes bool + DisableOpenAPIValidation bool + IncludeCRDs bool + // KubeVersion allows specifying a custom kubernetes version to use and + // APIVersions allows a manual set of supported API Versions to be passed + // (for things like templating). These are ignored if ClientOnly is false + KubeVersion *chartutil.KubeVersion + APIVersions chartutil.VersionSet + // Used by helm template to render charts with .Release.IsUpgrade. Ignored if Dry-Run is false + IsUpgrade bool + // Used by helm template to add the release as part of OutputDir path + // OutputDir/ + UseReleaseName bool + PostRenderer postrender.PostRenderer +} + +// ChartPathOptions captures common options used for controlling chart paths +type ChartPathOptions struct { + CaFile string // --ca-file + CertFile string // --cert-file + KeyFile string // --key-file + InsecureSkipTLSverify bool // --insecure-skip-verify + Keyring string // --keyring + Password string // --password + PassCredentialsAll bool // --pass-credentials + RepoURL string // --repo + Username string // --username + Verify bool // --verify + Version string // --version +} + +// NewInstall creates a new Install object with the given configuration. +func NewInstall(cfg *Configuration) *Install { + return &Install{ + cfg: cfg, + } +} + +func (i *Install) installCRDs(crds []chart.CRD) error { + // We do these one file at a time in the order they were read. 
+ totalItems := []*resource.Info{} + for _, obj := range crds { + // Read in the resources + res, err := i.cfg.KubeClient.Build(bytes.NewBuffer(obj.File.Data), false) + if err != nil { + return errors.Wrapf(err, "failed to install CRD %s", obj.Name) + } + + // Send them to Kube + if _, err := i.cfg.KubeClient.Create(res); err != nil { + // If the error is CRD already exists, continue. + if apierrors.IsAlreadyExists(err) { + crdName := res[0].Name + i.cfg.Log("CRD %s is already present. Skipping.", crdName) + continue + } + return errors.Wrapf(err, "failed to install CRD %s", obj.Name) + } + totalItems = append(totalItems, res...) + } + if len(totalItems) > 0 { + // Invalidate the local cache, since it will not have the new CRDs + // present. + discoveryClient, err := i.cfg.RESTClientGetter.ToDiscoveryClient() + if err != nil { + return err + } + i.cfg.Log("Clearing discovery cache") + discoveryClient.Invalidate() + // Give time for the CRD to be recognized. + + if err := i.cfg.KubeClient.Wait(totalItems, 60*time.Second); err != nil { + return err + } + + // Make sure to force a rebuild of the cache. + discoveryClient.ServerGroups() + } + return nil +} + +// Run executes the installation +// +// If DryRun is set to true, this will prepare the release, but not install it +func (i *Install) Run(chrt *chart.Chart, vals map[string]interface{}) (*release.Release, error) { + // Check reachability of cluster unless in client-only mode (e.g. `helm template` without `--validate`) + if !i.ClientOnly { + if err := i.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + } + + if err := i.availableName(); err != nil { + return nil, err + } + + // Pre-install anything in the crd/ directory. We do this before Helm + // contacts the upstream server and builds the capabilities object. + if crds := chrt.CRDObjects(); !i.ClientOnly && !i.SkipCRDs && len(crds) > 0 { + // On dry run, bail here + if i.DryRun { + i.cfg.Log("WARNING: This chart or one of its subcharts contains CRDs. Rendering may fail or contain inaccuracies.") + } else if err := i.installCRDs(crds); err != nil { + return nil, err + } + } + + if i.ClientOnly { + // Add mock objects in here so it doesn't use Kube API server + // NOTE(bacongobbler): used for `helm template` + i.cfg.Capabilities = chartutil.DefaultCapabilities.Copy() + if i.KubeVersion != nil { + i.cfg.Capabilities.KubeVersion = *i.KubeVersion + } + i.cfg.Capabilities.APIVersions = append(i.cfg.Capabilities.APIVersions, i.APIVersions...) + i.cfg.KubeClient = &kubefake.PrintingKubeClient{Out: ioutil.Discard} + + mem := driver.NewMemory() + mem.SetNamespace(i.Namespace) + i.cfg.Releases = storage.Init(mem) + } else if !i.ClientOnly && len(i.APIVersions) > 0 { + i.cfg.Log("API Version list given outside of client only mode, this list will be ignored") + } + + if err := chartutil.ProcessDependencies(chrt, vals); err != nil { + return nil, err + } + + // Make sure if Atomic is set, that wait is set as well. 
This makes it so
+	// the user doesn't have to specify both
+	i.Wait = i.Wait || i.Atomic
+
+	caps, err := i.cfg.getCapabilities()
+	if err != nil {
+		return nil, err
+	}
+
+	// special case for helm template --is-upgrade
+	isUpgrade := i.IsUpgrade && i.DryRun
+	options := chartutil.ReleaseOptions{
+		Name:      i.ReleaseName,
+		Namespace: i.Namespace,
+		Revision:  1,
+		IsInstall: !isUpgrade,
+		IsUpgrade: isUpgrade,
+	}
+	valuesToRender, err := chartutil.ToRenderValues(chrt, vals, options, caps)
+	if err != nil {
+		return nil, err
+	}
+
+	rel := i.createRelease(chrt, vals)
+
+	var manifestDoc *bytes.Buffer
+	rel.Hooks, manifestDoc, rel.Info.Notes, err = i.cfg.renderResources(chrt, valuesToRender, i.ReleaseName, i.OutputDir, i.SubNotes, i.UseReleaseName, i.IncludeCRDs, i.PostRenderer, i.DryRun)
+	// Even for errors, attach this if available
+	if manifestDoc != nil {
+		rel.Manifest = manifestDoc.String()
+	}
+	// Check error from render
+	if err != nil {
+		rel.SetStatus(release.StatusFailed, fmt.Sprintf("failed to render resource: %s", err.Error()))
+		// Return a release with partial data so that the client can show debugging information.
+		return rel, err
+	}
+
+	// Mark this release as in-progress
+	rel.SetStatus(release.StatusPendingInstall, "Initial install underway")
+
+	var toBeAdopted kube.ResourceList
+	resources, err := i.cfg.KubeClient.Build(bytes.NewBufferString(rel.Manifest), !i.DisableOpenAPIValidation)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to build kubernetes objects from release manifest")
+	}
+
+	// It is safe to use "force" here because these are resources currently rendered by the chart.
+	err = resources.Visit(setMetadataVisitor(rel.Name, rel.Namespace, true))
+	if err != nil {
+		return nil, err
+	}
+
+	// Install requires an extra validation step of checking that resources
+	// don't already exist before we actually create resources. If we continue
+	// forward and create the release object with resources that already exist,
+	// we'll end up in a state where we will delete those resources upon
+	// deleting the release because the manifest will be pointing at that
+	// resource.
+	if !i.ClientOnly && !isUpgrade && len(resources) > 0 {
+		toBeAdopted, err = existingResourceConflict(resources, rel.Name, rel.Namespace)
+		if err != nil {
+			return nil, errors.Wrap(err, "rendered manifests contain a resource that already exists. Unable to continue with install")
+		}
+	}
+
+	// Bail out here if it is a dry run
+	if i.DryRun {
+		rel.Info.Description = "Dry run complete"
+		return rel, nil
+	}
+
+	if i.CreateNamespace {
+		ns := &v1.Namespace{
+			TypeMeta: metav1.TypeMeta{
+				APIVersion: "v1",
+				Kind:       "Namespace",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: i.Namespace,
+				Labels: map[string]string{
+					"name": i.Namespace,
+				},
+			},
+		}
+		buf, err := yaml.Marshal(ns)
+		if err != nil {
+			return nil, err
+		}
+		resourceList, err := i.cfg.KubeClient.Build(bytes.NewBuffer(buf), true)
+		if err != nil {
+			return nil, err
+		}
+		if _, err := i.cfg.KubeClient.Create(resourceList); err != nil && !apierrors.IsAlreadyExists(err) {
+			return nil, err
+		}
+	}
+
+	// If Replace is true, we need to supersede the last release.
+	if i.Replace {
+		if err := i.replaceRelease(rel); err != nil {
+			return nil, err
+		}
+	}
+
+	// Store the release in history before continuing (new in Helm 3). We always know
+	// that this is a create operation.
+ if err := i.cfg.Releases.Create(rel); err != nil { + // We could try to recover gracefully here, but since nothing has been installed + // yet, this is probably safer than trying to continue when we know storage is + // not working. + return rel, err + } + + // pre-install hooks + if !i.DisableHooks { + if err := i.cfg.execHook(rel, release.HookPreInstall, i.Timeout); err != nil { + return i.failRelease(rel, fmt.Errorf("failed pre-install: %s", err)) + } + } + + // At this point, we can do the install. Note that before we were detecting whether to + // do an update, but it's not clear whether we WANT to do an update if the re-use is set + // to true, since that is basically an upgrade operation. + if len(toBeAdopted) == 0 && len(resources) > 0 { + if _, err := i.cfg.KubeClient.Create(resources); err != nil { + return i.failRelease(rel, err) + } + } else if len(resources) > 0 { + if _, err := i.cfg.KubeClient.Update(toBeAdopted, resources, false); err != nil { + return i.failRelease(rel, err) + } + } + + if i.Wait { + if i.WaitForJobs { + if err := i.cfg.KubeClient.WaitWithJobs(resources, i.Timeout); err != nil { + return i.failRelease(rel, err) + } + } else { + if err := i.cfg.KubeClient.Wait(resources, i.Timeout); err != nil { + return i.failRelease(rel, err) + } + } + } + + if !i.DisableHooks { + if err := i.cfg.execHook(rel, release.HookPostInstall, i.Timeout); err != nil { + return i.failRelease(rel, fmt.Errorf("failed post-install: %s", err)) + } + } + + if len(i.Description) > 0 { + rel.SetStatus(release.StatusDeployed, i.Description) + } else { + rel.SetStatus(release.StatusDeployed, "Install complete") + } + + // This is a tricky case. The release has been created, but the result + // cannot be recorded. The truest thing to tell the user is that the + // release was created. However, the user will not be able to do anything + // further with this release. + // + // One possible strategy would be to do a timed retry to see if we can get + // this stored in the future. + if err := i.recordRelease(rel); err != nil { + i.cfg.Log("failed to record the release: %s", err) + } + + return rel, nil +} + +func (i *Install) failRelease(rel *release.Release, err error) (*release.Release, error) { + rel.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", i.ReleaseName, err.Error())) + if i.Atomic { + i.cfg.Log("Install failed and atomic is set, uninstalling release") + uninstall := NewUninstall(i.cfg) + uninstall.DisableHooks = i.DisableHooks + uninstall.KeepHistory = false + uninstall.Timeout = i.Timeout + if _, uninstallErr := uninstall.Run(i.ReleaseName); uninstallErr != nil { + return rel, errors.Wrapf(uninstallErr, "an error occurred while uninstalling the release. original install error: %s", err) + } + return rel, errors.Wrapf(err, "release %s failed, and has been uninstalled due to atomic being set", i.ReleaseName) + } + i.recordRelease(rel) // Ignore the error, since we have another error to deal with. 
+	return rel, err
+}
+
+// availableName tests whether a name is available
+//
+// Roughly, this will return an error if name is
+//
+// - empty
+// - too long
+// - already in use, and not deleted
+// - used by a deleted release, and i.Replace is false
+func (i *Install) availableName() error {
+	start := i.ReleaseName
+	if start == "" {
+		return errors.New("name is required")
+	}
+
+	if len(start) > releaseNameMaxLen {
+		return errors.Errorf("release name %q exceeds max length of %d", start, releaseNameMaxLen)
+	}
+
+	if i.DryRun {
+		return nil
+	}
+
+	h, err := i.cfg.Releases.History(start)
+	if err != nil || len(h) < 1 {
+		return nil
+	}
+	releaseutil.Reverse(h, releaseutil.SortByRevision)
+	rel := h[0]
+
+	if st := rel.Info.Status; i.Replace && (st == release.StatusUninstalled || st == release.StatusFailed) {
+		return nil
+	}
+	return errors.New("cannot re-use a name that is still in use")
+}
+
+// createRelease creates a new release object
+func (i *Install) createRelease(chrt *chart.Chart, rawVals map[string]interface{}) *release.Release {
+	ts := i.cfg.Now()
+	return &release.Release{
+		Name:      i.ReleaseName,
+		Namespace: i.Namespace,
+		Chart:     chrt,
+		Config:    rawVals,
+		Info: &release.Info{
+			FirstDeployed: ts,
+			LastDeployed:  ts,
+			Status:        release.StatusUnknown,
+		},
+		Version: 1,
+	}
+}
+
+// recordRelease with an update operation in case reuse has been set.
+func (i *Install) recordRelease(r *release.Release) error {
+	// This is a legacy function which has been reduced to a one-liner. Could probably
+	// refactor it out.
+	return i.cfg.Releases.Update(r)
+}
+
+// replaceRelease replaces an older release with this one
+//
+// This allows us to re-use names by superseding an existing release with a new one
+func (i *Install) replaceRelease(rel *release.Release) error {
+	hist, err := i.cfg.Releases.History(rel.Name)
+	if err != nil || len(hist) == 0 {
+		// No releases exist for this name, so we can return early
+		return nil
+	}
+
+	releaseutil.Reverse(hist, releaseutil.SortByRevision)
+	last := hist[0]
+
+	// Update version to the next available
+	rel.Version = last.Version + 1
+
+	// Do not change the status of a failed release.
+	if last.Info.Status == release.StatusFailed {
+		return nil
+	}
+
+	// For any other status, mark it as superseded and store the old record
+	last.SetStatus(release.StatusSuperseded, "superseded by new release")
+	return i.recordRelease(last)
+}
+
+// writeToFile writes the <data> to <output-dir>/<name>. <append> controls
+// whether the file is created or content will be appended.
+func writeToFile(outputDir string, name string, data string, append bool) error {
+	outfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))
+
+	err := ensureDirectoryForFile(outfileName)
+	if err != nil {
+		return err
+	}
+
+	f, err := createOrOpenFile(outfileName, append)
+	if err != nil {
+		return err
+	}
+
+	defer f.Close()
+
+	_, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data))
+
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("wrote %s\n", outfileName)
+	return nil
+}
+
+func createOrOpenFile(filename string, append bool) (*os.File, error) {
+	if append {
+		return os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0600)
+	}
+	return os.Create(filename)
+}
+
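With the full Run flow above in mind, a compact sketch of a programmatic install; the cfg construction and chart path are assumptions for illustration:

	package helmexample

	import (
		"helm.sh/helm/v3/pkg/action"
		"helm.sh/helm/v3/pkg/chart/loader"
		"helm.sh/helm/v3/pkg/release"
	)

	// installChart mirrors what the CLI does for `helm install`: load the
	// chart, configure the action, and run it. cfg must already be initialized
	// against a cluster (or set ClientOnly for template-style rendering).
	func installChart(cfg *action.Configuration, chartPath, name, namespace string, vals map[string]interface{}) (*release.Release, error) {
		chrt, err := loader.Load(chartPath)
		if err != nil {
			return nil, err
		}
		inst := action.NewInstall(cfg)
		inst.ReleaseName = name
		inst.Namespace = namespace
		inst.Wait = true // Atomic implies Wait, as noted in Run above
		return inst.Run(chrt, vals)
	}
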
+// ensureDirectoryForFile checks that the directory for the given file exists,
+// creating it if it does not.
+func ensureDirectoryForFile(file string) error {
+	baseDir := path.Dir(file)
+	_, err := os.Stat(baseDir)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	return os.MkdirAll(baseDir, defaultDirectoryPermission)
+}
+
+// NameAndChart returns the name and chart that should be used.
+//
+// This will read the flags and handle name generation if necessary.
+func (i *Install) NameAndChart(args []string) (string, string, error) {
+	flagsNotSet := func() error {
+		if i.GenerateName {
+			return errors.New("cannot set --generate-name and also specify a name")
+		}
+		if i.NameTemplate != "" {
+			return errors.New("cannot set --name-template and also specify a name")
+		}
+		return nil
+	}
+
+	if len(args) > 2 {
+		return args[0], args[1], errors.Errorf("expected at most two arguments, unexpected arguments: %v", strings.Join(args[2:], ", "))
+	}
+
+	if len(args) == 2 {
+		return args[0], args[1], flagsNotSet()
+	}
+
+	if i.NameTemplate != "" {
+		name, err := TemplateName(i.NameTemplate)
+		return name, args[0], err
+	}
+
+	if i.ReleaseName != "" {
+		return i.ReleaseName, args[0], nil
+	}
+
+	if !i.GenerateName {
+		return "", args[0], errors.New("must either provide a name or specify --generate-name")
+	}
+
+	base := filepath.Base(args[0])
+	if base == "." || base == "" {
+		base = "chart"
+	}
+	// if present, strip out the file extension from the name
+	if idx := strings.Index(base, "."); idx != -1 {
+		base = base[0:idx]
+	}
+
+	return fmt.Sprintf("%s-%d", base, time.Now().Unix()), args[0], nil
+}
+
+// TemplateName renders a name template, returning the name or an error.
+func TemplateName(nameTemplate string) (string, error) {
+	if nameTemplate == "" {
+		return "", nil
+	}
+
+	t, err := template.New("name-template").Funcs(sprig.TxtFuncMap()).Parse(nameTemplate)
+	if err != nil {
+		return "", err
+	}
+	var b bytes.Buffer
+	if err := t.Execute(&b, nil); err != nil {
+		return "", err
+	}
+
+	return b.String(), nil
+}
+
+// CheckDependencies checks the dependencies for a chart.
+func CheckDependencies(ch *chart.Chart, reqs []*chart.Dependency) error {
+	var missing []string
+
+OUTER:
+	for _, r := range reqs {
+		for _, d := range ch.Dependencies() {
+			if d.Name() == r.Name {
+				continue OUTER
+			}
+		}
+		missing = append(missing, r.Name)
+	}
+
+	if len(missing) > 0 {
+		return errors.Errorf("found in Chart.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", "))
+	}
+	return nil
+}
+
+// LocateChart looks for a chart directory in known places, and returns either the full path or an error.
+//
+// This does not ensure that the chart is well-formed; only that the requested filename exists.
+//
+// Order of resolution:
+// - relative to current working directory
+// - if path is absolute or begins with '.', error out here
+// - URL
+//
+// If 'verify' was set on ChartPathOptions, this will attempt to also verify the chart.
+func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) (string, error) { + name = strings.TrimSpace(name) + version := strings.TrimSpace(c.Version) + + if _, err := os.Stat(name); err == nil { + abs, err := filepath.Abs(name) + if err != nil { + return abs, err + } + if c.Verify { + if _, err := downloader.VerifyChart(abs, c.Keyring); err != nil { + return "", err + } + } + return abs, nil + } + if filepath.IsAbs(name) || strings.HasPrefix(name, ".") { + return name, errors.Errorf("path %q not found", name) + } + + dl := downloader.ChartDownloader{ + Out: os.Stdout, + Keyring: c.Keyring, + Getters: getter.All(settings), + Options: []getter.Option{ + getter.WithPassCredentialsAll(c.PassCredentialsAll), + getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile), + getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify), + }, + RepositoryConfig: settings.RepositoryConfig, + RepositoryCache: settings.RepositoryCache, + } + if c.Verify { + dl.Verify = downloader.VerifyAlways + } + if c.RepoURL != "" { + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(c.RepoURL, c.Username, c.Password, name, version, + c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, c.PassCredentialsAll, getter.All(settings)) + if err != nil { + return "", err + } + name = chartURL + + // Only pass the user/pass on when the user has said to or when the + // location of the chart repo and the chart are the same domain. + u1, err := url.Parse(c.RepoURL) + if err != nil { + return "", err + } + u2, err := url.Parse(chartURL) + if err != nil { + return "", err + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if c.PassCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth("", "")) + } + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } + + if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil { + return "", err + } + + filename, _, err := dl.DownloadTo(name, version, settings.RepositoryCache) + if err == nil { + lname, err := filepath.Abs(filename) + if err != nil { + return filename, err + } + return lname, nil + } else if settings.Debug { + return filename, err + } + + atVersion := "" + if version != "" { + atVersion = fmt.Sprintf(" at version %q", version) + } + return filename, errors.Errorf("failed to download %q%s (hint: running `helm repo update` may help)", name, atVersion) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go new file mode 100644 index 0000000000..9037782bb5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/lazyclient.go @@ -0,0 +1,197 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package action
+
+import (
+	"context"
+	"sync"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
+	"k8s.io/client-go/kubernetes"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// lazyClient is a workaround to deal with Kubernetes having an unstable client API.
+// In Kubernetes v1.18 the defaults were removed, which broke creating a
+// client without an explicit configuration. ಠ_ಠ
+type lazyClient struct {
+	// client caches an initialized kubernetes client
+	initClient sync.Once
+	client     kubernetes.Interface
+	clientErr  error
+
+	// clientFn loads a kubernetes client
+	clientFn func() (*kubernetes.Clientset, error)
+
+	// namespace passed to each client request
+	namespace string
+}
+
+func (s *lazyClient) init() error {
+	s.initClient.Do(func() {
+		s.client, s.clientErr = s.clientFn()
+	})
+	return s.clientErr
+}
+
+// secretClient implements a corev1.SecretInterface
+type secretClient struct{ *lazyClient }
+
+var _ corev1.SecretInterface = (*secretClient)(nil)
+
+func newSecretClient(lc *lazyClient) *secretClient {
+	return &secretClient{lazyClient: lc}
+}
+
+func (s *secretClient) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
+	if err := s.init(); err != nil {
+		return nil, err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).Create(ctx, secret, opts)
+}
+
+func (s *secretClient) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) {
+	if err := s.init(); err != nil {
+		return nil, err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).Update(ctx, secret, opts)
+}
+
+func (s *secretClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+	if err := s.init(); err != nil {
+		return err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).Delete(ctx, name, opts)
+}
+
+func (s *secretClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+	if err := s.init(); err != nil {
+		return err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).DeleteCollection(ctx, opts, listOpts)
+}
+
+func (s *secretClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) {
+	if err := s.init(); err != nil {
+		return nil, err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).Get(ctx, name, opts)
+}
+
+func (s *secretClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) {
+	if err := s.init(); err != nil {
+		return nil, err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).List(ctx, opts)
+}
+
+func (s *secretClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+	if err := s.init(); err != nil {
+		return nil, err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).Watch(ctx, opts)
+}
+
+func (s *secretClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.Secret, error) {
+	if err := s.init(); err != nil {
+		return nil, err
+	}
+	return s.client.CoreV1().Secrets(s.namespace).Patch(ctx, name, pt, data, opts, subresources...)
+} + +func (s *secretClient) Apply(ctx context.Context, secretConfiguration *applycorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (*v1.Secret, error) { + if err := s.init(); err != nil { + return nil, err + } + return s.client.CoreV1().Secrets(s.namespace).Apply(ctx, secretConfiguration, opts) +} + +// configMapClient implements a corev1.ConfigMapInterface +type configMapClient struct{ *lazyClient } + +var _ corev1.ConfigMapInterface = (*configMapClient)(nil) + +func newConfigMapClient(lc *lazyClient) *configMapClient { + return &configMapClient{lazyClient: lc} +} + +func (c *configMapClient) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Create(ctx, configMap, opts) +} + +func (c *configMapClient) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Update(ctx, configMap, opts) +} + +func (c *configMapClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + if err := c.init(); err != nil { + return err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Delete(ctx, name, opts) +} + +func (c *configMapClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + if err := c.init(); err != nil { + return err + } + return c.client.CoreV1().ConfigMaps(c.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (c *configMapClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Get(ctx, name, opts) +} + +func (c *configMapClient) List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).List(ctx, opts) +} + +func (c *configMapClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Watch(ctx, opts) +} + +func (c *configMapClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Patch(ctx, name, pt, data, opts, subresources...) +} + +func (c *configMapClient) Apply(ctx context.Context, configMap *applycorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (*v1.ConfigMap, error) { + if err := c.init(); err != nil { + return nil, err + } + return c.client.CoreV1().ConfigMaps(c.namespace).Apply(ctx, configMap, opts) +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/lint.go b/vendor/helm.sh/helm/v3/pkg/action/lint.go new file mode 100644 index 0000000000..2292c14bf3 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/lint.go @@ -0,0 +1,118 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Lint is the action for checking that the semantics of a chart are well-formed. +// +// It provides the implementation of 'helm lint'. +type Lint struct { + Strict bool + Namespace string + WithSubcharts bool +} + +// LintResult is the result of Lint +type LintResult struct { + TotalChartsLinted int + Messages []support.Message + Errors []error +} + +// NewLint creates a new Lint object with the given configuration. +func NewLint() *Lint { + return &Lint{} +} + +// Run executes 'helm Lint' against the given chart. +func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult { + lowestTolerance := support.ErrorSev + if l.Strict { + lowestTolerance = support.WarningSev + } + result := &LintResult{} + for _, path := range paths { + linter, err := lintChart(path, vals, l.Namespace, l.Strict) + if err != nil { + result.Errors = append(result.Errors, err) + continue + } + + result.Messages = append(result.Messages, linter.Messages...) + result.TotalChartsLinted++ + for _, msg := range linter.Messages { + if msg.Severity >= lowestTolerance { + result.Errors = append(result.Errors, msg.Err) + } + } + } + return result +} + +func lintChart(path string, vals map[string]interface{}, namespace string, strict bool) (support.Linter, error) { + var chartPath string + linter := support.Linter{} + + if strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz") { + tempDir, err := ioutil.TempDir("", "helm-lint") + if err != nil { + return linter, errors.Wrap(err, "unable to create temp dir to extract tarball") + } + defer os.RemoveAll(tempDir) + + file, err := os.Open(path) + if err != nil { + return linter, errors.Wrap(err, "unable to open tarball") + } + defer file.Close() + + if err = chartutil.Expand(tempDir, file); err != nil { + return linter, errors.Wrap(err, "unable to extract tarball") + } + + files, err := ioutil.ReadDir(tempDir) + if err != nil { + return linter, errors.Wrapf(err, "unable to read temporary output directory %s", tempDir) + } + if !files[0].IsDir() { + return linter, errors.Errorf("unexpected file %s in temporary output directory %s", files[0].Name(), tempDir) + } + + chartPath = filepath.Join(tempDir, files[0].Name()) + } else { + chartPath = path + } + + // Guard: Error out if this is not a chart. + if _, err := os.Stat(filepath.Join(chartPath, "Chart.yaml")); err != nil { + return linter, errors.Wrap(err, "unable to check Chart.yaml file in chart") + } + + return lint.All(chartPath, vals, namespace, strict), nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/list.go b/vendor/helm.sh/helm/v3/pkg/action/list.go new file mode 100644 index 0000000000..c9e6e364ab --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/list.go @@ -0,0 +1,323 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "path" + "regexp" + + "k8s.io/apimachinery/pkg/labels" + + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/releaseutil" +) + +// ListStates represents zero or more status codes that a list item may have set +// +// Because this is used as a bitmask filter, more than one bit can be flipped +// in the ListStates. +type ListStates uint + +const ( + // ListDeployed filters on status "deployed" + ListDeployed ListStates = 1 << iota + // ListUninstalled filters on status "uninstalled" + ListUninstalled + // ListUninstalling filters on status "uninstalling" (uninstall in progress) + ListUninstalling + // ListPendingInstall filters on status "pending" (deployment in progress) + ListPendingInstall + // ListPendingUpgrade filters on status "pending_upgrade" (upgrade in progress) + ListPendingUpgrade + // ListPendingRollback filters on status "pending_rollback" (rollback in progress) + ListPendingRollback + // ListSuperseded filters on status "superseded" (historical release version that is no longer deployed) + ListSuperseded + // ListFailed filters on status "failed" (release version not deployed because of error) + ListFailed + // ListUnknown filters on an unknown status + ListUnknown +) + +// FromName takes a state name and returns a ListStates representation. +// +// Currently, there are only names for individual flipped bits, so the returned +// ListStates will only match one of the constants. However, it is possible that +// this behavior could change in the future. +func (s ListStates) FromName(str string) ListStates { + switch str { + case "deployed": + return ListDeployed + case "uninstalled": + return ListUninstalled + case "superseded": + return ListSuperseded + case "failed": + return ListFailed + case "uninstalling": + return ListUninstalling + case "pending-install": + return ListPendingInstall + case "pending-upgrade": + return ListPendingUpgrade + case "pending-rollback": + return ListPendingRollback + } + return ListUnknown +} + +// ListAll is a convenience for enabling all list filters +const ListAll = ListDeployed | ListUninstalled | ListUninstalling | ListPendingInstall | ListPendingRollback | ListPendingUpgrade | ListSuperseded | ListFailed + +// Sorter is a top-level sort +type Sorter uint + +const ( + // ByNameDesc sorts by descending lexicographic order + ByNameDesc Sorter = iota + 1 + // ByDateAsc sorts by ascending dates (oldest updated release first) + ByDateAsc + // ByDateDesc sorts by descending dates (latest updated release first) + ByDateDesc +) + +// List is the action for listing releases. +// +// It provides, for example, the implementation of 'helm list'. +// It returns no more than one revision of every release in one specific, or in +// all, namespaces. +// To list all the revisions of a specific release, see the History action. 
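Since ListStates is a bitmask, filters compose with bitwise OR; a short sketch of using it with the List action defined below (cfg again assumed initialized):

	package helmexample

	import (
		"fmt"

		"helm.sh/helm/v3/pkg/action"
	)

	// listDeployedOrFailed lists the latest revision of releases that are
	// either deployed or failed.
	func listDeployedOrFailed(cfg *action.Configuration) error {
		l := action.NewList(cfg)
		l.StateMask = action.ListDeployed | action.ListFailed // OR composes filters
		rels, err := l.Run()
		if err != nil {
			return err
		}
		for _, r := range rels {
			fmt.Printf("%s/%s: %s\n", r.Namespace, r.Name, r.Info.Status)
		}
		return nil
	}
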
+type List struct { + cfg *Configuration + + // All ignores the limit/offset + All bool + // AllNamespaces searches across namespaces + AllNamespaces bool + // Sort indicates the sort to use + // + // see pkg/releaseutil for several useful sorters + Sort Sorter + // Overrides the default lexicographic sorting + ByDate bool + SortReverse bool + // StateMask accepts a bitmask of states for items to show. + // The default is ListDeployed + StateMask ListStates + // Limit is the number of items to return per Run() + Limit int + // Offset is the starting index for the Run() call + Offset int + // Filter is a filter that is applied to the results + Filter string + Short bool + TimeFormat string + Uninstalled bool + Superseded bool + Uninstalling bool + Deployed bool + Failed bool + Pending bool + Selector string +} + +// NewList constructs a new *List +func NewList(cfg *Configuration) *List { + return &List{ + StateMask: ListDeployed | ListFailed, + cfg: cfg, + } +} + +// Run executes the list command, returning a set of matches. +func (l *List) Run() ([]*release.Release, error) { + if err := l.cfg.KubeClient.IsReachable(); err != nil { + return nil, err + } + + var filter *regexp.Regexp + if l.Filter != "" { + var err error + filter, err = regexp.Compile(l.Filter) + if err != nil { + return nil, err + } + } + + results, err := l.cfg.Releases.List(func(rel *release.Release) bool { + // Skip anything that doesn't match the filter. + if filter != nil && !filter.MatchString(rel.Name) { + return false + } + + return true + }) + + if err != nil { + return nil, err + } + + if results == nil { + return results, nil + } + + // by definition, superseded releases are never shown if + // only the latest releases are returned. so if requested statemask + // is _only_ ListSuperseded, skip the latest release filter + if l.StateMask != ListSuperseded { + results = filterLatestReleases(results) + } + + // State mask application must occur after filtering to + // latest releases, otherwise outdated entries can be returned + results = l.filterStateMask(results) + + // Skip anything that doesn't match the selector + selectorObj, err := labels.Parse(l.Selector) + if err != nil { + return nil, err + } + results = l.filterSelector(results, selectorObj) + + // Unfortunately, we have to sort before truncating, which can incur substantial overhead + l.sort(results) + + // Guard on offset + if l.Offset >= len(results) { + return []*release.Release{}, nil + } + + // Calculate the limit and offset, and then truncate results if necessary. + limit := len(results) + if l.Limit > 0 && l.Limit < limit { + limit = l.Limit + } + last := l.Offset + limit + if l := len(results); l < last { + last = l + } + results = results[l.Offset:last] + + return results, err +} + +// sort is an in-place sort where order is based on the value of a.Sort +func (l *List) sort(rels []*release.Release) { + if l.SortReverse { + l.Sort = ByNameDesc + } + + if l.ByDate { + l.Sort = ByDateDesc + if l.SortReverse { + l.Sort = ByDateAsc + } + } + + switch l.Sort { + case ByDateDesc: + releaseutil.SortByDate(rels) + case ByDateAsc: + releaseutil.Reverse(rels, releaseutil.SortByDate) + case ByNameDesc: + releaseutil.Reverse(rels, releaseutil.SortByName) + default: + releaseutil.SortByName(rels) + } +} + +// filterLatestReleases returns a list scrubbed of old releases. 
+func filterLatestReleases(releases []*release.Release) []*release.Release { + latestReleases := make(map[string]*release.Release) + + for _, rls := range releases { + name, namespace := rls.Name, rls.Namespace + key := path.Join(namespace, name) + if latestRelease, exists := latestReleases[key]; exists && latestRelease.Version > rls.Version { + continue + } + latestReleases[key] = rls + } + + var list = make([]*release.Release, 0, len(latestReleases)) + for _, rls := range latestReleases { + list = append(list, rls) + } + return list +} + +func (l *List) filterStateMask(releases []*release.Release) []*release.Release { + desiredStateReleases := make([]*release.Release, 0) + + for _, rls := range releases { + currentStatus := l.StateMask.FromName(rls.Info.Status.String()) + mask := l.StateMask & currentStatus + if mask == 0 { + continue + } + desiredStateReleases = append(desiredStateReleases, rls) + } + + return desiredStateReleases +} + +func (l *List) filterSelector(releases []*release.Release, selector labels.Selector) []*release.Release { + desiredStateReleases := make([]*release.Release, 0) + + for _, rls := range releases { + if selector.Matches(labels.Set(rls.Labels)) { + desiredStateReleases = append(desiredStateReleases, rls) + } + } + + return desiredStateReleases +} + +// SetStateMask calculates the state mask based on parameters. +func (l *List) SetStateMask() { + if l.All { + l.StateMask = ListAll + return + } + + state := ListStates(0) + if l.Deployed { + state |= ListDeployed + } + if l.Uninstalled { + state |= ListUninstalled + } + if l.Uninstalling { + state |= ListUninstalling + } + if l.Pending { + state |= ListPendingInstall | ListPendingRollback | ListPendingUpgrade + } + if l.Failed { + state |= ListFailed + } + if l.Superseded { + state |= ListSuperseded + } + + // Apply a default + if state == 0 { + state = ListDeployed | ListFailed + } + + l.StateMask = state +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/package.go b/vendor/helm.sh/helm/v3/pkg/action/package.go new file mode 100644 index 0000000000..52920956fb --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/package.go @@ -0,0 +1,182 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "syscall" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + "golang.org/x/term" + + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/provenance" +) + +// Package is the action for packaging a chart. +// +// It provides the implementation of 'helm package'. +type Package struct { + Sign bool + Key string + Keyring string + PassphraseFile string + Version string + AppVersion string + Destination string + DependencyUpdate bool + + RepositoryConfig string + RepositoryCache string +} + +// NewPackage creates a new Package object with the given configuration. 
+func NewPackage() *Package {
+	return &Package{}
+}
+
+// Run executes 'helm package' against the given chart and returns the path to the packaged chart.
+func (p *Package) Run(path string, vals map[string]interface{}) (string, error) {
+	ch, err := loader.LoadDir(path)
+	if err != nil {
+		return "", err
+	}
+
+	// If version is set, modify the version.
+	if p.Version != "" {
+		ch.Metadata.Version = p.Version
+	}
+
+	if err := validateVersion(ch.Metadata.Version); err != nil {
+		return "", err
+	}
+
+	if p.AppVersion != "" {
+		ch.Metadata.AppVersion = p.AppVersion
+	}
+
+	if reqs := ch.Metadata.Dependencies; reqs != nil {
+		if err := CheckDependencies(ch, reqs); err != nil {
+			return "", err
+		}
+	}
+
+	var dest string
+	if p.Destination == "." {
+		// Save to the current working directory.
+		dest, err = os.Getwd()
+		if err != nil {
+			return "", err
+		}
+	} else {
+		// Otherwise save to set destination
+		dest = p.Destination
+	}
+
+	name, err := chartutil.Save(ch, dest)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to save")
+	}
+
+	if p.Sign {
+		err = p.Clearsign(name)
+	}
+
+	return name, err
+}
+
+// validateVersion verifies that the given string is a valid SemVer version,
+// returning an error if it is not.
+func validateVersion(ver string) error {
+	if _, err := semver.NewVersion(ver); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Clearsign signs a chart
+func (p *Package) Clearsign(filename string) error {
+	// Load keyring
+	signer, err := provenance.NewFromKeyring(p.Keyring, p.Key)
+	if err != nil {
+		return err
+	}
+
+	passphraseFetcher := promptUser
+	if p.PassphraseFile != "" {
+		passphraseFetcher, err = passphraseFileFetcher(p.PassphraseFile, os.Stdin)
+		if err != nil {
+			return err
+		}
+	}
+
+	if err := signer.DecryptKey(passphraseFetcher); err != nil {
+		return err
+	}
+
+	sig, err := signer.ClearSign(filename)
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(filename+".prov", []byte(sig), 0644)
+}
+
+// promptUser implements provenance.PassphraseFetcher
+func promptUser(name string) ([]byte, error) {
+	fmt.Printf("Password for key %q > ", name)
+	// syscall.Stdin is not an int in all environments and needs to be coerced
+	// into one there (e.g., Windows)
+	pw, err := term.ReadPassword(int(syscall.Stdin))
+	fmt.Println()
+	return pw, err
+}
+
+func passphraseFileFetcher(passphraseFile string, stdin *os.File) (provenance.PassphraseFetcher, error) {
+	file, err := openPassphraseFile(passphraseFile, stdin)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	reader := bufio.NewReader(file)
+	passphrase, _, err := reader.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	return func(name string) ([]byte, error) {
+		return passphrase, nil
+	}, nil
+}
+
+func openPassphraseFile(passphraseFile string, stdin *os.File) (*os.File, error) {
+	if passphraseFile == "-" {
+		stat, err := stdin.Stat()
+		if err != nil {
+			return nil, err
+		}
+		if (stat.Mode() & os.ModeNamedPipe) == 0 {
+			return nil, errors.New("specified reading passphrase from stdin, without input on stdin")
+		}
+		return stdin, nil
+	}
+	return os.Open(passphraseFile)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/pull.go b/vendor/helm.sh/helm/v3/pkg/action/pull.go
new file mode 100644
index 0000000000..fa1247054b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/pull.go
@@ -0,0 +1,169 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/cli"
+	"helm.sh/helm/v3/pkg/downloader"
+	"helm.sh/helm/v3/pkg/getter"
+	"helm.sh/helm/v3/pkg/repo"
+)
+
+// Pull is the action for fetching a chart and, optionally, unpacking it.
+//
+// It provides the implementation of 'helm pull'.
+type Pull struct {
+	ChartPathOptions
+
+	Settings *cli.EnvSettings // TODO: refactor this out of pkg/action
+
+	Devel       bool
+	Untar       bool
+	VerifyLater bool
+	UntarDir    string
+	DestDir     string
+	cfg         *Configuration
+}
+
+type PullOpt func(*Pull)
+
+func WithConfig(cfg *Configuration) PullOpt {
+	return func(p *Pull) {
+		p.cfg = cfg
+	}
+}
+
+// NewPull creates a new Pull object.
+func NewPull() *Pull {
+	return NewPullWithOpts()
+}
+
+// NewPullWithOpts creates a new pull, with configuration options.
+func NewPullWithOpts(opts ...PullOpt) *Pull {
+	p := &Pull{}
+	for _, fn := range opts {
+		fn(p)
+	}
+
+	return p
+}
+
+// Run executes 'helm pull' against the given chart reference.
+func (p *Pull) Run(chartRef string) (string, error) {
+	var out strings.Builder
+
+	c := downloader.ChartDownloader{
+		Out:     &out,
+		Keyring: p.Keyring,
+		Verify:  downloader.VerifyNever,
+		Getters: getter.All(p.Settings),
+		Options: []getter.Option{
+			getter.WithBasicAuth(p.Username, p.Password),
+			getter.WithPassCredentialsAll(p.PassCredentialsAll),
+			getter.WithTLSClientConfig(p.CertFile, p.KeyFile, p.CaFile),
+			getter.WithInsecureSkipVerifyTLS(p.InsecureSkipTLSverify),
+		},
+		RepositoryConfig: p.Settings.RepositoryConfig,
+		RepositoryCache:  p.Settings.RepositoryCache,
+	}
+
+	if strings.HasPrefix(chartRef, "oci://") {
+		if p.Version == "" {
+			return out.String(), errors.Errorf("--version flag is explicitly required for OCI registries")
+		}
+
+		c.Options = append(c.Options,
+			getter.WithRegistryClient(p.cfg.RegistryClient),
+			getter.WithTagName(p.Version))
+	}
+
+	if p.Verify {
+		c.Verify = downloader.VerifyAlways
+	} else if p.VerifyLater {
+		c.Verify = downloader.VerifyLater
+	}
+
+	// If untar is set, we fetch to a tempdir, then untar and copy after
+	// verification.
+	dest := p.DestDir
+	if p.Untar {
+		var err error
+		dest, err = ioutil.TempDir("", "helm-")
+		if err != nil {
+			return out.String(), errors.Wrap(err, "failed to untar")
+		}
+		defer os.RemoveAll(dest)
+	}
+
+	if p.RepoURL != "" {
+		chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, p.PassCredentialsAll, getter.All(p.Settings))
+		if err != nil {
+			return out.String(), err
+		}
+		chartRef = chartURL
+	}
+
+	saved, v, err := c.DownloadTo(chartRef, p.Version, dest)
+	if err != nil {
+		return out.String(), err
+	}
+
+	if p.Verify {
+		for name := range v.SignedBy.Identities {
+			fmt.Fprintf(&out, "Signed by: %v\n", name)
+		}
+		fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", v.SignedBy.PrimaryKey.Fingerprint)
+		fmt.Fprintf(&out, "Chart Hash Verified: %s\n", v.FileHash)
+	}
+
+	// After verification, untar the chart into the requested directory.
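+	// UntarDir is resolved relative to DestDir unless it is already absolute.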
+	if p.Untar {
+		ud := p.UntarDir
+		if !filepath.IsAbs(ud) {
+			ud = filepath.Join(p.DestDir, ud)
+		}
+		// Use udCheck to detect a conflicting file or directory without
+		// replacing ud when UntarDir is the current directory (".").
+		udCheck := ud
+		if udCheck == "." {
+			_, udCheck = filepath.Split(chartRef)
+		} else {
+			_, chartName := filepath.Split(chartRef)
+			udCheck = filepath.Join(udCheck, chartName)
+		}
+
+		if _, err := os.Stat(udCheck); err != nil {
+			if err := os.MkdirAll(udCheck, 0755); err != nil {
+				return out.String(), errors.Wrap(err, "failed to untar (mkdir)")
+			}
+
+		} else {
+			return out.String(), errors.Errorf("failed to untar: a file or directory with the name %s already exists", udCheck)
+		}
+
+		return out.String(), chartutil.ExpandFile(ud, saved)
+	}
+	return out.String(), nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/registry_login.go b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go
new file mode 100644
index 0000000000..00f6e2644c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"io"
+)
+
+// RegistryLogin performs a registry login operation.
+type RegistryLogin struct {
+	cfg *Configuration
+}
+
+// NewRegistryLogin creates a new RegistryLogin object with the given configuration.
+func NewRegistryLogin(cfg *Configuration) *RegistryLogin {
+	return &RegistryLogin{
+		cfg: cfg,
+	}
+}
+
+// Run executes the registry login operation
+func (a *RegistryLogin) Run(out io.Writer, hostname string, username string, password string, insecure bool) error {
+	return a.cfg.RegistryClient.Login(hostname, username, password, insecure)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go
new file mode 100644
index 0000000000..69add4163d
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go
@@ -0,0 +1,38 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"io"
+)
+
+// RegistryLogout performs a registry logout operation.
+type RegistryLogout struct {
+	cfg *Configuration
+}
+
+// NewRegistryLogout creates a new RegistryLogout object with the given configuration.
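+//
+// Illustrative call sequence (editorial sketch, not upstream code; assumes an
+// initialized *action.Configuration with a registry client attached):
+//
+//	logout := action.NewRegistryLogout(cfg)
+//	err := logout.Run(os.Stdout, "registry.example.com")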
+func NewRegistryLogout(cfg *Configuration) *RegistryLogout {
+	return &RegistryLogout{
+		cfg: cfg,
+	}
+}
+
+// Run executes the registry logout operation
+func (a *RegistryLogout) Run(out io.Writer, hostname string) error {
+	return a.cfg.RegistryClient.Logout(hostname)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/release_testing.go b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go
new file mode 100644
index 0000000000..ecaeaf59f5
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/release_testing.go
@@ -0,0 +1,138 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/release"
+)
+
+// ReleaseTesting is the action for testing a release.
+//
+// It provides the implementation of 'helm test'.
+type ReleaseTesting struct {
+	cfg     *Configuration
+	Timeout time.Duration
+	// Used for fetching logs from test pods
+	Namespace string
+	Filters   map[string][]string
+}
+
+// NewReleaseTesting creates a new ReleaseTesting object with the given configuration.
+func NewReleaseTesting(cfg *Configuration) *ReleaseTesting {
+	return &ReleaseTesting{
+		cfg:     cfg,
+		Filters: map[string][]string{},
+	}
+}
+
+// Run executes 'helm test' against the given release.
+func (r *ReleaseTesting) Run(name string) (*release.Release, error) {
+	if err := r.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("releaseTest: Release name is invalid: %s", name)
+	}
+
+	// finds the non-deleted release with the given name
+	rel, err := r.cfg.Releases.Last(name)
+	if err != nil {
+		return rel, err
+	}
+
+	skippedHooks := []*release.Hook{}
+	executingHooks := []*release.Hook{}
+	if len(r.Filters["!name"]) != 0 {
+		for _, h := range rel.Hooks {
+			if contains(r.Filters["!name"], h.Name) {
+				skippedHooks = append(skippedHooks, h)
+			} else {
+				executingHooks = append(executingHooks, h)
+			}
+		}
+		rel.Hooks = executingHooks
+	}
+	if len(r.Filters["name"]) != 0 {
+		executingHooks = nil
+		for _, h := range rel.Hooks {
+			if contains(r.Filters["name"], h.Name) {
+				executingHooks = append(executingHooks, h)
+			} else {
+				skippedHooks = append(skippedHooks, h)
+			}
+		}
+		rel.Hooks = executingHooks
+	}
+
+	if err := r.cfg.execHook(rel, release.HookTest, r.Timeout); err != nil {
+		rel.Hooks = append(skippedHooks, rel.Hooks...)
+		r.cfg.Releases.Update(rel)
+		return rel, err
+	}
+
+	rel.Hooks = append(skippedHooks, rel.Hooks...)
+	return rel, r.cfg.Releases.Update(rel)
+}
+
+// GetPodLogs will write the logs for all test pods in the given release into
+// the given writer. These can be immediately output to the user or captured for
+// other uses.
+func (r *ReleaseTesting) GetPodLogs(out io.Writer, rel *release.Release) error {
+	client, err := r.cfg.KubernetesClientSet()
+	if err != nil {
+		return errors.Wrap(err, "unable to get kubernetes client to fetch pod logs")
+	}
+
+	for _, h := range rel.Hooks {
+		for _, e := range h.Events {
+			if e == release.HookTest {
+				req := client.CoreV1().Pods(r.Namespace).GetLogs(h.Name, &v1.PodLogOptions{})
+				logReader, err := req.Stream(context.Background())
+				if err != nil {
+					return errors.Wrapf(err, "unable to get pod logs for %s", h.Name)
+				}
+
+				fmt.Fprintf(out, "POD LOGS: %s\n", h.Name)
+				_, err = io.Copy(out, logReader)
+				fmt.Fprintln(out)
+				if err != nil {
+					return errors.Wrapf(err, "unable to write pod logs for %s", h.Name)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func contains(arr []string, value string) bool {
+	for _, item := range arr {
+		if item == value {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
new file mode 100644
index 0000000000..63e83f3d94
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/resource_policy.go
@@ -0,0 +1,46 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"strings"
+
+	"helm.sh/helm/v3/pkg/kube"
+	"helm.sh/helm/v3/pkg/releaseutil"
+)
+
+func filterManifestsToKeep(manifests []releaseutil.Manifest) (keep, remaining []releaseutil.Manifest) {
+	for _, m := range manifests {
+		if m.Head.Metadata == nil || m.Head.Metadata.Annotations == nil || len(m.Head.Metadata.Annotations) == 0 {
+			remaining = append(remaining, m)
+			continue
+		}
+
+		resourcePolicyType, ok := m.Head.Metadata.Annotations[kube.ResourcePolicyAnno]
+		if !ok {
+			remaining = append(remaining, m)
+			continue
+		}
+
+		resourcePolicyType = strings.ToLower(strings.TrimSpace(resourcePolicyType))
+		if resourcePolicyType == kube.KeepPolicy {
+			keep = append(keep, m)
+		}
+
+	}
+	return keep, remaining
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/rollback.go b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
new file mode 100644
index 0000000000..f3f958f3d4
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/rollback.go
@@ -0,0 +1,241 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/release"
+	helmtime "helm.sh/helm/v3/pkg/time"
+)
+
+// Rollback is the action for rolling back to a given release.
+//
+// It provides the implementation of 'helm rollback'.
+type Rollback struct {
+	cfg *Configuration
+
+	Version       int
+	Timeout       time.Duration
+	Wait          bool
+	WaitForJobs   bool
+	DisableHooks  bool
+	DryRun        bool
+	Recreate      bool // will (if true) recreate pods after a rollback.
+	Force         bool // will (if true) force resource upgrade through uninstall/recreate if needed
+	CleanupOnFail bool
+	MaxHistory    int // MaxHistory limits the maximum number of revisions saved per release
+}
+
+// NewRollback creates a new Rollback object with the given configuration.
+func NewRollback(cfg *Configuration) *Rollback {
+	return &Rollback{
+		cfg: cfg,
+	}
+}
+
+// Run executes 'helm rollback' against the given release.
+func (r *Rollback) Run(name string) error {
+	if err := r.cfg.KubeClient.IsReachable(); err != nil {
+		return err
+	}
+
+	r.cfg.Releases.MaxHistory = r.MaxHistory
+
+	r.cfg.Log("preparing rollback of %s", name)
+	currentRelease, targetRelease, err := r.prepareRollback(name)
+	if err != nil {
+		return err
+	}
+
+	if !r.DryRun {
+		r.cfg.Log("creating rolled back release for %s", name)
+		if err := r.cfg.Releases.Create(targetRelease); err != nil {
+			return err
+		}
+	}
+
+	r.cfg.Log("performing rollback of %s", name)
+	if _, err := r.performRollback(currentRelease, targetRelease); err != nil {
+		return err
+	}
+
+	if !r.DryRun {
+		r.cfg.Log("updating status for rolled back release for %s", name)
+		if err := r.cfg.Releases.Update(targetRelease); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// prepareRollback finds the previous release and prepares a new release object with
+// the previous release's configuration
+func (r *Rollback) prepareRollback(name string) (*release.Release, *release.Release, error) {
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, nil, errors.Errorf("prepareRollback: Release name is invalid: %s", name)
+	}
+
+	if r.Version < 0 {
+		return nil, nil, errInvalidRevision
+	}
+
+	currentRelease, err := r.cfg.Releases.Last(name)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	previousVersion := r.Version
+	if r.Version == 0 {
+		previousVersion = currentRelease.Version - 1
+	}
+
+	r.cfg.Log("rolling back %s (current: v%d, target: v%d)", name, currentRelease.Version, previousVersion)
+
+	previousRelease, err := r.cfg.Releases.Get(name, previousVersion)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Store a new release object with previous release's configuration
+	targetRelease := &release.Release{
+		Name:      name,
+		Namespace: currentRelease.Namespace,
+		Chart:     previousRelease.Chart,
+		Config:    previousRelease.Config,
+		Info: &release.Info{
+			FirstDeployed: currentRelease.Info.FirstDeployed,
+			LastDeployed:  helmtime.Now(),
+			Status:        release.StatusPendingRollback,
+			Notes:         previousRelease.Info.Notes,
+			// Because we lose the reference to previous version elsewhere, we set the
+			// message here, and only override it later if we experience failure.
+			Description: fmt.Sprintf("Rollback to %d", previousVersion),
+		},
+		Version:  currentRelease.Version + 1,
+		Manifest: previousRelease.Manifest,
+		Hooks:    previousRelease.Hooks,
+	}
+
+	return currentRelease, targetRelease, nil
+}
+
+func (r *Rollback) performRollback(currentRelease, targetRelease *release.Release) (*release.Release, error) {
+	if r.DryRun {
+		r.cfg.Log("dry run for %s", targetRelease.Name)
+		return targetRelease, nil
+	}
+
+	current, err := r.cfg.KubeClient.Build(bytes.NewBufferString(currentRelease.Manifest), false)
+	if err != nil {
+		return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest")
+	}
+	target, err := r.cfg.KubeClient.Build(bytes.NewBufferString(targetRelease.Manifest), false)
+	if err != nil {
+		return targetRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
+	}
+
+	// pre-rollback hooks
+	if !r.DisableHooks {
+		if err := r.cfg.execHook(targetRelease, release.HookPreRollback, r.Timeout); err != nil {
+			return targetRelease, err
+		}
+	} else {
+		r.cfg.Log("rollback hooks disabled for %s", targetRelease.Name)
+	}
+
+	results, err := r.cfg.KubeClient.Update(current, target, r.Force)
+
+	if err != nil {
+		msg := fmt.Sprintf("Rollback %q failed: %s", targetRelease.Name, err)
+		r.cfg.Log("warning: %s", msg)
+		currentRelease.Info.Status = release.StatusSuperseded
+		targetRelease.Info.Status = release.StatusFailed
+		targetRelease.Info.Description = msg
+		r.cfg.recordRelease(currentRelease)
+		r.cfg.recordRelease(targetRelease)
+		if r.CleanupOnFail {
+			r.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(results.Created))
+			_, errs := r.cfg.KubeClient.Delete(results.Created)
+			if errs != nil {
+				var errorList []string
+				for _, e := range errs {
+					errorList = append(errorList, e.Error())
+				}
+				return targetRelease, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original rollback error: %s", err)
+			}
+			r.cfg.Log("Resource cleanup complete")
+		}
+		return targetRelease, err
+	}
+
+	if r.Recreate {
+		// NOTE: Because this is not critical for a release to succeed, we just
+		// log if an error occurs and continue onward. If we ever introduce log
+		// levels, we should make these error level logs so users are notified
+		// that they'll need to go do the cleanup on their own
+		if err := recreate(r.cfg, results.Updated); err != nil {
+			r.cfg.Log(err.Error())
+		}
+	}
+
+	if r.Wait {
+		if r.WaitForJobs {
+			if err := r.cfg.KubeClient.WaitWithJobs(target, r.Timeout); err != nil {
+				targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
+				r.cfg.recordRelease(currentRelease)
+				r.cfg.recordRelease(targetRelease)
+				return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
+			}
+		} else {
+			if err := r.cfg.KubeClient.Wait(target, r.Timeout); err != nil {
+				targetRelease.SetStatus(release.StatusFailed, fmt.Sprintf("Release %q failed: %s", targetRelease.Name, err.Error()))
+				r.cfg.recordRelease(currentRelease)
+				r.cfg.recordRelease(targetRelease)
+				return targetRelease, errors.Wrapf(err, "release %s failed", targetRelease.Name)
+			}
+		}
+	}
+
+	// post-rollback hooks
+	if !r.DisableHooks {
+		if err := r.cfg.execHook(targetRelease, release.HookPostRollback, r.Timeout); err != nil {
+			return targetRelease, err
+		}
+	}
+
+	deployed, err := r.cfg.Releases.DeployedAll(currentRelease.Name)
+	if err != nil && !strings.Contains(err.Error(), "has no deployed releases") {
+		return nil, err
+	}
+	// Supersede all previous deployments, see issue #2941.
+	for _, rel := range deployed {
+		r.cfg.Log("superseding previous deployment %d", rel.Version)
+		rel.Info.Status = release.StatusSuperseded
+		r.cfg.recordRelease(rel)
+	}
+
+	targetRelease.Info.Status = release.StatusDeployed
+
+	return targetRelease, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/show.go b/vendor/helm.sh/helm/v3/pkg/action/show.go
new file mode 100644
index 0000000000..4eab2b4fd0
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/show.go
@@ -0,0 +1,130 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/cli-runtime/pkg/printers"
+	"sigs.k8s.io/yaml"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/chartutil"
+)
+
+// ShowOutputFormat is the format of the output of `helm show`
+type ShowOutputFormat string
+
+const (
+	// ShowAll is the format which shows all the information of a chart
+	ShowAll ShowOutputFormat = "all"
+	// ShowChart is the format which only shows the chart's definition
+	ShowChart ShowOutputFormat = "chart"
+	// ShowValues is the format which only shows the chart's values
+	ShowValues ShowOutputFormat = "values"
+	// ShowReadme is the format which only shows the chart's README
+	ShowReadme ShowOutputFormat = "readme"
+)
+
+var readmeFileNames = []string{"readme.md", "readme.txt", "readme"}
+
+func (o ShowOutputFormat) String() string {
+	return string(o)
+}
+
+// Show is the action for inspecting a given chart's information.
+//
+// It provides the implementation of 'helm show' and its respective subcommands.
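+//
+// Illustrative use (editorial sketch, not upstream code):
+//
+//	show := action.NewShow(action.ShowValues)
+//	out, err := show.Run("./mychart")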
+type Show struct {
+	ChartPathOptions
+	Devel            bool
+	OutputFormat     ShowOutputFormat
+	JSONPathTemplate string
+	chart            *chart.Chart // for testing
+}
+
+// NewShow creates a new Show object with the given configuration.
+func NewShow(output ShowOutputFormat) *Show {
+	return &Show{
+		OutputFormat: output,
+	}
+}
+
+// Run executes 'helm show' against the given chart path.
+func (s *Show) Run(chartpath string) (string, error) {
+	if s.chart == nil {
+		chrt, err := loader.Load(chartpath)
+		if err != nil {
+			return "", err
+		}
+		s.chart = chrt
+	}
+	cf, err := yaml.Marshal(s.chart.Metadata)
+	if err != nil {
+		return "", err
+	}
+
+	var out strings.Builder
+	if s.OutputFormat == ShowChart || s.OutputFormat == ShowAll {
+		fmt.Fprintf(&out, "%s\n", cf)
+	}
+
+	if (s.OutputFormat == ShowValues || s.OutputFormat == ShowAll) && s.chart.Values != nil {
+		if s.OutputFormat == ShowAll {
+			fmt.Fprintln(&out, "---")
+		}
+		if s.JSONPathTemplate != "" {
+			printer, err := printers.NewJSONPathPrinter(s.JSONPathTemplate)
+			if err != nil {
+				return "", errors.Wrapf(err, "error parsing jsonpath %s", s.JSONPathTemplate)
+			}
+			printer.Execute(&out, s.chart.Values)
+		} else {
+			for _, f := range s.chart.Raw {
+				if f.Name == chartutil.ValuesfileName {
+					fmt.Fprintln(&out, string(f.Data))
+				}
+			}
+		}
+	}
+
+	if s.OutputFormat == ShowReadme || s.OutputFormat == ShowAll {
+		if s.OutputFormat == ShowAll {
+			fmt.Fprintln(&out, "---")
+		}
+		readme := findReadme(s.chart.Files)
+		if readme == nil {
+			return out.String(), nil
+		}
+		fmt.Fprintf(&out, "%s\n", readme.Data)
+	}
+	return out.String(), nil
+}
+
+func findReadme(files []*chart.File) (file *chart.File) {
+	for _, file := range files {
+		for _, n := range readmeFileNames {
+			if strings.EqualFold(file.Name, n) {
+				return file
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/status.go b/vendor/helm.sh/helm/v3/pkg/action/status.go
new file mode 100644
index 0000000000..1c556e28dc
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/status.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"helm.sh/helm/v3/pkg/release"
+)
+
+// Status is the action for checking the deployment status of releases.
+//
+// It provides the implementation of 'helm status'.
+type Status struct {
+	cfg *Configuration
+
+	Version int
+
+	// If true, display the description in the output;
+	// this only affects the table print type.
+	// TODO Helm 4: Remove this flag and output the description by default.
+	ShowDescription bool
+}
+
+// NewStatus creates a new Status object with the given configuration.
+func NewStatus(cfg *Configuration) *Status {
+	return &Status{
+		cfg: cfg,
+	}
+}
+
+// Run executes 'helm status' against the given release.
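+//
+// Illustrative use (editorial sketch, not upstream code; leaving Version at 0
+// selects the latest revision):
+//
+//	rel, err := action.NewStatus(cfg).Run("myrelease")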
+func (s *Status) Run(name string) (*release.Release, error) {
+	if err := s.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	return s.cfg.releaseContent(name, s.Version)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/uninstall.go b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go
new file mode 100644
index 0000000000..c762159cbb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go
@@ -0,0 +1,212 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/release"
+	"helm.sh/helm/v3/pkg/releaseutil"
+	helmtime "helm.sh/helm/v3/pkg/time"
+)
+
+// Uninstall is the action for uninstalling releases.
+//
+// It provides the implementation of 'helm uninstall'.
+type Uninstall struct {
+	cfg *Configuration
+
+	DisableHooks bool
+	DryRun       bool
+	KeepHistory  bool
+	Timeout      time.Duration
+	Description  string
+}
+
+// NewUninstall creates a new Uninstall object with the given configuration.
+func NewUninstall(cfg *Configuration) *Uninstall {
+	return &Uninstall{
+		cfg: cfg,
+	}
+}
+
+// Run uninstalls the given release.
+func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) {
+	if err := u.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	if u.DryRun {
+		// In the dry run case, just see if the release exists
+		r, err := u.cfg.releaseContent(name, 0)
+		if err != nil {
+			return &release.UninstallReleaseResponse{}, err
+		}
+		return &release.UninstallReleaseResponse{Release: r}, nil
+	}
+
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("uninstall: Release name is invalid: %s", name)
+	}
+
+	rels, err := u.cfg.Releases.History(name)
+	if err != nil {
+		return nil, errors.Wrapf(err, "uninstall: Release not loaded: %s", name)
+	}
+	if len(rels) < 1 {
+		return nil, errMissingRelease
+	}
+
+	releaseutil.SortByRevision(rels)
+	rel := rels[len(rels)-1]
+
+	// TODO: Are there any cases where we want to force a delete even if it's
+	// already marked deleted?
+	if rel.Info.Status == release.StatusUninstalled {
+		if !u.KeepHistory {
+			if err := u.purgeReleases(rels...); err != nil {
+				return nil, errors.Wrap(err, "uninstall: Failed to purge the release")
+			}
+			return &release.UninstallReleaseResponse{Release: rel}, nil
+		}
+		return nil, errors.Errorf("the release named %q is already deleted", name)
+	}
+
+	u.cfg.Log("uninstall: Deleting %s", name)
+	rel.Info.Status = release.StatusUninstalling
+	rel.Info.Deleted = helmtime.Now()
+	rel.Info.Description = "Deletion in progress (or silently failed)"
+	res := &release.UninstallReleaseResponse{Release: rel}
+
+	if !u.DisableHooks {
+		if err := u.cfg.execHook(rel, release.HookPreDelete, u.Timeout); err != nil {
+			return res, err
+		}
+	} else {
+		u.cfg.Log("delete hooks disabled for %s", name)
+	}
+
+	// From here on out, the release is currently considered to be in StatusUninstalling
+	// state.
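+	// Persist that intermediate status before deleting resources, so an
+	// interrupted uninstall remains visible in the release history.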
+	if err := u.cfg.Releases.Update(rel); err != nil {
+		u.cfg.Log("uninstall: Failed to store updated release: %s", err)
+	}
+
+	kept, errs := u.deleteRelease(rel)
+
+	if kept != "" {
+		kept = "These resources were kept due to the resource policy:\n" + kept
+	}
+	res.Info = kept
+
+	if !u.DisableHooks {
+		if err := u.cfg.execHook(rel, release.HookPostDelete, u.Timeout); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	rel.Info.Status = release.StatusUninstalled
+	if len(u.Description) > 0 {
+		rel.Info.Description = u.Description
+	} else {
+		rel.Info.Description = "Uninstallation complete"
+	}
+
+	if !u.KeepHistory {
+		u.cfg.Log("purge requested for %s", name)
+		err := u.purgeReleases(rels...)
+		if err != nil {
+			errs = append(errs, errors.Wrap(err, "uninstall: Failed to purge the release"))
+		}
+
+		// Return the errors that occurred while deleting the release, if any
+		if len(errs) > 0 {
+			return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
+		}
+
+		return res, nil
+	}
+
+	if err := u.cfg.Releases.Update(rel); err != nil {
+		u.cfg.Log("uninstall: Failed to store updated release: %s", err)
+	}
+
+	if len(errs) > 0 {
+		return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs))
+	}
+	return res, nil
+}
+
+func (u *Uninstall) purgeReleases(rels ...*release.Release) error {
+	for _, rel := range rels {
+		if _, err := u.cfg.Releases.Delete(rel.Name, rel.Version); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func joinErrors(errs []error) string {
+	es := make([]string, 0, len(errs))
+	for _, e := range errs {
+		es = append(es, e.Error())
+	}
+	return strings.Join(es, "; ")
+}
+
+// deleteRelease deletes the release and returns manifests that were kept in the deletion process
+func (u *Uninstall) deleteRelease(rel *release.Release) (string, []error) {
+	var errs []error
+	caps, err := u.cfg.getCapabilities()
+	if err != nil {
+		return rel.Manifest, []error{errors.Wrap(err, "could not get apiVersions from Kubernetes")}
+	}
+
+	manifests := releaseutil.SplitManifests(rel.Manifest)
+	_, files, err := releaseutil.SortManifests(manifests, caps.APIVersions, releaseutil.UninstallOrder)
+	if err != nil {
+		// We could instead just delete everything in no particular order.
+		// FIXME: One way to delete at this point would be to try a label-based
+		// deletion. The problem with this is that we could get a false positive
+		// and delete something that was not legitimately part of this release.
+		return rel.Manifest, []error{errors.Wrap(err, "corrupted release record. You must manually delete the resources")}
+	}
+
+	filesToKeep, filesToDelete := filterManifestsToKeep(files)
+	var kept string
+	for _, f := range filesToKeep {
+		kept += "[" + f.Head.Kind + "] " + f.Head.Metadata.Name + "\n"
+	}
+
+	var builder strings.Builder
+	for _, file := range filesToDelete {
+		builder.WriteString("\n---\n" + file.Content)
+	}
+
+	resources, err := u.cfg.KubeClient.Build(strings.NewReader(builder.String()), false)
+	if err != nil {
+		return "", []error{errors.Wrap(err, "unable to build kubernetes objects for delete")}
+	}
+	if len(resources) > 0 {
+		_, errs = u.cfg.KubeClient.Delete(resources)
+	}
+	return kept, errs
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go
new file mode 100644
index 0000000000..3b3dd3f1c4
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go
@@ -0,0 +1,509 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/cli-runtime/pkg/resource"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/kube"
+	"helm.sh/helm/v3/pkg/postrender"
+	"helm.sh/helm/v3/pkg/release"
+	"helm.sh/helm/v3/pkg/releaseutil"
+	"helm.sh/helm/v3/pkg/storage/driver"
+)
+
+// Upgrade is the action for upgrading releases.
+//
+// It provides the implementation of 'helm upgrade'.
+type Upgrade struct {
+	cfg *Configuration
+
+	ChartPathOptions
+
+	// Install is a purely informative flag that indicates whether this upgrade was done in "install" mode.
+	//
+	// Applications may use this to determine whether this Upgrade operation was done as part of a
+	// pure upgrade (Upgrade.Install == false) or as part of an install-or-upgrade operation
+	// (Upgrade.Install == true).
+	//
+	// Setting this to `true` will NOT cause `Upgrade` to perform an install if the release does not exist.
+	// That process must be handled by creating an Install action directly. See cmd/upgrade.go for an
+	// example of how this flag is used.
+	Install bool
+	// Devel indicates that the operation is done in devel mode.
+	Devel bool
+	// Namespace is the namespace in which this operation should be performed.
+	Namespace string
+	// SkipCRDs skips installing CRDs when install flag is enabled during upgrade
+	SkipCRDs bool
+	// Timeout is the timeout for this operation
+	Timeout time.Duration
+	// Wait determines whether the wait operation should be performed after the upgrade is requested.
+	Wait bool
+	// WaitForJobs determines whether the wait operation for the Jobs should be performed after the upgrade is requested.
+	WaitForJobs bool
+	// DisableHooks disables hook processing if set to true.
+	DisableHooks bool
+	// DryRun controls whether the operation is prepared, but not executed.
+	// If `true`, the upgrade is prepared but not performed.
+	DryRun bool
+	// Force will, if set to `true`, ignore certain warnings and perform the upgrade anyway.
+	//
+	// This should be used with caution.
+	Force bool
+	// ResetValues will reset the values to the chart's built-ins rather than merging with existing.
+	ResetValues bool
+	// ReuseValues will re-use the user's last supplied values.
+	ReuseValues bool
+	// Recreate will (if true) recreate pods after a rollback.
+	Recreate bool
+	// MaxHistory limits the maximum number of revisions saved per release
+	MaxHistory int
+	// Atomic, if true, will roll back on failure.
+	Atomic bool
+	// CleanupOnFail will, if true, cause the upgrade to delete newly-created resources on a failed update.
+	CleanupOnFail bool
+	// SubNotes determines whether sub-notes are rendered in the chart.
+	SubNotes bool
+	// Description is the description of this operation
+	Description string
+	// PostRender is an optional post-renderer
+	//
+	// If this is non-nil, then after templates are rendered, they will be sent to the
+	// post renderer before sending to the Kubernetes API server.
+	PostRenderer postrender.PostRenderer
+	// DisableOpenAPIValidation controls whether OpenAPI validation is enforced.
+	DisableOpenAPIValidation bool
+}
+
+// NewUpgrade creates a new Upgrade object with the given configuration.
+func NewUpgrade(cfg *Configuration) *Upgrade {
+	return &Upgrade{
+		cfg: cfg,
+	}
+}
+
+// Run executes the upgrade on the given release.
+func (u *Upgrade) Run(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, error) {
+	if err := u.cfg.KubeClient.IsReachable(); err != nil {
+		return nil, err
+	}
+
+	// Make sure if Atomic is set, that wait is set as well. This makes it so
+	// the user doesn't have to specify both
+	u.Wait = u.Wait || u.Atomic
+
+	if err := chartutil.ValidateReleaseName(name); err != nil {
+		return nil, errors.Errorf("release name is invalid: %s", name)
+	}
+	u.cfg.Log("preparing upgrade for %s", name)
+	currentRelease, upgradedRelease, err := u.prepareUpgrade(name, chart, vals)
+	if err != nil {
+		return nil, err
+	}
+
+	u.cfg.Releases.MaxHistory = u.MaxHistory
+
+	u.cfg.Log("performing update for %s", name)
+	res, err := u.performUpgrade(currentRelease, upgradedRelease)
+	if err != nil {
+		return res, err
+	}
+
+	if !u.DryRun {
+		u.cfg.Log("updating status for upgraded release for %s", name)
+		if err := u.cfg.Releases.Update(upgradedRelease); err != nil {
+			return res, err
+		}
+	}
+
+	return res, nil
+}
+
+// prepareUpgrade builds an upgraded release for an upgrade operation.
+func (u *Upgrade) prepareUpgrade(name string, chart *chart.Chart, vals map[string]interface{}) (*release.Release, *release.Release, error) {
+	if chart == nil {
+		return nil, nil, errMissingChart
+	}
+
+	// finds the last non-deleted release with the given name
+	lastRelease, err := u.cfg.Releases.Last(name)
+	if err != nil {
+		// to keep existing behavior of returning the "%q has no deployed releases" error when an existing release does not exist
+		if errors.Is(err, driver.ErrReleaseNotFound) {
+			return nil, nil, driver.NewErrNoDeployedReleases(name)
+		}
+		return nil, nil, err
+	}
+
+	// Concurrent `helm upgrade`s will either fail here with `errPending` or when creating the release with "already exists". This should act as a pessimistic lock.
+	if lastRelease.Info.Status.IsPending() {
+		return nil, nil, errPending
+	}
+
+	var currentRelease *release.Release
+	if lastRelease.Info.Status == release.StatusDeployed {
+		// no need to retrieve the last deployed release from storage as the last release is deployed
+		currentRelease = lastRelease
+	} else {
+		// finds the deployed release with the given name
+		currentRelease, err = u.cfg.Releases.Deployed(name)
+		if err != nil {
+			if errors.Is(err, driver.ErrNoDeployedReleases) &&
+				(lastRelease.Info.Status == release.StatusFailed || lastRelease.Info.Status == release.StatusSuperseded) {
+				currentRelease = lastRelease
+			} else {
+				return nil, nil, err
+			}
+		}
+	}
+
+	// determine if values will be reused
+	vals, err = u.reuseValues(chart, currentRelease, vals)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := chartutil.ProcessDependencies(chart, vals); err != nil {
+		return nil, nil, err
+	}
+
+	// Increment revision count. This is passed to templates, and also stored on
+	// the release object.
+	revision := lastRelease.Version + 1
+
+	options := chartutil.ReleaseOptions{
+		Name:      name,
+		Namespace: currentRelease.Namespace,
+		Revision:  revision,
+		IsUpgrade: true,
+	}
+
+	caps, err := u.cfg.getCapabilities()
+	if err != nil {
+		return nil, nil, err
+	}
+	valuesToRender, err := chartutil.ToRenderValues(chart, vals, options, caps)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	hooks, manifestDoc, notesTxt, err := u.cfg.renderResources(chart, valuesToRender, "", "", u.SubNotes, false, false, u.PostRenderer, u.DryRun)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Store an upgraded release.
+	upgradedRelease := &release.Release{
+		Name:      name,
+		Namespace: currentRelease.Namespace,
+		Chart:     chart,
+		Config:    vals,
+		Info: &release.Info{
+			FirstDeployed: currentRelease.Info.FirstDeployed,
+			LastDeployed:  Timestamper(),
+			Status:        release.StatusPendingUpgrade,
+			Description:   "Preparing upgrade", // This should be overwritten later.
+		},
+		Version:  revision,
+		Manifest: manifestDoc.String(),
+		Hooks:    hooks,
+	}
+
+	if len(notesTxt) > 0 {
+		upgradedRelease.Info.Notes = notesTxt
+	}
+	err = validateManifest(u.cfg.KubeClient, manifestDoc.Bytes(), !u.DisableOpenAPIValidation)
+	return currentRelease, upgradedRelease, err
+}
+
+func (u *Upgrade) performUpgrade(originalRelease, upgradedRelease *release.Release) (*release.Release, error) {
+	current, err := u.cfg.KubeClient.Build(bytes.NewBufferString(originalRelease.Manifest), false)
+	if err != nil {
+		// Checking for removed Kubernetes API error so can provide a more informative error message to the user
+		// Ref: https://github.com/helm/helm/issues/7219
+		if strings.Contains(err.Error(), "unable to recognize \"\": no matches for kind") {
+			return upgradedRelease, errors.Wrap(err, "current release manifest contains removed kubernetes api(s) for this "+
+				"kubernetes version and it is therefore unable to build the kubernetes "+
+				"objects for performing the diff. error from kubernetes")
+		}
+		return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from current release manifest")
+	}
+	target, err := u.cfg.KubeClient.Build(bytes.NewBufferString(upgradedRelease.Manifest), !u.DisableOpenAPIValidation)
+	if err != nil {
+		return upgradedRelease, errors.Wrap(err, "unable to build kubernetes objects from new release manifest")
+	}
+
+	// It is safe to use force only on target because these are resources currently rendered by the chart.
+	err = target.Visit(setMetadataVisitor(upgradedRelease.Name, upgradedRelease.Namespace, true))
+	if err != nil {
+		return upgradedRelease, err
+	}
+
+	// Do a basic diff using gvk + name to figure out what new resources are being created so we can validate they don't already exist
+	existingResources := make(map[string]bool)
+	for _, r := range current {
+		existingResources[objectKey(r)] = true
+	}
+
+	var toBeCreated kube.ResourceList
+	for _, r := range target {
+		if !existingResources[objectKey(r)] {
+			toBeCreated = append(toBeCreated, r)
+		}
+	}
+
+	toBeUpdated, err := existingResourceConflict(toBeCreated, upgradedRelease.Name, upgradedRelease.Namespace)
+	if err != nil {
+		return nil, errors.Wrap(err, "rendered manifests contain a resource that already exists. Unable to continue with update")
+	}
+
+	toBeUpdated.Visit(func(r *resource.Info, err error) error {
+		if err != nil {
+			return err
+		}
+		current.Append(r)
+		return nil
+	})
+
+	if u.DryRun {
+		u.cfg.Log("dry run for %s", upgradedRelease.Name)
+		if len(u.Description) > 0 {
+			upgradedRelease.Info.Description = u.Description
+		} else {
+			upgradedRelease.Info.Description = "Dry run complete"
+		}
+		return upgradedRelease, nil
+	}
+
+	u.cfg.Log("creating upgraded release for %s", upgradedRelease.Name)
+	if err := u.cfg.Releases.Create(upgradedRelease); err != nil {
+		return nil, err
+	}
+
+	// pre-upgrade hooks
+	if !u.DisableHooks {
+		if err := u.cfg.execHook(upgradedRelease, release.HookPreUpgrade, u.Timeout); err != nil {
+			return u.failRelease(upgradedRelease, kube.ResourceList{}, fmt.Errorf("pre-upgrade hooks failed: %s", err))
+		}
+	} else {
+		u.cfg.Log("upgrade hooks disabled for %s", upgradedRelease.Name)
+	}
+
+	results, err := u.cfg.KubeClient.Update(current, target, u.Force)
+	if err != nil {
+		u.cfg.recordRelease(originalRelease)
+		return u.failRelease(upgradedRelease, results.Created, err)
+	}
+
+	if u.Recreate {
+		// NOTE: Because this is not critical for a release to succeed, we just
+		// log if an error occurs and continue onward. If we ever introduce log
+		// levels, we should make these error level logs so users are notified
+		// that they'll need to go do the cleanup on their own
+		if err := recreate(u.cfg, results.Updated); err != nil {
+			u.cfg.Log(err.Error())
+		}
+	}
+
+	if u.Wait {
+		if u.WaitForJobs {
+			if err := u.cfg.KubeClient.WaitWithJobs(target, u.Timeout); err != nil {
+				u.cfg.recordRelease(originalRelease)
+				return u.failRelease(upgradedRelease, results.Created, err)
+			}
+		} else {
+			if err := u.cfg.KubeClient.Wait(target, u.Timeout); err != nil {
+				u.cfg.recordRelease(originalRelease)
+				return u.failRelease(upgradedRelease, results.Created, err)
+			}
+		}
+	}
+
+	// post-upgrade hooks
+	if !u.DisableHooks {
+		if err := u.cfg.execHook(upgradedRelease, release.HookPostUpgrade, u.Timeout); err != nil {
+			return u.failRelease(upgradedRelease, results.Created, fmt.Errorf("post-upgrade hooks failed: %s", err))
+		}
+	}
+
+	originalRelease.Info.Status = release.StatusSuperseded
+	u.cfg.recordRelease(originalRelease)
+
+	upgradedRelease.Info.Status = release.StatusDeployed
+	if len(u.Description) > 0 {
+		upgradedRelease.Info.Description = u.Description
+	} else {
+		upgradedRelease.Info.Description = "Upgrade complete"
+	}
+
+	return upgradedRelease, nil
+}
+
+func (u *Upgrade) failRelease(rel *release.Release, created kube.ResourceList, err error) (*release.Release, error) {
+	msg := fmt.Sprintf("Upgrade %q failed: %s", rel.Name, err)
+	u.cfg.Log("warning: %s", msg)
+
+	rel.Info.Status = release.StatusFailed
+	rel.Info.Description = msg
+	u.cfg.recordRelease(rel)
+	if u.CleanupOnFail && len(created) > 0 {
+		u.cfg.Log("Cleanup on fail set, cleaning up %d resources", len(created))
+		_, errs := u.cfg.KubeClient.Delete(created)
+		if errs != nil {
+			var errorList []string
+			for _, e := range errs {
+				errorList = append(errorList, e.Error())
+			}
+			return rel, errors.Wrapf(fmt.Errorf("unable to cleanup resources: %s", strings.Join(errorList, ", ")), "an error occurred while cleaning up resources. original upgrade error: %s", err)
+		}
+		u.cfg.Log("Resource cleanup complete")
+	}
+	if u.Atomic {
+		u.cfg.Log("Upgrade failed and atomic is set, rolling back to last successful release")
+
+		// As a protection, get the last successful release before rollback.
+		// If there are no successful releases, bail out
+		hist := NewHistory(u.cfg)
+		fullHistory, herr := hist.Run(rel.Name)
+		if herr != nil {
+			return rel, errors.Wrapf(herr, "an error occurred while finding last successful release. original upgrade error: %s", err)
+		}
+
+		// There isn't a way to tell if a previous release was successful, but
+		// generally failed releases do not get superseded unless the next
+		// release is successful, so this should be relatively safe
+		filteredHistory := releaseutil.FilterFunc(func(r *release.Release) bool {
+			return r.Info.Status == release.StatusSuperseded || r.Info.Status == release.StatusDeployed
+		}).Filter(fullHistory)
+		if len(filteredHistory) == 0 {
+			return rel, errors.Wrap(err, "unable to find a previously successful release when attempting to rollback. original upgrade error")
+		}
+
+		releaseutil.Reverse(filteredHistory, releaseutil.SortByRevision)
+
+		rollin := NewRollback(u.cfg)
+		rollin.Version = filteredHistory[0].Version
+		rollin.Wait = true
+		rollin.WaitForJobs = u.WaitForJobs
+		rollin.DisableHooks = u.DisableHooks
+		rollin.Recreate = u.Recreate
+		rollin.Force = u.Force
+		rollin.Timeout = u.Timeout
+		if rollErr := rollin.Run(rel.Name); rollErr != nil {
+			return rel, errors.Wrapf(rollErr, "an error occurred while rolling back the release. original upgrade error: %s", err)
+		}
+		return rel, errors.Wrapf(err, "release %s failed, and has been rolled back due to atomic being set", rel.Name)
+	}
+
+	return rel, err
+}
+
+// reuseValues copies values from the current release to a new release if the
+// new release does not have any values.
+//
+// If the request already has values, or if there are no values in the current
+// release, this does nothing.
+//
+// This is skipped if the u.ResetValues flag is set, in which case the
+// request values are not altered.
+func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newVals map[string]interface{}) (map[string]interface{}, error) {
+	if u.ResetValues {
+		// If ResetValues is set, we completely ignore current.Config.
+		u.cfg.Log("resetting values to the chart's original version")
+		return newVals, nil
+	}
+
+	// If the ReuseValues flag is set, we always copy the old values over the new config's values.
+	if u.ReuseValues {
+		u.cfg.Log("reusing the old release's values")
+
+		// We have to regenerate the old coalesced values:
+		oldVals, err := chartutil.CoalesceValues(current.Chart, current.Config)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to rebuild old values")
+		}
+
+		newVals = chartutil.CoalesceTables(newVals, current.Config)
+
+		chart.Values = oldVals
+
+		return newVals, nil
+	}
+
+	if len(newVals) == 0 && len(current.Config) > 0 {
+		u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version)
+		newVals = current.Config
+	}
+	return newVals, nil
+}
+
+func validateManifest(c kube.Interface, manifest []byte, openAPIValidation bool) error {
+	_, err := c.Build(bytes.NewReader(manifest), openAPIValidation)
+	return err
+}
+
+// recreate captures all the logic for recreating pods for both upgrade and
+// rollback. If we end up refactoring rollback to use upgrade, this can just be
+// made an unexported method on the upgrade action.
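+//
+// Deleting the selected pods relies on their owning controllers (Deployments,
+// StatefulSets, and so on) to spin up replacements with the updated spec;
+// bare pods are simply removed.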
+func recreate(cfg *Configuration, resources kube.ResourceList) error {
+	for _, res := range resources {
+		versioned := kube.AsVersioned(res)
+		selector, err := kube.SelectorsForObject(versioned)
+		if err != nil {
+			// If no selector is returned, it means this object is
+			// definitely not a pod, so continue onward
+			continue
+		}
+
+		client, err := cfg.KubernetesClientSet()
+		if err != nil {
+			return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+		}
+
+		pods, err := client.CoreV1().Pods(res.Namespace).List(context.Background(), metav1.ListOptions{
+			LabelSelector: selector.String(),
+		})
+		if err != nil {
+			return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+		}
+
+		// Restart pods
+		for _, pod := range pods.Items {
+			// Delete each pod to get it restarted with the changed spec.
+			if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, *metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
+				return errors.Wrapf(err, "unable to recreate pods for object %s/%s because an error occurred", res.Namespace, res.Name)
+			}
+		}
+	}
+	return nil
+}
+
+func objectKey(r *resource.Info) string {
+	gvk := r.Object.GetObjectKind().GroupVersionKind()
+	return fmt.Sprintf("%s/%s/%s/%s", gvk.GroupVersion().String(), gvk.Kind, r.Namespace, r.Name)
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/action/validate.go b/vendor/helm.sh/helm/v3/pkg/action/validate.go
new file mode 100644
index 0000000000..6e074f78b3
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/action/validate.go
@@ -0,0 +1,184 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package action
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/resource"
+
+	"helm.sh/helm/v3/pkg/kube"
+)
+
+var accessor = meta.NewAccessor()
+
+const (
+	appManagedByLabel              = "app.kubernetes.io/managed-by"
+	appManagedByHelm               = "Helm"
+	helmReleaseNameAnnotation      = "meta.helm.sh/release-name"
+	helmReleaseNamespaceAnnotation = "meta.helm.sh/release-namespace"
+)
+
+func existingResourceConflict(resources kube.ResourceList, releaseName, releaseNamespace string) (kube.ResourceList, error) {
+	var requireUpdate kube.ResourceList
+
+	err := resources.Visit(func(info *resource.Info, err error) error {
+		if err != nil {
+			return err
+		}
+
+		helper := resource.NewHelper(info.Client, info.Mapping)
+		existing, err := helper.Get(info.Namespace, info.Name)
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				return nil
+			}
+			return errors.Wrap(err, "could not get information about the resource")
+		}
+
+		// Allow adoption of the resource if it is managed by Helm and is annotated with correct release name and namespace.
+		if err := checkOwnership(existing, releaseName, releaseNamespace); err != nil {
+			return fmt.Errorf("%s exists and cannot be imported into the current release: %s", resourceString(info), err)
+		}
+
+		requireUpdate.Append(info)
+		return nil
+	})
+
+	return requireUpdate, err
+}
+
+func checkOwnership(obj runtime.Object, releaseName, releaseNamespace string) error {
+	lbls, err := accessor.Labels(obj)
+	if err != nil {
+		return err
+	}
+	annos, err := accessor.Annotations(obj)
+	if err != nil {
+		return err
+	}
+
+	var errs []error
+	if err := requireValue(lbls, appManagedByLabel, appManagedByHelm); err != nil {
+		errs = append(errs, fmt.Errorf("label validation error: %s", err))
+	}
+	if err := requireValue(annos, helmReleaseNameAnnotation, releaseName); err != nil {
+		errs = append(errs, fmt.Errorf("annotation validation error: %s", err))
+	}
+	if err := requireValue(annos, helmReleaseNamespaceAnnotation, releaseNamespace); err != nil {
+		errs = append(errs, fmt.Errorf("annotation validation error: %s", err))
+	}
+
+	if len(errs) > 0 {
+		err := errors.New("invalid ownership metadata")
+		for _, e := range errs {
+			err = fmt.Errorf("%w; %s", err, e)
+		}
+		return err
+	}
+
+	return nil
+}
+
+func requireValue(meta map[string]string, k, v string) error {
+	actual, ok := meta[k]
+	if !ok {
+		return fmt.Errorf("missing key %q: must be set to %q", k, v)
+	}
+	if actual != v {
+		return fmt.Errorf("key %q must equal %q: current value is %q", k, v, actual)
+	}
+	return nil
+}
+
+// setMetadataVisitor adds release tracking metadata to all resources. If force is enabled, existing
+// ownership metadata will be overwritten. Otherwise an error will be returned if any resource has an
+// existing and conflicting value for the managed by label or Helm release/namespace annotations.
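+//
+// For illustration (editorial note, not upstream text): after a successful
+// visit every resource carries the app.kubernetes.io/managed-by=Helm label
+// plus the meta.helm.sh/release-name and meta.helm.sh/release-namespace
+// annotations defined above.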
+func setMetadataVisitor(releaseName, releaseNamespace string, force bool) resource.VisitorFunc { + return func(info *resource.Info, err error) error { + if err != nil { + return err + } + + if !force { + if err := checkOwnership(info.Object, releaseName, releaseNamespace); err != nil { + return fmt.Errorf("%s cannot be owned: %s", resourceString(info), err) + } + } + + if err := mergeLabels(info.Object, map[string]string{ + appManagedByLabel: appManagedByHelm, + }); err != nil { + return fmt.Errorf( + "%s labels could not be updated: %s", + resourceString(info), err, + ) + } + + if err := mergeAnnotations(info.Object, map[string]string{ + helmReleaseNameAnnotation: releaseName, + helmReleaseNamespaceAnnotation: releaseNamespace, + }); err != nil { + return fmt.Errorf( + "%s annotations could not be updated: %s", + resourceString(info), err, + ) + } + + return nil + } +} + +func resourceString(info *resource.Info) string { + _, k := info.Mapping.GroupVersionKind.ToAPIVersionAndKind() + return fmt.Sprintf( + "%s %q in namespace %q", + k, info.Name, info.Namespace, + ) +} + +func mergeLabels(obj runtime.Object, labels map[string]string) error { + current, err := accessor.Labels(obj) + if err != nil { + return err + } + return accessor.SetLabels(obj, mergeStrStrMaps(current, labels)) +} + +func mergeAnnotations(obj runtime.Object, annotations map[string]string) error { + current, err := accessor.Annotations(obj) + if err != nil { + return err + } + return accessor.SetAnnotations(obj, mergeStrStrMaps(current, annotations)) +} + +// merge two maps, always taking the value on the right +func mergeStrStrMaps(current, desired map[string]string) map[string]string { + result := make(map[string]string) + for k, v := range current { + result[k] = v + } + for k, desiredVal := range desired { + result[k] = desiredVal + } + return result +} diff --git a/vendor/helm.sh/helm/v3/pkg/action/verify.go b/vendor/helm.sh/helm/v3/pkg/action/verify.go new file mode 100644 index 0000000000..f362394962 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/action/verify.go @@ -0,0 +1,59 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package action + +import ( + "fmt" + "strings" + + "helm.sh/helm/v3/pkg/downloader" +) + +// Verify is the action for building a given chart's Verify tree. +// +// It provides the implementation of 'helm verify'. +type Verify struct { + Keyring string + Out string +} + +// NewVerify creates a new Verify object with the given configuration. +func NewVerify() *Verify { + return &Verify{} +} + +// Run executes 'helm verify'. 
+func (v *Verify) Run(chartfile string) error { + var out strings.Builder + p, err := downloader.VerifyChart(chartfile, v.Keyring) + if err != nil { + return err + } + + for name := range p.SignedBy.Identities { + fmt.Fprintf(&out, "Signed by: %v\n", name) + } + fmt.Fprintf(&out, "Using Key With Fingerprint: %X\n", p.SignedBy.PrimaryKey.Fingerprint) + fmt.Fprintf(&out, "Chart Hash Verified: %s\n", p.FileHash) + + // TODO(mattfarina): The output is set as a property rather than returned + // to maintain the Go API. In Helm v4 this function should return the out + // and the property on the struct can be removed. + v.Out = out.String() + + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/dependency.go b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go index 9ec4544c22..b2819f373a 100644 --- a/vendor/helm.sh/helm/v3/pkg/chart/dependency.go +++ b/vendor/helm.sh/helm/v3/pkg/chart/dependency.go @@ -49,6 +49,23 @@ type Dependency struct { Alias string `json:"alias,omitempty"` } +// Validate checks for common problems with the dependency datastructure in +// the chart. This check must be done at load time before the dependency's charts are +// loaded. +func (d *Dependency) Validate() error { + d.Name = sanitizeString(d.Name) + d.Version = sanitizeString(d.Version) + d.Repository = sanitizeString(d.Repository) + d.Condition = sanitizeString(d.Condition) + for i := range d.Tags { + d.Tags[i] = sanitizeString(d.Tags[i]) + } + if d.Alias != "" && !aliasNameFormat.MatchString(d.Alias) { + return ValidationErrorf("dependency %q has disallowed characters in the alias", d.Name) + } + return nil +} + // Lock is a lock file for dependencies. // // It represents the state that the dependencies should be in. diff --git a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go index 1848eb280d..1925e45ac6 100644 --- a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go +++ b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go @@ -15,6 +15,13 @@ limitations under the License. package chart +import ( + "strings" + "unicode" + + "github.com/Masterminds/semver/v3" +) + // Maintainer describes a Chart maintainer. type Maintainer struct { // Name is a user name or organization name @@ -25,15 +32,23 @@ type Maintainer struct { URL string `json:"url,omitempty"` } +// Validate checks valid data and sanitizes string characters. +func (m *Maintainer) Validate() error { + m.Name = sanitizeString(m.Name) + m.Email = sanitizeString(m.Email) + m.URL = sanitizeString(m.URL) + return nil +} + // Metadata for a Chart file. This models the structure of a Chart.yaml file. type Metadata struct { - // The name of the chart + // The name of the chart. Required. Name string `json:"name,omitempty"` // The URL to a relevant project page, git repo, or contact person Home string `json:"home,omitempty"` // Source is the URL to the source code of this chart Sources []string `json:"sources,omitempty"` - // A SemVer 2 conformant version string of the chart + // A SemVer 2 conformant version string of the chart. Required. Version string `json:"version,omitempty"` // A one-sentence description of the chart Description string `json:"description,omitempty"` @@ -43,7 +58,7 @@ type Metadata struct { Maintainers []*Maintainer `json:"maintainers,omitempty"` // The URL to an icon file. Icon string `json:"icon,omitempty"` - // The API Version of this chart. + // The API Version of this chart. Required. 
APIVersion string `json:"apiVersion,omitempty"` // The condition to check to enable chart Condition string `json:"condition,omitempty"` @@ -64,11 +79,28 @@ type Metadata struct { Type string `json:"type,omitempty"` } -// Validate checks the metadata for known issues, returning an error if metadata is not correct +// Validate checks the metadata for known issues and sanitizes string +// characters. func (md *Metadata) Validate() error { if md == nil { return ValidationError("chart.metadata is required") } + + md.Name = sanitizeString(md.Name) + md.Description = sanitizeString(md.Description) + md.Home = sanitizeString(md.Home) + md.Icon = sanitizeString(md.Icon) + md.Condition = sanitizeString(md.Condition) + md.Tags = sanitizeString(md.Tags) + md.AppVersion = sanitizeString(md.AppVersion) + md.KubeVersion = sanitizeString(md.KubeVersion) + for i := range md.Sources { + md.Sources[i] = sanitizeString(md.Sources[i]) + } + for i := range md.Keywords { + md.Keywords[i] = sanitizeString(md.Keywords[i]) + } + if md.APIVersion == "" { return ValidationError("chart.metadata.apiVersion is required") } @@ -78,19 +110,26 @@ func (md *Metadata) Validate() error { if md.Version == "" { return ValidationError("chart.metadata.version is required") } + if !isValidSemver(md.Version) { + return ValidationErrorf("chart.metadata.version %q is invalid", md.Version) + } if !isValidChartType(md.Type) { return ValidationError("chart.metadata.type must be application or library") } + for _, m := range md.Maintainers { + if err := m.Validate(); err != nil { + return err + } + } + // Aliases need to be validated here to make sure that the alias name does // not contain any illegal characters. for _, dependency := range md.Dependencies { - if err := validateDependency(dependency); err != nil { + if err := dependency.Validate(); err != nil { return err } } - - // TODO validate valid semver here? return nil } @@ -102,12 +141,20 @@ func isValidChartType(in string) bool { return false } -// validateDependency checks for common problems with the dependency datastructure in -// the chart. This check must be done at load time before the dependency's charts are -// loaded. -func validateDependency(dep *Dependency) error { - if len(dep.Alias) > 0 && !aliasNameFormat.MatchString(dep.Alias) { - return ValidationErrorf("dependency %q has disallowed characters in the alias", dep.Name) - } - return nil +func isValidSemver(v string) bool { + _, err := semver.NewVersion(v) + return err == nil +} + +// sanitizeString normalize spaces and removes non-printable characters. 
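+//
+// For example (illustrative):
+//
+//	sanitizeString("a\tb\x00c") // "a bc": the tab maps to a space and
+//	                            // the non-printable NUL byte is dropped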
+func sanitizeString(str string) string { + return strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return ' ' + } + if unicode.IsPrint(r) { + return r + } + return -1 + }, str) } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go index c002e33f22..d24228056f 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go @@ -19,6 +19,7 @@ import ( "fmt" "strconv" + "github.com/Masterminds/semver/v3" "k8s.io/client-go/kubernetes/scheme" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -58,6 +59,14 @@ type Capabilities struct { HelmVersion helmversion.BuildInfo } +func (capabilities *Capabilities) Copy() *Capabilities { + return &Capabilities{ + KubeVersion: capabilities.KubeVersion, + APIVersions: capabilities.APIVersions, + HelmVersion: capabilities.HelmVersion, + } +} + // KubeVersion is the Kubernetes version. type KubeVersion struct { Version string // Kubernetes version @@ -73,6 +82,19 @@ func (kv *KubeVersion) String() string { return kv.Version } // Deprecated: use KubeVersion.Version. func (kv *KubeVersion) GitVersion() string { return kv.Version } +// ParseKubeVersion parses kubernetes version from string +func ParseKubeVersion(version string) (*KubeVersion, error) { + sv, err := semver.NewVersion(version) + if err != nil { + return nil, err + } + return &KubeVersion{ + Version: "v" + sv.String(), + Major: strconv.FormatUint(sv.Major(), 10), + Minor: strconv.FormatUint(sv.Minor(), 10), + }, nil +} + // VersionSet is a set of Kubernetes API versions. type VersionSet []string diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go index bdcdc6e974..ca79e7ab21 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go @@ -147,12 +147,15 @@ service: ingress: enabled: false + className: "" annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" hosts: - host: chart-example.local - paths: [] + paths: + - path: / + pathType: ImplementationSpecific tls: [] # - secretName: chart-example-tls # hosts: @@ -212,7 +215,14 @@ const defaultIgnore = `# Patterns to ignore when building packages. const defaultIngress = `{{- if .Values.ingress.enabled -}} {{- $fullName := include ".fullname" . -}} {{- $svcPort := .Values.service.port -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} apiVersion: networking.k8s.io/v1beta1 {{- else -}} apiVersion: extensions/v1beta1 @@ -227,6 +237,9 @@ metadata: {{- toYaml . 
| nindent 4 }} {{- end }} spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} {{- if .Values.ingress.tls }} tls: {{- range .Values.ingress.tls }} @@ -244,12 +257,22 @@ spec: paths: {{- range .paths }} - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} serviceName: {{ $fullName }} servicePort: {{ $svcPort }} + {{- end }} {{- end }} {{- end }} - {{- end }} +{{- end }} ` const defaultDeployment = `apiVersion: apps/v1 diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go index 913a477cf0..d253731ec6 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go @@ -61,7 +61,7 @@ const ( // ValidateReleaseName performs checks for an entry for a Helm release name // // For Helm to allow a name, it must be below a certain character count (53) and also match -// a reguar expression. +// a regular expression. // // According to the Kubernetes help text, the regular expression it uses is: // @@ -96,6 +96,9 @@ func ValidateReleaseName(name string) error { // // The Kubernetes documentation is here, though it is not entirely correct: // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +// +// Deprecated: remove in Helm 4. Name validation now uses rules defined in +// pkg/lint/rules.validateMetadataNameFunc() func ValidateMetadataName(name string) error { if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) { return errInvalidKubernetesName diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go new file mode 100644 index 0000000000..575c941510 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go @@ -0,0 +1,384 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package downloader + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/internal/experimental/registry" + "helm.sh/helm/v3/internal/fileutil" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/provenance" + "helm.sh/helm/v3/pkg/repo" +) + +// VerificationStrategy describes a strategy for determining whether to verify a chart. +type VerificationStrategy int + +const ( + // VerifyNever will skip all verification of a chart. + VerifyNever VerificationStrategy = iota + // VerifyIfPossible will attempt a verification, it will not error if verification + // data is missing. But it will not stop processing if verification fails. 
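+	// As a point of reference (an assumption about the CLI, not defined in
+	// this file): `helm install --verify` corresponds to VerifyAlways, while
+	// the zero value, VerifyNever, is the default.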
+ VerifyIfPossible + // VerifyAlways will always attempt a verification, and will fail if the + // verification fails. + VerifyAlways + // VerifyLater will fetch verification data, but not do any verification. + // This is to accommodate the case where another step of the process will + // perform verification. + VerifyLater +) + +// ErrNoOwnerRepo indicates that a given chart URL can't be found in any repos. +var ErrNoOwnerRepo = errors.New("could not find a repo containing the given URL") + +// ChartDownloader handles downloading a chart. +// +// It is capable of performing verifications on charts as well. +type ChartDownloader struct { + // Out is the location to write warning and info messages. + Out io.Writer + // Verify indicates what verification strategy to use. + Verify VerificationStrategy + // Keyring is the keyring file used for verification. + Keyring string + // Getter collection for the operation + Getters getter.Providers + // Options provide parameters to be passed along to the Getter being initialized. + Options []getter.Option + RegistryClient *registry.Client + RepositoryConfig string + RepositoryCache string +} + +// DownloadTo retrieves a chart. Depending on the settings, it may also download a provenance file. +// +// If Verify is set to VerifyNever, the verification will be nil. +// If Verify is set to VerifyIfPossible, this will return a verification (or nil on failure), and print a warning on failure. +// If Verify is set to VerifyAlways, this will return a verification or an error if the verification fails. +// If Verify is set to VerifyLater, this will download the prov file (if it exists), but not verify it. +// +// For VerifyNever and VerifyIfPossible, the Verification may be empty. +// +// Returns a string path to the location where the file was downloaded and a verification +// (if provenance was verified), or an error if something bad happened. +func (c *ChartDownloader) DownloadTo(ref, version, dest string) (string, *provenance.Verification, error) { + u, err := c.ResolveChartVersion(ref, version) + if err != nil { + return "", nil, err + } + + g, err := c.Getters.ByScheme(u.Scheme) + if err != nil { + return "", nil, err + } + + data, err := g.Get(u.String(), c.Options...) + if err != nil { + return "", nil, err + } + + name := filepath.Base(u.Path) + if u.Scheme == "oci" { + name = fmt.Sprintf("%s-%s.tgz", name, version) + } + + destfile := filepath.Join(dest, name) + if err := fileutil.AtomicWriteFile(destfile, data, 0644); err != nil { + return destfile, nil, err + } + + // If provenance is requested, verify it. + ver := &provenance.Verification{} + if c.Verify > VerifyNever { + body, err := g.Get(u.String() + ".prov") + if err != nil { + if c.Verify == VerifyAlways { + return destfile, ver, errors.Errorf("failed to fetch provenance %q", u.String()+".prov") + } + fmt.Fprintf(c.Out, "WARNING: Verification not found for %s: %s\n", ref, err) + return destfile, ver, nil + } + provfile := destfile + ".prov" + if err := fileutil.AtomicWriteFile(provfile, body, 0644); err != nil { + return destfile, nil, err + } + + if c.Verify != VerifyLater { + ver, err = VerifyChart(destfile, c.Keyring) + if err != nil { + // Fail always in this case, since it means the verification step + // failed. + return destfile, ver, err + } + } + } + return destfile, ver, nil +} + +// ResolveChartVersion resolves a chart reference to a URL. +// +// It returns the URL and sets the ChartDownloader's Options that can fetch +// the URL using the appropriate Getter. 
+// +// A reference may be an HTTP URL, a 'reponame/chartname' reference, or a local path. +// +// A version is a SemVer string (1.2.3-beta.1+f334a6789). +// +// - For fully qualified URLs, the version will be ignored (since URLs aren't versioned) +// - For a chart reference +// * If version is non-empty, this will return the URL for that version +// * If version is empty, this will return the URL for the latest version +// * If no version can be found, an error is returned +func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, error) { + u, err := url.Parse(ref) + if err != nil { + return nil, errors.Errorf("invalid chart URL format: %s", ref) + } + + rf, err := loadRepoConfig(c.RepositoryConfig) + if err != nil { + return u, err + } + + if u.IsAbs() && len(u.Host) > 0 && len(u.Path) > 0 { + // In this case, we have to find the parent repo that contains this chart + // URL. And this is an unfortunate problem, as it requires actually going + // through each repo cache file and finding a matching URL. But basically + // we want to find the repo in case we have special SSL cert config + // for that repo. + + rc, err := c.scanReposForURL(ref, rf) + if err != nil { + // If there is no special config, return the default HTTP client and + // swallow the error. + if err == ErrNoOwnerRepo { + // Make sure to add the ref URL as the URL for the getter + c.Options = append(c.Options, getter.WithURL(ref)) + return u, nil + } + return u, err + } + + // If we get here, we don't need to go through the next phase of looking + // up the URL. We have it already. So we just set the parameters and return. + c.Options = append( + c.Options, + getter.WithURL(rc.URL), + ) + if rc.CertFile != "" || rc.KeyFile != "" || rc.CAFile != "" { + c.Options = append(c.Options, getter.WithTLSClientConfig(rc.CertFile, rc.KeyFile, rc.CAFile)) + } + if rc.Username != "" && rc.Password != "" { + c.Options = append( + c.Options, + getter.WithBasicAuth(rc.Username, rc.Password), + getter.WithPassCredentialsAll(rc.PassCredentialsAll), + ) + } + return u, nil + } + + // See if it's of the form: repo/path_to_chart + p := strings.SplitN(u.Path, "/", 2) + if len(p) < 2 { + return u, errors.Errorf("non-absolute URLs should be in form of repo_name/path_to_chart, got: %s", u) + } + + repoName := p[0] + chartName := p[1] + rc, err := pickChartRepositoryConfigByName(repoName, rf.Repositories) + + if err != nil { + return u, err + } + + // Now that we have the chart repository information we can use that URL + // to set the URL for the getter. + c.Options = append(c.Options, getter.WithURL(rc.URL)) + + r, err := repo.NewChartRepository(rc, c.Getters) + if err != nil { + return u, err + } + + if r != nil && r.Config != nil { + if r.Config.CertFile != "" || r.Config.KeyFile != "" || r.Config.CAFile != "" { + c.Options = append(c.Options, getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile)) + } + if r.Config.Username != "" && r.Config.Password != "" { + c.Options = append(c.Options, + getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), + ) + } + } + + // Next, we need to load the index, and actually look up the chart. + idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name)) + i, err := repo.LoadIndexFile(idxFile) + if err != nil { + return u, errors.Wrap(err, "no cached repo found. 
(try 'helm repo update')") + } + + cv, err := i.Get(chartName, version) + if err != nil { + return u, errors.Wrapf(err, "chart %q matching %s not found in %s index. (try 'helm repo update')", chartName, version, r.Config.Name) + } + + if len(cv.URLs) == 0 { + return u, errors.Errorf("chart %q has no downloadable URLs", ref) + } + + // TODO: Seems that picking first URL is not fully correct + u, err = url.Parse(cv.URLs[0]) + if err != nil { + return u, errors.Errorf("invalid chart URL format: %s", ref) + } + + // If the URL is relative (no scheme), prepend the chart repo's base URL + if !u.IsAbs() { + repoURL, err := url.Parse(rc.URL) + if err != nil { + return repoURL, err + } + q := repoURL.Query() + // We need a trailing slash for ResolveReference to work, but make sure there isn't already one + repoURL.Path = strings.TrimSuffix(repoURL.Path, "/") + "/" + u = repoURL.ResolveReference(u) + u.RawQuery = q.Encode() + // TODO add user-agent + if _, err := getter.NewHTTPGetter(getter.WithURL(rc.URL)); err != nil { + return repoURL, err + } + return u, err + } + + // TODO add user-agent + return u, nil +} + +// VerifyChart takes a path to a chart archive and a keyring, and verifies the chart. +// +// It assumes that a chart archive file is accompanied by a provenance file whose +// name is the archive file name plus the ".prov" extension. +func VerifyChart(path, keyring string) (*provenance.Verification, error) { + // For now, error out if it's not a tar file. + switch fi, err := os.Stat(path); { + case err != nil: + return nil, err + case fi.IsDir(): + return nil, errors.New("unpacked charts cannot be verified") + case !isTar(path): + return nil, errors.New("chart must be a tgz file") + } + + provfile := path + ".prov" + if _, err := os.Stat(provfile); err != nil { + return nil, errors.Wrapf(err, "could not load provenance file %s", provfile) + } + + sig, err := provenance.NewFromKeyring(keyring, "") + if err != nil { + return nil, errors.Wrap(err, "failed to load keyring") + } + return sig.Verify(path, provfile) +} + +// isTar tests whether the given file is a tar file. +// +// Currently, this simply checks extension, since a subsequent function will +// untar the file and validate its binary format. +func isTar(filename string) bool { + return strings.EqualFold(filepath.Ext(filename), ".tgz") +} + +func pickChartRepositoryConfigByName(name string, cfgs []*repo.Entry) (*repo.Entry, error) { + for _, rc := range cfgs { + if rc.Name == name { + if rc.URL == "" { + return nil, errors.Errorf("no URL found for repository %s", name) + } + return rc, nil + } + } + return nil, errors.Errorf("repo %s not found", name) +} + +// scanReposForURL scans all repos to find which repo contains the given URL. +// +// This will attempt to find the given URL in all of the known repositories files. +// +// If the URL is found, this will return the repo entry that contained that URL. +// +// If all of the repos are checked, but the URL is not found, an ErrNoOwnerRepo +// error is returned. +// +// Other errors may be returned when repositories cannot be loaded or searched. +// +// Technically, the fact that a URL is not found in a repo is not a failure indication. +// Charts are not required to be included in an index before they are valid. So +// be mindful of this case. +// +// The same URL can technically exist in two or more repositories. This algorithm +// will return the first one it finds. Order is determined by the order of repositories +// in the repositories.yaml file. 
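+//
+// Callers typically treat ErrNoOwnerRepo as "fetch the URL as-is"; a sketch
+// mirroring ResolveChartVersion above:
+//
+//	rc, err := c.scanReposForURL(ref, rf)
+//	if err == ErrNoOwnerRepo {
+//		// no configured repo owns ref; fall back to downloading ref directly
+//	}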
+func (c *ChartDownloader) scanReposForURL(u string, rf *repo.File) (*repo.Entry, error) { + // FIXME: This is far from optimal. Larger installations and index files will + // incur a performance hit for this type of scanning. + for _, rc := range rf.Repositories { + r, err := repo.NewChartRepository(rc, c.Getters) + if err != nil { + return nil, err + } + + idxFile := filepath.Join(c.RepositoryCache, helmpath.CacheIndexFile(r.Config.Name)) + i, err := repo.LoadIndexFile(idxFile) + if err != nil { + return nil, errors.Wrap(err, "no cached repo found. (try 'helm repo update')") + } + + for _, entry := range i.Entries { + for _, ver := range entry { + for _, dl := range ver.URLs { + if urlutil.Equal(u, dl) { + return rc, nil + } + } + } + } + } + // This means that there is no repo file for the given URL. + return nil, ErrNoOwnerRepo +} + +func loadRepoConfig(file string) (*repo.File, error) { + r, err := repo.LoadFile(file) + if err != nil && !os.IsNotExist(errors.Cause(err)) { + return nil, err + } + return r, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/doc.go b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go new file mode 100644 index 0000000000..9588a7dfe1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/doc.go @@ -0,0 +1,23 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package downloader provides a library for downloading charts. + +This package contains various tools for downloading charts from repository +servers, and then storing them in Helm-specific directory structures. This +library contains many functions that depend on a specific +filesystem layout. +*/ +package downloader diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go new file mode 100644 index 0000000000..8211471ca5 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -0,0 +1,898 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package downloader + +import ( + "crypto" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "log" + "net/url" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/internal/experimental/registry" + "helm.sh/helm/v3/internal/resolver" + "helm.sh/helm/v3/internal/third_party/dep/fs" + "helm.sh/helm/v3/internal/urlutil" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/helmpath" + "helm.sh/helm/v3/pkg/repo" +) + +// ErrRepoNotFound indicates that chart repositories can't be found in local repo cache. +// The value of Repos is missing repos. +type ErrRepoNotFound struct { + Repos []string +} + +// Error implements the error interface. +func (e ErrRepoNotFound) Error() string { + return fmt.Sprintf("no repository definition for %s", strings.Join(e.Repos, ", ")) +} + +// Manager handles the lifecycle of fetching, resolving, and storing dependencies. +type Manager struct { + // Out is used to print warnings and notifications. + Out io.Writer + // ChartPath is the path to the unpacked base chart upon which this operates. + ChartPath string + // Verification indicates whether the chart should be verified. + Verify VerificationStrategy + // Debug is the global "--debug" flag + Debug bool + // Keyring is the key ring file. + Keyring string + // SkipUpdate indicates that the repository should not be updated first. + SkipUpdate bool + // Getter collection for the operation + Getters []getter.Provider + RegistryClient *registry.Client + RepositoryConfig string + RepositoryCache string +} + +// Build rebuilds a local charts directory from a lockfile. +// +// If the lockfile is not present, this will run a Manager.Update() +// +// If SkipUpdate is set, this will not update the repository. +func (m *Manager) Build() error { + c, err := m.loadChartDir() + if err != nil { + return err + } + + // If a lock file is found, run a build from that. Otherwise, just do + // an update. + lock := c.Lock + if lock == nil { + return m.Update() + } + + // Check that all of the repos we're dependent on actually exist. + req := c.Metadata.Dependencies + + // If using apiVersion v1, calculate the hash before resolve repo names + // because resolveRepoNames will change req if req uses repo alias + // and Helm 2 calculate the digest from the original req + // Fix for: https://github.com/helm/helm/issues/7619 + var v2Sum string + if c.Metadata.APIVersion == chart.APIVersionV1 { + v2Sum, err = resolver.HashV2Req(req) + if err != nil { + return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). Please update the dependencies") + } + } + + if _, err := m.resolveRepoNames(req); err != nil { + return err + } + + if sum, err := resolver.HashReq(req, lock.Dependencies); err != nil || sum != lock.Digest { + // If lock digest differs and chart is apiVersion v1, it maybe because the lock was built + // with Helm 2 and therefore should be checked with Helm v2 hash + // Fix for: https://github.com/helm/helm/issues/7233 + if c.Metadata.APIVersion == chart.APIVersionV1 { + log.Println("warning: a valid Helm v3 hash was not found. Checking against Helm v2 hash...") + if v2Sum != lock.Digest { + return errors.New("the lock file (requirements.lock) is out of sync with the dependencies file (requirements.yaml). 
Please update the dependencies")
+			}
+		} else {
+			return errors.New("the lock file (Chart.lock) is out of sync with the dependencies file (Chart.yaml). Please update the dependencies")
+		}
+	}
+
+	// Check that all of the repos we're dependent on actually exist.
+	if err := m.hasAllRepos(lock.Dependencies); err != nil {
+		return err
+	}
+
+	if !m.SkipUpdate {
+		// For each repo in the file, update the cached copy of that repo
+		if err := m.UpdateRepositories(); err != nil {
+			return err
+		}
+	}
+
+	// Now we need to fetch every package here into charts/
+	return m.downloadAll(lock.Dependencies)
+}
+
+// Update updates a local charts directory.
+//
+// It first reads the Chart.yaml file, and then attempts to
+// negotiate versions based on that. It will download the versions
+// from remote chart repositories unless SkipUpdate is true.
+func (m *Manager) Update() error {
+	c, err := m.loadChartDir()
+	if err != nil {
+		return err
+	}
+
+	// If no dependencies are found, we consider this a successful
+	// completion.
+	req := c.Metadata.Dependencies
+	if req == nil {
+		return nil
+	}
+
+	// Get the names of the repositories the dependencies need that Helm is
+	// configured to know about.
+	repoNames, err := m.resolveRepoNames(req)
+	if err != nil {
+		return err
+	}
+
+	// For the repositories Helm is not configured to know about, ensure Helm
+	// has some information about them and, when possible, the index files
+	// locally.
+	// TODO(mattfarina): Repositories should be explicitly added by end users
+	// rather than automatically. In Helm v4, require users to add repositories. They
+	// should have to add them in order to make sure they are aware of the
+	// repositories and opt-in to any locations, for security.
+	repoNames, err = m.ensureMissingRepos(repoNames, req)
+	if err != nil {
+		return err
+	}
+
+	// For each of the repositories Helm is configured to know about, update
+	// the index information locally.
+	if !m.SkipUpdate {
+		if err := m.UpdateRepositories(); err != nil {
+			return err
+		}
+	}
+
+	// Now we need to find out which version of a chart best satisfies the
+	// dependencies in the Chart.yaml
+	lock, err := m.resolve(req, repoNames)
+	if err != nil {
+		return err
+	}
+
+	// Now we need to fetch every package here into charts/
+	if err := m.downloadAll(lock.Dependencies); err != nil {
+		return err
+	}
+
+	// downloadAll might overwrite a dependency's version, so recalculate the lock digest
+	newDigest, err := resolver.HashReq(req, lock.Dependencies)
+	if err != nil {
+		return err
+	}
+	lock.Digest = newDigest
+
+	// If the lock file hasn't changed, don't write a new one.
+	oldLock := c.Lock
+	if oldLock != nil && oldLock.Digest == lock.Digest {
+		return nil
+	}
+
+	// Finally, we need to write the lockfile.
+	return writeLock(m.ChartPath, lock, c.Metadata.APIVersion == chart.APIVersionV1)
+}
+
+func (m *Manager) loadChartDir() (*chart.Chart, error) {
+	if fi, err := os.Stat(m.ChartPath); err != nil {
+		return nil, errors.Wrapf(err, "could not find %s", m.ChartPath)
+	} else if !fi.IsDir() {
+		return nil, errors.New("only unpacked charts can be updated")
+	}
+	return loader.LoadDir(m.ChartPath)
+}
+
+// resolve takes a list of dependencies and translates them into an exact version to download.
+//
+// This returns a lock file, which has all of the dependencies normalized to a specific version.
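+//
+// Illustrative: inside Update above the call is
+//
+//	lock, err := m.resolve(req, repoNames)
+//
+// where a constraint such as "~1.2.0" in req becomes one concrete version
+// (say, 1.2.7) recorded in lock.Dependencies.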
+func (m *Manager) resolve(req []*chart.Dependency, repoNames map[string]string) (*chart.Lock, error) { + res := resolver.New(m.ChartPath, m.RepositoryCache) + return res.Resolve(req, repoNames) +} + +// downloadAll takes a list of dependencies and downloads them into charts/ +// +// It will delete versions of the chart that exist on disk and might cause +// a conflict. +func (m *Manager) downloadAll(deps []*chart.Dependency) error { + repos, err := m.loadChartRepositories() + if err != nil { + return err + } + + destPath := filepath.Join(m.ChartPath, "charts") + tmpPath := filepath.Join(m.ChartPath, "tmpcharts") + + // Create 'charts' directory if it doesn't already exist. + if fi, err := os.Stat(destPath); err != nil { + if err := os.MkdirAll(destPath, 0755); err != nil { + return err + } + } else if !fi.IsDir() { + return errors.Errorf("%q is not a directory", destPath) + } + + if err := fs.RenameWithFallback(destPath, tmpPath); err != nil { + return errors.Wrap(err, "unable to move current charts to tmp dir") + } + + if err := os.MkdirAll(destPath, 0755); err != nil { + return err + } + + fmt.Fprintf(m.Out, "Saving %d charts\n", len(deps)) + var saveError error + churls := make(map[string]struct{}) + for _, dep := range deps { + // No repository means the chart is in charts directory + if dep.Repository == "" { + fmt.Fprintf(m.Out, "Dependency %s did not declare a repository. Assuming it exists in the charts directory\n", dep.Name) + chartPath := filepath.Join(tmpPath, dep.Name) + ch, err := loader.LoadDir(chartPath) + if err != nil { + return fmt.Errorf("Unable to load chart: %v", err) + } + + constraint, err := semver.NewConstraint(dep.Version) + if err != nil { + return fmt.Errorf("Dependency %s has an invalid version/constraint format: %s", dep.Name, err) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return fmt.Errorf("Invalid version %s for dependency %s: %s", dep.Version, dep.Name, err) + } + + if !constraint.Check(v) { + saveError = fmt.Errorf("Dependency %s at version %s does not satisfy the constraint %s", dep.Name, ch.Metadata.Version, dep.Version) + break + } + continue + } + if strings.HasPrefix(dep.Repository, "file://") { + if m.Debug { + fmt.Fprintf(m.Out, "Archiving %s from repo %s\n", dep.Name, dep.Repository) + } + ver, err := tarFromLocalDir(m.ChartPath, dep.Name, dep.Repository, dep.Version) + if err != nil { + saveError = err + break + } + dep.Version = ver + continue + } + + // Any failure to resolve/download a chart should fail: + // https://github.com/helm/helm/issues/1439 + churl, username, password, insecureskiptlsverify, passcredentialsall, caFile, certFile, keyFile, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) + if err != nil { + saveError = errors.Wrapf(err, "could not find %s", churl) + break + } + + if _, ok := churls[churl]; ok { + fmt.Fprintf(m.Out, "Already downloaded %s from repo %s\n", dep.Name, dep.Repository) + continue + } + + fmt.Fprintf(m.Out, "Downloading %s from repo %s\n", dep.Name, dep.Repository) + + dl := ChartDownloader{ + Out: m.Out, + Verify: m.Verify, + Keyring: m.Keyring, + RepositoryConfig: m.RepositoryConfig, + RepositoryCache: m.RepositoryCache, + Getters: m.Getters, + Options: []getter.Option{ + getter.WithBasicAuth(username, password), + getter.WithPassCredentialsAll(passcredentialsall), + getter.WithInsecureSkipVerifyTLS(insecureskiptlsverify), + getter.WithTLSClientConfig(certFile, keyFile, caFile), + }, + } + + version := "" + if strings.HasPrefix(churl, "oci://") { + if 
!resolver.FeatureGateOCI.IsEnabled() { + return errors.Wrapf(resolver.FeatureGateOCI.Error(), + "the repository %s is an OCI registry", churl) + } + + churl, version, err = parseOCIRef(churl) + if err != nil { + return errors.Wrapf(err, "could not parse OCI reference") + } + dl.Options = append(dl.Options, + getter.WithRegistryClient(m.RegistryClient), + getter.WithTagName(version)) + } + + _, _, err = dl.DownloadTo(churl, version, destPath) + if err != nil { + saveError = errors.Wrapf(err, "could not download %s", churl) + break + } + + churls[churl] = struct{}{} + } + + if saveError == nil { + fmt.Fprintln(m.Out, "Deleting outdated charts") + for _, dep := range deps { + // Chart from local charts directory stays in place + if dep.Repository != "" { + if err := m.safeDeleteDep(dep.Name, tmpPath); err != nil { + return err + } + } + } + if err := move(tmpPath, destPath); err != nil { + return err + } + if err := os.RemoveAll(tmpPath); err != nil { + return errors.Wrapf(err, "failed to remove %v", tmpPath) + } + } else { + fmt.Fprintln(m.Out, "Save error occurred: ", saveError) + fmt.Fprintln(m.Out, "Deleting newly downloaded charts, restoring pre-update state") + for _, dep := range deps { + if err := m.safeDeleteDep(dep.Name, destPath); err != nil { + return err + } + } + if err := os.RemoveAll(destPath); err != nil { + return errors.Wrapf(err, "failed to remove %v", destPath) + } + if err := fs.RenameWithFallback(tmpPath, destPath); err != nil { + return errors.Wrap(err, "unable to move current charts to tmp dir") + } + return saveError + } + return nil +} + +func parseOCIRef(chartRef string) (string, string, error) { + refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`) + caps := refTagRegexp.FindStringSubmatch(chartRef) + if len(caps) != 4 { + return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef) + } + chartRef = caps[1] + tag := caps[3] + + return chartRef, tag, nil +} + +// safeDeleteDep deletes any versions of the given dependency in the given directory. +// +// It does this by first matching the file name to an expected pattern, then loading +// the file to verify that it is a chart with the same name as the given name. +// +// Because it requires tar file introspection, it is more intensive than a basic delete. +// +// This will only return errors that should stop processing entirely. Other errors +// will emit log messages or be ignored. +func (m *Manager) safeDeleteDep(name, dir string) error { + files, err := filepath.Glob(filepath.Join(dir, name+"-*.tgz")) + if err != nil { + // Only for ErrBadPattern + return err + } + for _, fname := range files { + ch, err := loader.LoadFile(fname) + if err != nil { + fmt.Fprintf(m.Out, "Could not verify %s for deletion: %s (Skipping)", fname, err) + continue + } + if ch.Name() != name { + // This is not the file you are looking for. + continue + } + if err := os.Remove(fname); err != nil { + fmt.Fprintf(m.Out, "Could not delete %s: %s (Skipping)", fname, err) + continue + } + } + return nil +} + +// hasAllRepos ensures that all of the referenced deps are in the local repo cache. +func (m *Manager) hasAllRepos(deps []*chart.Dependency) error { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return err + } + repos := rf.Repositories + + // Verify that all repositories referenced in the deps are actually known + // by Helm. 
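+	// For example (illustrative): a dependency repository of
+	// "https://charts.example.com/" matches a configured entry whose URL is
+	// "https://charts.example.com", because the trailing slash is trimmed
+	// before the urlutil.Equal comparison below.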
+ missing := []string{} +Loop: + for _, dd := range deps { + // If repo is from local path or OCI, continue + if strings.HasPrefix(dd.Repository, "file://") || strings.HasPrefix(dd.Repository, "oci://") { + continue + } + + if dd.Repository == "" { + continue + } + for _, repo := range repos { + if urlutil.Equal(repo.URL, strings.TrimSuffix(dd.Repository, "/")) { + continue Loop + } + } + missing = append(missing, dd.Repository) + } + if len(missing) > 0 { + return ErrRepoNotFound{missing} + } + return nil +} + +// ensureMissingRepos attempts to ensure the repository information for repos +// not managed by Helm is present. This takes in the repoNames Helm is configured +// to work with along with the chart dependencies. It will find the deps not +// in a known repo and attempt to ensure the data is present for steps like +// version resolution. +func (m *Manager) ensureMissingRepos(repoNames map[string]string, deps []*chart.Dependency) (map[string]string, error) { + + var ru []*repo.Entry + + for _, dd := range deps { + + // If the chart is in the local charts directory no repository needs + // to be specified. + if dd.Repository == "" { + continue + } + + // When the repoName for a dependency is known we can skip ensuring + if _, ok := repoNames[dd.Name]; ok { + continue + } + + // The generated repository name, which will result in an index being + // locally cached, has a name pattern of "helm-manager-" followed by a + // sha256 of the repo name. This assumes end users will never create + // repositories with these names pointing to other repositories. Using + // this method of naming allows the existing repository pulling and + // resolution code to do most of the work. + rn, err := key(dd.Repository) + if err != nil { + return repoNames, err + } + rn = managerKeyPrefix + rn + + repoNames[dd.Name] = rn + + // Assuming the repository is generally available. For Helm managed + // access controls the repository needs to be added through the user + // managed system. This path will work for public charts, like those + // supplied by Bitnami, but not for protected charts, like corp ones + // behind a username and pass. + ri := &repo.Entry{ + Name: rn, + URL: dd.Repository, + } + ru = append(ru, ri) + } + + // Calls to UpdateRepositories (a public function) will only update + // repositories configured by the user. Here we update repos found in + // the dependencies that are not known to the user if update skipping + // is not configured. + if !m.SkipUpdate && len(ru) > 0 { + fmt.Fprintln(m.Out, "Getting updates for unmanaged Helm repositories...") + if err := m.parallelRepoUpdate(ru); err != nil { + return repoNames, err + } + } + + return repoNames, nil +} + +// resolveRepoNames returns the repo names of the referenced deps which can be used to fetch the cached index file +// and replaces aliased repository URLs into resolved URLs in dependencies. +func (m *Manager) resolveRepoNames(deps []*chart.Dependency) (map[string]string, error) { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + if os.IsNotExist(err) { + return make(map[string]string), nil + } + return nil, err + } + repos := rf.Repositories + + reposMap := make(map[string]string) + + // Verify that all repositories referenced in the deps are actually known + // by Helm. 
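+	// For example (illustrative): a dependency repository of "@stable" or
+	// "alias:stable" is rewritten below to the URL of the configured repo
+	// named "stable", and reposMap records that name for later index lookups.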
+ missing := []string{} + for _, dd := range deps { + // Don't map the repository, we don't need to download chart from charts directory + // When OCI is used there is no Helm repository + if dd.Repository == "" || strings.HasPrefix(dd.Repository, "oci://") { + continue + } + // if dep chart is from local path, verify the path is valid + if strings.HasPrefix(dd.Repository, "file://") { + if _, err := resolver.GetLocalPath(dd.Repository, m.ChartPath); err != nil { + return nil, err + } + + if m.Debug { + fmt.Fprintf(m.Out, "Repository from local path: %s\n", dd.Repository) + } + reposMap[dd.Name] = dd.Repository + continue + } + + if strings.HasPrefix(dd.Repository, "oci://") { + reposMap[dd.Name] = dd.Repository + continue + } + + found := false + + for _, repo := range repos { + if (strings.HasPrefix(dd.Repository, "@") && strings.TrimPrefix(dd.Repository, "@") == repo.Name) || + (strings.HasPrefix(dd.Repository, "alias:") && strings.TrimPrefix(dd.Repository, "alias:") == repo.Name) { + found = true + dd.Repository = repo.URL + reposMap[dd.Name] = repo.Name + break + } else if urlutil.Equal(repo.URL, dd.Repository) { + found = true + reposMap[dd.Name] = repo.Name + break + } + } + if !found { + repository := dd.Repository + // Add if URL + _, err := url.ParseRequestURI(repository) + if err == nil { + reposMap[repository] = repository + continue + } + missing = append(missing, repository) + } + } + if len(missing) > 0 { + errorMessage := fmt.Sprintf("no repository definition for %s. Please add them via 'helm repo add'", strings.Join(missing, ", ")) + // It is common for people to try to enter "stable" as a repository instead of the actual URL. + // For this case, let's give them a suggestion. + containsNonURL := false + for _, repo := range missing { + if !strings.Contains(repo, "//") && !strings.HasPrefix(repo, "@") && !strings.HasPrefix(repo, "alias:") { + containsNonURL = true + } + } + if containsNonURL { + errorMessage += ` +Note that repositories must be URLs or aliases. For example, to refer to the "example" +repository, use "https://charts.example.com/" or "@example" instead of +"example". Don't forget to add the repo, too ('helm repo add').` + } + return nil, errors.New(errorMessage) + } + return reposMap, nil +} + +// UpdateRepositories updates all of the local repos to the latest. +func (m *Manager) UpdateRepositories() error { + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return err + } + repos := rf.Repositories + if len(repos) > 0 { + fmt.Fprintln(m.Out, "Hang tight while we grab the latest from your chart repositories...") + // This prints warnings straight to out. + if err := m.parallelRepoUpdate(repos); err != nil { + return err + } + fmt.Fprintln(m.Out, "Update Complete. ⎈Happy Helming!⎈") + } + return nil +} + +func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { + + var wg sync.WaitGroup + for _, c := range repos { + r, err := repo.NewChartRepository(c, m.Getters) + if err != nil { + return err + } + wg.Add(1) + go func(r *repo.ChartRepository) { + if _, err := r.DownloadIndexFile(); err != nil { + // For those dependencies that are not known to helm and using a + // generated key name we display the repo url. 
+ if strings.HasPrefix(r.Config.Name, managerKeyPrefix) { + fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository:\n\t%s\n", r.Config.URL, err) + } else { + fmt.Fprintf(m.Out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", r.Config.Name, r.Config.URL, err) + } + } else { + // For those dependencies that are not known to helm and using a + // generated key name we display the repo url. + if strings.HasPrefix(r.Config.Name, managerKeyPrefix) { + fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.URL) + } else { + fmt.Fprintf(m.Out, "...Successfully got an update from the %q chart repository\n", r.Config.Name) + } + } + wg.Done() + }(r) + } + wg.Wait() + + return nil +} + +// findChartURL searches the cache of repo data for a chart that has the name and the repoURL specified. +// +// 'name' is the name of the chart. Version is an exact semver, or an empty string. If empty, the +// newest version will be returned. +// +// repoURL is the repository to search +// +// If it finds a URL that is "relative", it will prepend the repoURL. +func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, insecureskiptlsverify, passcredentialsall bool, caFile, certFile, keyFile string, err error) { + if strings.HasPrefix(repoURL, "oci://") { + return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", false, false, "", "", "", nil + } + + for _, cr := range repos { + + if urlutil.Equal(repoURL, cr.Config.URL) { + var entry repo.ChartVersions + entry, err = findEntryByName(name, cr) + if err != nil { + return + } + var ve *repo.ChartVersion + ve, err = findVersionedEntry(version, entry) + if err != nil { + return + } + url, err = normalizeURL(repoURL, ve.URLs[0]) + if err != nil { + return + } + username = cr.Config.Username + password = cr.Config.Password + passcredentialsall = cr.Config.PassCredentialsAll + insecureskiptlsverify = cr.Config.InsecureSkipTLSverify + caFile = cr.Config.CAFile + certFile = cr.Config.CertFile + keyFile = cr.Config.KeyFile + return + } + } + url, err = repo.FindChartInRepoURL(repoURL, name, version, certFile, keyFile, caFile, m.Getters) + if err == nil { + return url, username, password, false, false, "", "", "", err + } + err = errors.Errorf("chart %s not found in %s: %s", name, repoURL, err) + return url, username, password, false, false, "", "", "", err +} + +// findEntryByName finds an entry in the chart repository whose name matches the given name. +// +// It returns the ChartVersions for that entry. +func findEntryByName(name string, cr *repo.ChartRepository) (repo.ChartVersions, error) { + for ename, entry := range cr.IndexFile.Entries { + if ename == name { + return entry, nil + } + } + return nil, errors.New("entry not found") +} + +// findVersionedEntry takes a ChartVersions list and returns a single chart version that satisfies the version constraints. +// +// If version is empty, the first chart found is returned. +func findVersionedEntry(version string, vers repo.ChartVersions) (*repo.ChartVersion, error) { + for _, verEntry := range vers { + if len(verEntry.URLs) == 0 { + // Not a legit entry. + continue + } + + if version == "" || versionEquals(version, verEntry.Version) { + return verEntry, nil + } + } + return nil, errors.New("no matching version") +} + +func versionEquals(v1, v2 string) bool { + sv1, err := semver.NewVersion(v1) + if err != nil { + // Fallback to string comparison. 
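+		// e.g. (illustrative) versionEquals("latest", "latest") is true here,
+		// while versionEquals("v1.2.3", "1.2.3") is true via the semver path.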
+ return v1 == v2 + } + sv2, err := semver.NewVersion(v2) + if err != nil { + return false + } + return sv1.Equal(sv2) +} + +func normalizeURL(baseURL, urlOrPath string) (string, error) { + u, err := url.Parse(urlOrPath) + if err != nil { + return urlOrPath, err + } + if u.IsAbs() { + return u.String(), nil + } + u2, err := url.Parse(baseURL) + if err != nil { + return urlOrPath, errors.Wrap(err, "base URL failed to parse") + } + + u2.Path = path.Join(u2.Path, urlOrPath) + return u2.String(), nil +} + +// loadChartRepositories reads the repositories.yaml, and then builds a map of +// ChartRepositories. +// +// The key is the local name (which is only present in the repositories.yaml). +func (m *Manager) loadChartRepositories() (map[string]*repo.ChartRepository, error) { + indices := map[string]*repo.ChartRepository{} + + // Load repositories.yaml file + rf, err := loadRepoConfig(m.RepositoryConfig) + if err != nil { + return indices, errors.Wrapf(err, "failed to load %s", m.RepositoryConfig) + } + + for _, re := range rf.Repositories { + lname := re.Name + idxFile := filepath.Join(m.RepositoryCache, helmpath.CacheIndexFile(lname)) + index, err := repo.LoadIndexFile(idxFile) + if err != nil { + return indices, err + } + + // TODO: use constructor + cr := &repo.ChartRepository{ + Config: re, + IndexFile: index, + } + indices[lname] = cr + } + return indices, nil +} + +// writeLock writes a lockfile to disk +func writeLock(chartpath string, lock *chart.Lock, legacyLockfile bool) error { + data, err := yaml.Marshal(lock) + if err != nil { + return err + } + lockfileName := "Chart.lock" + if legacyLockfile { + lockfileName = "requirements.lock" + } + dest := filepath.Join(chartpath, lockfileName) + return ioutil.WriteFile(dest, data, 0644) +} + +// archive a dep chart from local directory and save it into charts/ +func tarFromLocalDir(chartpath, name, repo, version string) (string, error) { + destPath := filepath.Join(chartpath, "charts") + + if !strings.HasPrefix(repo, "file://") { + return "", errors.Errorf("wrong format: chart %s repository %s", name, repo) + } + + origPath, err := resolver.GetLocalPath(repo, chartpath) + if err != nil { + return "", err + } + + ch, err := loader.LoadDir(origPath) + if err != nil { + return "", err + } + + constraint, err := semver.NewConstraint(version) + if err != nil { + return "", errors.Wrapf(err, "dependency %s has an invalid version/constraint format", name) + } + + v, err := semver.NewVersion(ch.Metadata.Version) + if err != nil { + return "", err + } + + if constraint.Check(v) { + _, err = chartutil.Save(ch, destPath) + return ch.Metadata.Version, err + } + + return "", errors.Errorf("can't get a valid version for dependency %s", name) +} + +// move files from tmppath to destpath +func move(tmpPath, destPath string) error { + files, _ := ioutil.ReadDir(tmpPath) + for _, file := range files { + filename := file.Name() + tmpfile := filepath.Join(tmpPath, filename) + destfile := filepath.Join(destPath, filename) + if err := fs.RenameWithFallback(tmpfile, destfile); err != nil { + return errors.Wrap(err, "unable to move local charts to charts dir") + } + } + return nil +} + +// The prefix to use for cache keys created by the manager for repo names +const managerKeyPrefix = "helm-manager-" + +// key is used to turn a name, such as a repository url, into a filesystem +// safe name that is unique for querying. To accomplish this a unique hash of +// the string is used. 
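+//
+// For example (illustrative):
+//
+//	name, _ := key("https://charts.example.com")
+//	// name is the 64-character hex SHA-256 digest of the URL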
+func key(name string) (string, error) {
+	in := strings.NewReader(name)
+	hash := crypto.SHA256.New()
+	if _, err := io.Copy(hash, in); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(hash.Sum(nil)), nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/doc.go b/vendor/helm.sh/helm/v3/pkg/engine/doc.go
new file mode 100644
index 0000000000..6ff875c46b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*Package engine implements the Go text template engine as needed for Helm.
+
+When Helm renders templates it does so with additional functions and different
+modes (e.g., strict, lint mode). This package handles the Helm-specific
+implementation.
+*/
+package engine // import "helm.sh/helm/v3/pkg/engine"
diff --git a/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/vendor/helm.sh/helm/v3/pkg/engine/engine.go
new file mode 100644
index 0000000000..4da64f5cc3
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/engine/engine.go
@@ -0,0 +1,392 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+	"fmt"
+	"log"
+	"path"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strings"
+	"text/template"
+
+	"github.com/pkg/errors"
+	"k8s.io/client-go/rest"
+
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chartutil"
+)
+
+// Engine is the Helm rendering implementation for templates.
+type Engine struct {
+	// If strict is enabled, template rendering will fail if a template references
+	// a value that was not passed in.
+	Strict bool
+	// In LintMode, some 'required' template values may be missing, so don't fail
+	LintMode bool
+	// the rest config to connect to the kubernetes api
+	config *rest.Config
+}
+
+// Render takes a chart, optional values, and value overrides, and attempts to render the Go templates.
+//
+// Render can be called repeatedly on the same engine.
+//
+// This will look in the chart's 'templates' data (e.g. the 'templates/' directory)
+// and attempt to render the templates there using the values passed in.
+//
+// Values are scoped to their templates. A dependency template will not have
+// access to the values set for its parent. If chart "foo" includes chart "bar",
+// "bar" will not have access to the values for "foo".
+//
+// Values should be prepared with something like `chartutil.ReadValues`.
+//
+// Values are passed through the templates according to scope.
If the top layer +// chart includes the chart foo, which includes the chart bar, the values map +// will be examined for a table called "foo". If "foo" is found in vals, +// that section of the values will be passed into the "foo" chart. And if that +// section contains a value named "bar", that value will be passed on to the +// bar chart during render time. +func (e Engine) Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + tmap := allTemplates(chrt, values) + return e.render(tmap) +} + +// Render takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. +func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, error) { + return new(Engine).Render(chrt, values) +} + +// RenderWithClient takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. This engine is client aware and so can have template +// functions that interact with the client +func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) { + return Engine{ + config: config, + }.Render(chrt, values) +} + +// renderable is an object that can be rendered. +type renderable struct { + // tpl is the current template. + tpl string + // vals are the values to be supplied to the template. + vals chartutil.Values + // namespace prefix to the templates of the current chart + basePath string +} + +const warnStartDelim = "HELM_ERR_START" +const warnEndDelim = "HELM_ERR_END" +const recursionMaxNums = 1000 + +var warnRegex = regexp.MustCompile(warnStartDelim + `(.*)` + warnEndDelim) + +func warnWrap(warn string) string { + return warnStartDelim + warn + warnEndDelim +} + +// initFunMap creates the Engine's FuncMap and adds context-specific functions. +func (e Engine) initFunMap(t *template.Template, referenceTpls map[string]renderable) { + funcMap := funcMap() + includedNames := make(map[string]int) + + // Add the 'include' function here so we can close over t. 
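Not part of the patch, but a minimal sketch of driving this Render entry point with an in-memory chart (chart name, template, and values are all illustrative):

```go
package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/chartutil"
	"helm.sh/helm/v3/pkg/engine"
)

func main() {
	// Hypothetical one-template chart.
	c := &chart.Chart{
		Metadata: &chart.Metadata{Name: "demo", Version: "0.1.0"},
		Templates: []*chart.File{
			{Name: "templates/cm.yaml", Data: []byte("msg: {{ .Values.msg }}")},
		},
	}
	vals := chartutil.Values{"Values": map[string]interface{}{"msg": "hello"}}

	out, err := engine.Render(c, vals)
	if err != nil {
		panic(err)
	}
	// Rendered output is keyed by "<chart path>/<template name>".
	fmt.Println(out["demo/templates/cm.yaml"]) // msg: hello
}
```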
+ funcMap["include"] = func(name string, data interface{}) (string, error) { + var buf strings.Builder + if v, ok := includedNames[name]; ok { + if v > recursionMaxNums { + return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name) + } + includedNames[name]++ + } else { + includedNames[name] = 1 + } + err := t.ExecuteTemplate(&buf, name, data) + includedNames[name]-- + return buf.String(), err + } + + // Add the 'tpl' function here + funcMap["tpl"] = func(tpl string, vals chartutil.Values) (string, error) { + basePath, err := vals.PathValue("Template.BasePath") + if err != nil { + return "", errors.Wrapf(err, "cannot retrieve Template.Basepath from values inside tpl function: %s", tpl) + } + + templateName, err := vals.PathValue("Template.Name") + if err != nil { + return "", errors.Wrapf(err, "cannot retrieve Template.Name from values inside tpl function: %s", tpl) + } + + templates := map[string]renderable{ + templateName.(string): { + tpl: tpl, + vals: vals, + basePath: basePath.(string), + }, + } + + result, err := e.renderWithReferences(templates, referenceTpls) + if err != nil { + return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl) + } + return result[templateName.(string)], nil + } + + // Add the `required` function here so we can use lintMode + funcMap["required"] = func(warn string, val interface{}) (interface{}, error) { + if val == nil { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.Errorf(warnWrap(warn)) + } else if _, ok := val.(string); ok { + if val == "" { + if e.LintMode { + // Don't fail on missing required values when linting + log.Printf("[INFO] Missing required value: %s", warn) + return "", nil + } + return val, errors.Errorf(warnWrap(warn)) + } + } + return val, nil + } + + // Override sprig fail function for linting and wrapping message + funcMap["fail"] = func(msg string) (string, error) { + if e.LintMode { + // Don't fail when linting + log.Printf("[INFO] Fail: %s", msg) + return "", nil + } + return "", errors.New(warnWrap(msg)) + } + + // If we are not linting and have a cluster connection, provide a Kubernetes-backed + // implementation. + if !e.LintMode && e.config != nil { + funcMap["lookup"] = NewLookupFunction(e.config) + } + + t.Funcs(funcMap) +} + +// render takes a map of templates/values and renders them. +func (e Engine) render(tpls map[string]renderable) (map[string]string, error) { + return e.renderWithReferences(tpls, tpls) +} + +// renderWithReferences takes a map of templates/values to render, and a map of +// templates which can be referenced within them. +func (e Engine) renderWithReferences(tpls, referenceTpls map[string]renderable) (rendered map[string]string, err error) { + // Basically, what we do here is start with an empty parent template and then + // build up a list of templates -- one for each file. Once all of the templates + // have been parsed, we loop through again and execute every template. + // + // The idea with this process is to make it possible for more complex templates + // to share common blocks, but to make the entire thing feel like a file-based + // template engine. 
+	defer func() {
+		if r := recover(); r != nil {
+			err = errors.Errorf("rendering template failed: %v", r)
+		}
+	}()
+	t := template.New("gotpl")
+	if e.Strict {
+		t.Option("missingkey=error")
+	} else {
+		// Note that zero will attempt to add default values for types it knows,
+		// but will still emit "<no value>" for others. We mitigate that later.
+		t.Option("missingkey=zero")
+	}
+
+	e.initFunMap(t, referenceTpls)
+
+	// We want to parse the templates in a predictable order. The order favors
+	// higher-level (in file system) templates over deeply nested templates.
+	keys := sortTemplates(tpls)
+	referenceKeys := sortTemplates(referenceTpls)
+
+	for _, filename := range keys {
+		r := tpls[filename]
+		if _, err := t.New(filename).Parse(r.tpl); err != nil {
+			return map[string]string{}, cleanupParseError(filename, err)
+		}
+	}
+
+	// Adding the reference templates to the template context
+	// so they can be referenced in the tpl function
+	for _, filename := range referenceKeys {
+		if t.Lookup(filename) == nil {
+			r := referenceTpls[filename]
+			if _, err := t.New(filename).Parse(r.tpl); err != nil {
+				return map[string]string{}, cleanupParseError(filename, err)
+			}
+		}
+	}
+
+	rendered = make(map[string]string, len(keys))
+	for _, filename := range keys {
+		// Don't render partials. We don't care about the direct output of partials.
+		// They are only included from other templates.
+		if strings.HasPrefix(path.Base(filename), "_") {
+			continue
+		}
+		// At render time, add information about the template that is being rendered.
+		vals := tpls[filename].vals
+		vals["Template"] = chartutil.Values{"Name": filename, "BasePath": tpls[filename].basePath}
+		var buf strings.Builder
+		if err := t.ExecuteTemplate(&buf, filename, vals); err != nil {
+			return map[string]string{}, cleanupExecError(filename, err)
+		}
+
+		// Work around the issue where Go will emit "<no value>" even if
+		// missingkey=zero is set. Since missingkey=error will never get here,
+		// we do not need to handle the Strict case.
+		rendered[filename] = strings.ReplaceAll(buf.String(), "<no value>", "")
+	}
+
+	return rendered, nil
+}
+
+func cleanupParseError(filename string, err error) error {
+	tokens := strings.Split(err.Error(), ": ")
+	if len(tokens) == 1 {
+		// This might happen if a non-templating error occurs
+		return fmt.Errorf("parse error in (%s): %s", filename, err)
+	}
+	// The first token is "template"
+	// The second token is either "filename:lineno" or "filename:lineNo:columnNo"
+	location := tokens[1]
+	// The remaining tokens make up a stacktrace-like chain, ending with the relevant error
+	errMsg := tokens[len(tokens)-1]
+	return fmt.Errorf("parse error at (%s): %s", location, errMsg)
+}
+
+func cleanupExecError(filename string, err error) error {
+	if _, isExecError := err.(template.ExecError); !isExecError {
+		return err
+	}
+
+	tokens := strings.SplitN(err.Error(), ": ", 3)
+	if len(tokens) != 3 {
+		// This might happen if a non-templating error occurs
+		return fmt.Errorf("execution error in (%s): %s", filename, err)
+	}
+
+	// The first token is "template"
+	// The second token is either "filename:lineno" or "filename:lineNo:columnNo"
+	location := tokens[1]
+
+	parts := warnRegex.FindStringSubmatch(tokens[2])
+	if len(parts) >= 2 {
+		return fmt.Errorf("execution error at (%s): %s", location, parts[1])
+	}
+
+	return err
+}
+
+func sortTemplates(tpls map[string]renderable) []string {
+	keys := make([]string, len(tpls))
+	i := 0
+	for key := range tpls {
+		keys[i] = key
+		i++
+	}
+	sort.Sort(sort.Reverse(byPathLen(keys)))
+	return keys
+}
+
+type byPathLen []string
+
+func (p byPathLen) Len() int      { return len(p) }
+func (p byPathLen) Swap(i, j int) { p[j], p[i] = p[i], p[j] }
+func (p byPathLen) Less(i, j int) bool {
+	a, b := p[i], p[j]
+	ca, cb := strings.Count(a, "/"), strings.Count(b, "/")
+	if ca == cb {
+		return strings.Compare(a, b) == -1
+	}
+	return ca < cb
+}
+
+// allTemplates returns all templates for a chart and its dependencies.
+//
+// As it goes, it also prepares the values in a scope-sensitive manner.
+func allTemplates(c *chart.Chart, vals chartutil.Values) map[string]renderable {
+	templates := make(map[string]renderable)
+	recAllTpls(c, templates, vals)
+	return templates
+}
+
+// recAllTpls recurses through the templates in a chart.
+//
+// As it recurses, it also sets the values to be appropriate for the template
+// scope.
+func recAllTpls(c *chart.Chart, templates map[string]renderable, vals chartutil.Values) {
+	next := map[string]interface{}{
+		"Chart":        c.Metadata,
+		"Files":        newFiles(c.Files),
+		"Release":      vals["Release"],
+		"Capabilities": vals["Capabilities"],
+		"Values":       make(chartutil.Values),
+	}
+
+	// If there is a {{.Values.ThisChart}} in the parent metadata,
+	// copy that into the {{.Values}} for this template.
+	if c.IsRoot() {
+		next["Values"] = vals["Values"]
+	} else if vs, err := vals.Table("Values."
+ c.Name()); err == nil { + next["Values"] = vs + } + + for _, child := range c.Dependencies() { + recAllTpls(child, templates, next) + } + + newParentID := c.ChartFullPath() + for _, t := range c.Templates { + if !isTemplateValid(c, t.Name) { + continue + } + templates[path.Join(newParentID, t.Name)] = renderable{ + tpl: string(t.Data), + vals: next, + basePath: path.Join(newParentID, "templates"), + } + } +} + +// isTemplateValid returns true if the template is valid for the chart type +func isTemplateValid(ch *chart.Chart, templateName string) bool { + if isLibraryChart(ch) { + return strings.HasPrefix(filepath.Base(templateName), "_") + } + return true +} + +// isLibraryChart returns true if the chart is a library chart +func isLibraryChart(c *chart.Chart) bool { + return strings.EqualFold(c.Metadata.Type, "library") +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/files.go b/vendor/helm.sh/helm/v3/pkg/engine/files.go new file mode 100644 index 0000000000..d7e62da5a2 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/files.go @@ -0,0 +1,160 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "encoding/base64" + "path" + "strings" + + "github.com/gobwas/glob" + + "helm.sh/helm/v3/pkg/chart" +) + +// files is a map of files in a chart that can be accessed from a template. +type files map[string][]byte + +// NewFiles creates a new files from chart files. +// Given an []*chart.File (the format for files in a chart.Chart), extract a map of files. +func newFiles(from []*chart.File) files { + files := make(map[string][]byte) + for _, f := range from { + files[f.Name] = f.Data + } + return files +} + +// GetBytes gets a file by path. +// +// The returned data is raw. In a template context, this is identical to calling +// {{index .Files $path}}. +// +// This is intended to be accessed from within a template, so a missed key returns +// an empty []byte. +func (f files) GetBytes(name string) []byte { + if v, ok := f[name]; ok { + return v + } + return []byte{} +} + +// Get returns a string representation of the given file. +// +// Fetch the contents of a file as a string. It is designed to be called in a +// template. +// +// {{.Files.Get "foo"}} +func (f files) Get(name string) string { + return string(f.GetBytes(name)) +} + +// Glob takes a glob pattern and returns another files object only containing +// matched files. +// +// This is designed to be called from a template. +// +// {{ range $name, $content := .Files.Glob("foo/**") }} +// {{ $name }}: | +// {{ .Files.Get($name) | indent 4 }}{{ end }} +func (f files) Glob(pattern string) files { + g, err := glob.Compile(pattern, '/') + if err != nil { + g, _ = glob.Compile("**") + } + + nf := newFiles(nil) + for name, contents := range f { + if g.Match(name) { + nf[name] = contents + } + } + + return nf +} + +// AsConfig turns a Files group and flattens it to a YAML map suitable for +// including in the 'data' section of a Kubernetes ConfigMap definition. 
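For reference, a standalone sketch (file names made up) of the matching semantics .Files.Glob relies on, using the same gobwas/glob compilation with '/' as the separator:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// With '/' registered as a separator, "*" stays within one path segment
	// while "**" crosses segments.
	g := glob.MustCompile("config/**", '/')

	for _, name := range []string{
		"config/app.yaml",
		"config/env/prod.yaml",
		"secrets/token",
	} {
		fmt.Printf("%-22s -> %v\n", name, g.Match(name))
	}
}
```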
+// Duplicate keys will be overwritten, so be aware that your file names +// (regardless of path) should be unique. +// +// This is designed to be called from a template, and will return empty string +// (via toYAML function) if it cannot be serialized to YAML, or if the Files +// object is nil. +// +// The output will not be indented, so you will want to pipe this to the +// 'indent' template function. +// +// data: +// {{ .Files.Glob("config/**").AsConfig() | indent 4 }} +func (f files) AsConfig() string { + if f == nil { + return "" + } + + m := make(map[string]string) + + // Explicitly convert to strings, and file names + for k, v := range f { + m[path.Base(k)] = string(v) + } + + return toYAML(m) +} + +// AsSecrets returns the base64-encoded value of a Files object suitable for +// including in the 'data' section of a Kubernetes Secret definition. +// Duplicate keys will be overwritten, so be aware that your file names +// (regardless of path) should be unique. +// +// This is designed to be called from a template, and will return empty string +// (via toYAML function) if it cannot be serialized to YAML, or if the Files +// object is nil. +// +// The output will not be indented, so you will want to pipe this to the +// 'indent' template function. +// +// data: +// {{ .Files.Glob("secrets/*").AsSecrets() }} +func (f files) AsSecrets() string { + if f == nil { + return "" + } + + m := make(map[string]string) + + for k, v := range f { + m[path.Base(k)] = base64.StdEncoding.EncodeToString(v) + } + + return toYAML(m) +} + +// Lines returns each line of a named file (split by "\n") as a slice, so it can +// be ranged over in your templates. +// +// This is designed to be called from a template. +// +// {{ range .Files.Lines "foo/bar.html" }} +// {{ . }}{{ end }} +func (f files) Lines(path string) []string { + if f == nil || f[path] == nil { + return []string{} + } + + return strings.Split(string(f[path]), "\n") +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/funcs.go b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go new file mode 100644 index 0000000000..92b4c3383e --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/funcs.go @@ -0,0 +1,177 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" + + "github.com/BurntSushi/toml" + "github.com/Masterminds/sprig/v3" + "sigs.k8s.io/yaml" +) + +// funcMap returns a mapping of all of the functions that Engine has. +// +// Because some functions are late-bound (e.g. contain context-sensitive +// data), the functions may not all perform identically outside of an Engine +// as they will inside of an Engine. +// +// Known late-bound functions: +// +// - "include" +// - "tpl" +// +// These are late-bound in Engine.Render(). The +// version included in the FuncMap is a placeholder. 
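A small sketch of why these placeholders matter (template content is made up): a template that references include must see some function at parse time, and the real closure can be rebound on the same template any time before execution:

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// Placeholder registered up front so templates that use "include" still
	// parse (and lint) before the real, context-bound closure exists.
	placeholder := template.FuncMap{
		"include": func(string, interface{}) string { return "not implemented" },
	}

	t := template.Must(template.New("demo").Funcs(placeholder).Parse(`{{ include "x" . }}`))

	// Later, the real implementation is swapped in on the same template;
	// functions are resolved at execution time, so the override wins.
	t.Funcs(template.FuncMap{
		"include": func(name string, _ interface{}) string {
			return fmt.Sprintf("rendered %q", name)
		},
	})
	_ = t.Execute(os.Stdout, nil) // rendered "x"
}
```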
+// +func funcMap() template.FuncMap { + f := sprig.TxtFuncMap() + delete(f, "env") + delete(f, "expandenv") + + // Add some extra functionality + extra := template.FuncMap{ + "toToml": toTOML, + "toYaml": toYAML, + "fromYaml": fromYAML, + "fromYamlArray": fromYAMLArray, + "toJson": toJSON, + "fromJson": fromJSON, + "fromJsonArray": fromJSONArray, + + // This is a placeholder for the "include" function, which is + // late-bound to a template. By declaring it here, we preserve the + // integrity of the linter. + "include": func(string, interface{}) string { return "not implemented" }, + "tpl": func(string, interface{}) interface{} { return "not implemented" }, + "required": func(string, interface{}) (interface{}, error) { return "not implemented", nil }, + // Provide a placeholder for the "lookup" function, which requires a kubernetes + // connection. + "lookup": func(string, string, string, string) (map[string]interface{}, error) { + return map[string]interface{}{}, nil + }, + } + + for k, v := range extra { + f[k] = v + } + + return f +} + +// toYAML takes an interface, marshals it to yaml, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toYAML(v interface{}) string { + data, err := yaml.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return strings.TrimSuffix(string(data), "\n") +} + +// fromYAML converts a YAML document into a map[string]interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string into +// m["Error"] in the returned map. +func fromYAML(str string) map[string]interface{} { + m := map[string]interface{}{} + + if err := yaml.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromYAMLArray converts a YAML array into a []interface{}. +// +// This is not a general-purpose YAML parser, and will not parse all valid +// YAML documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. +func fromYAMLArray(str string) []interface{} { + a := []interface{}{} + + if err := yaml.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} + +// toTOML takes an interface, marshals it to toml, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toTOML(v interface{}) string { + b := bytes.NewBuffer(nil) + e := toml.NewEncoder(b) + err := e.Encode(v) + if err != nil { + return err.Error() + } + return b.String() +} + +// toJSON takes an interface, marshals it to json, and returns a string. It will +// always return a string, even on marshal error (empty string). +// +// This is designed to be called from a template. +func toJSON(v interface{}) string { + data, err := json.Marshal(v) + if err != nil { + // Swallow errors inside of a template. + return "" + } + return string(data) +} + +// fromJSON converts a JSON document into a map[string]interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. 
It will insert the returned error message string into +// m["Error"] in the returned map. +func fromJSON(str string) map[string]interface{} { + m := make(map[string]interface{}) + + if err := json.Unmarshal([]byte(str), &m); err != nil { + m["Error"] = err.Error() + } + return m +} + +// fromJSONArray converts a JSON array into a []interface{}. +// +// This is not a general-purpose JSON parser, and will not parse all valid +// JSON documents. Additionally, because its intended use is within templates +// it tolerates errors. It will insert the returned error message string as +// the first and only item in the returned array. +func fromJSONArray(str string) []interface{} { + a := []interface{}{} + + if err := json.Unmarshal([]byte(str), &a); err != nil { + a = []interface{}{err.Error()} + } + return a +} diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go new file mode 100644 index 0000000000..cd3883c0f1 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go @@ -0,0 +1,124 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "log" + "strings" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +type lookupFunc = func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) + +// NewLookupFunction returns a function for looking up objects in the cluster. +// +// If the resource does not exist, no error is raised. +// +// This function is considered deprecated, and will be renamed in Helm 4. It will no +// longer be a public function. +func NewLookupFunction(config *rest.Config) lookupFunc { + return func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) { + var client dynamic.ResourceInterface + c, namespaced, err := getDynamicClientOnKind(apiversion, resource, config) + if err != nil { + return map[string]interface{}{}, err + } + if namespaced && namespace != "" { + client = c.Namespace(namespace) + } else { + client = c + } + if name != "" { + // this will return a single object + obj, err := client.Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + // Just return an empty interface when the object was not found. + // That way, users can use `if not (lookup ...)` in their templates. + return map[string]interface{}{}, nil + } + return map[string]interface{}{}, err + } + return obj.UnstructuredContent(), nil + } + //this will return a list + obj, err := client.List(context.Background(), metav1.ListOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + // Just return an empty interface when the object was not found. + // That way, users can use `if not (lookup ...)` in their templates. 
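A hedged sketch of calling the exported lookup factory outside a template, assuming a local kubeconfig; the ConfigMap name is hypothetical:

```go
package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/engine"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from ~/.kube/config, then ask for a ConfigMap,
	// mirroring `lookup "v1" "ConfigMap" "default" "my-cm"` in a template.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	lookup := engine.NewLookupFunction(config)
	obj, err := lookup("v1", "ConfigMap", "default", "my-cm")
	if err != nil {
		panic(err)
	}
	// An empty map (not an error) means the object was not found.
	fmt.Println(len(obj) == 0, obj["metadata"])
}
```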
+ return map[string]interface{}{}, nil + } + return map[string]interface{}{}, err + } + return obj.UnstructuredContent(), nil + } +} + +// getDynamicClientOnUnstructured returns a dynamic client on an Unstructured type. This client can be further namespaced. +func getDynamicClientOnKind(apiversion string, kind string, config *rest.Config) (dynamic.NamespaceableResourceInterface, bool, error) { + gvk := schema.FromAPIVersionAndKind(apiversion, kind) + apiRes, err := getAPIResourceForGVK(gvk, config) + if err != nil { + log.Printf("[ERROR] unable to get apiresource from unstructured: %s , error %s", gvk.String(), err) + return nil, false, errors.Wrapf(err, "unable to get apiresource from unstructured: %s", gvk.String()) + } + gvr := schema.GroupVersionResource{ + Group: apiRes.Group, + Version: apiRes.Version, + Resource: apiRes.Name, + } + intf, err := dynamic.NewForConfig(config) + if err != nil { + log.Printf("[ERROR] unable to get dynamic client %s", err) + return nil, false, err + } + res := intf.Resource(gvr) + return res, apiRes.Namespaced, nil +} + +func getAPIResourceForGVK(gvk schema.GroupVersionKind, config *rest.Config) (metav1.APIResource, error) { + res := metav1.APIResource{} + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + log.Printf("[ERROR] unable to create discovery client %s", err) + return res, err + } + resList, err := discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + log.Printf("[ERROR] unable to retrieve resource list for: %s , error: %s", gvk.GroupVersion().String(), err) + return res, err + } + for _, resource := range resList.APIResources { + //if a resource contains a "/" it's referencing a subresource. we don't support suberesource for now. + if resource.Kind == gvk.Kind && !strings.Contains(resource.Name, "/") { + res = resource + res.Group = gvk.Group + res.Version = gvk.Version + break + } + } + return res, nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/gates/doc.go b/vendor/helm.sh/helm/v3/pkg/gates/doc.go new file mode 100644 index 0000000000..762fdb8c66 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/gates/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/*Package gates provides a general tool for working with experimental feature gates. + +This provides convenience methods where the user can determine if certain experimental features are enabled. +*/ +package gates diff --git a/vendor/helm.sh/helm/v3/pkg/gates/gates.go b/vendor/helm.sh/helm/v3/pkg/gates/gates.go new file mode 100644 index 0000000000..69559219e7 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/gates/gates.go @@ -0,0 +1,38 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gates + +import ( + "fmt" + "os" +) + +// Gate is the name of the feature gate. +type Gate string + +// String returns the string representation of this feature gate. +func (g Gate) String() string { + return string(g) +} + +// IsEnabled determines whether a certain feature gate is enabled. +func (g Gate) IsEnabled() bool { + return os.Getenv(string(g)) != "" +} + +func (g Gate) Error() error { + return fmt.Errorf("this feature has been marked as experimental and is not enabled by default. Please set %s=1 in your environment to use this feature", g.String()) +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/vendor/helm.sh/helm/v3/pkg/getter/getter.go index 4653484560..78add728ad 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/getter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/getter.go @@ -38,6 +38,7 @@ type options struct { insecureSkipVerifyTLS bool username string password string + passCredentialsAll bool userAgent string version string registryClient *registry.Client @@ -64,6 +65,12 @@ func WithBasicAuth(username, password string) Option { } } +func WithPassCredentialsAll(pass bool) Option { + return func(opts *options) { + opts.passCredentialsAll = pass + } +} + // WithUserAgent sets the request's User-Agent header to use the provided agent name. func WithUserAgent(userAgent string) Option { return func(opts *options) { diff --git a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go index bd60629ae8..822abad2ef 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go @@ -20,6 +20,7 @@ import ( "crypto/tls" "io" "net/http" + "net/url" "github.com/pkg/errors" @@ -56,8 +57,24 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { req.Header.Set("User-Agent", g.opts.userAgent) } - if g.opts.username != "" && g.opts.password != "" { - req.SetBasicAuth(g.opts.username, g.opts.password) + // Before setting the basic auth credentials, make sure the URL associated + // with the basic auth is the one being fetched. + u1, err := url.Parse(g.opts.url) + if err != nil { + return buf, errors.Wrap(err, "Unable to parse getter URL") + } + u2, err := url.Parse(href) + if err != nil { + return buf, errors.Wrap(err, "Unable to parse URL getting from") + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. 
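The credential-scoping rule described in the httpgetter comment above is easy to check in isolation; in the sketch below the helper name and URLs are ours, not Helm's:

```go
package main

import (
	"fmt"
	"net/url"
)

// sameCredentialScope mirrors the check: credentials configured for the repo
// URL are only attached when scheme and host:port both match.
func sameCredentialScope(a, b string) bool {
	u1, err1 := url.Parse(a)
	u2, err2 := url.Parse(b)
	if err1 != nil || err2 != nil {
		return false
	}
	return u1.Scheme == u2.Scheme && u1.Host == u2.Host
}

func main() {
	base := "https://charts.example.com"
	fmt.Println(sameCredentialScope(base, "https://charts.example.com/foo-1.0.0.tgz")) // true
	fmt.Println(sameCredentialScope(base, "https://charts.example.com:8443/foo.tgz"))  // false: different port
	fmt.Println(sameCredentialScope(base, "http://charts.example.com/foo.tgz"))        // false: different scheme
}
```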
+ if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + if g.opts.username != "" && g.opts.password != "" { + req.SetBasicAuth(g.opts.username, g.opts.password) + } } client, err := g.httpClient() diff --git a/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go index d8fd538628..3f85b9862b 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go @@ -58,10 +58,19 @@ func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { } // NewOCIGetter constructs a valid http/https client as a Getter -func NewOCIGetter(options ...Option) (Getter, error) { - var client OCIGetter +func NewOCIGetter(ops ...Option) (Getter, error) { + registryClient, err := registry.NewClient() + if err != nil { + return nil, err + } - for _, opt := range options { + client := OCIGetter{ + opts: options{ + registryClient: registryClient, + }, + } + + for _, opt := range ops { opt(&client.opts) } diff --git a/vendor/helm.sh/helm/v3/pkg/kube/client.go b/vendor/helm.sh/helm/v3/pkg/kube/client.go index 34079e7a02..76a832b0e4 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/client.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/client.go @@ -21,6 +21,8 @@ import ( "encoding/json" "fmt" "io" + "os" + "path/filepath" "strings" "sync" "time" @@ -55,6 +57,10 @@ var ErrNoObjectsVisited = errors.New("no objects visited") var metadataAccessor = meta.NewAccessor() +// ManagedFieldsManager is the name of the manager of Kubernetes managedFields +// first introduced in Kubernetes 1.18 +var ManagedFieldsManager string + // Client represents a client capable of communicating with the Kubernetes API. type Client struct { Factory Factory @@ -100,7 +106,7 @@ func (c *Client) getKubeClient() (*kubernetes.Clientset, error) { return c.kubeClient, err } -// IsReachable tests connectivity to the cluster +// IsReachable tests connectivity to the cluster. func (c *Client) IsReachable() error { client, err := c.getKubeClient() if err == genericclioptions.ErrEmptyConfig { @@ -126,18 +132,19 @@ func (c *Client) Create(resources ResourceList) (*Result, error) { return &Result{Created: resources}, nil } -// Wait up to the given timeout for the specified resources to be ready +// Wait waits up to the given timeout for the specified resources to be ready. func (c *Client) Wait(resources ResourceList, timeout time.Duration) error { cs, err := c.getKubeClient() if err != nil { return err } + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true)) w := waiter{ - c: cs, + c: checker, log: c.Log, timeout: timeout, } - return w.waitForResources(resources, false) + return w.waitForResources(resources) } // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. 
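From the caller's side the new option composes with the package's existing functional options; a hypothetical wiring (repo URL and credentials are placeholders, and the final Get performs a real HTTP request):

```go
package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/getter"
)

func main() {
	// Credentials stay pinned to the configured repo host unless
	// WithPassCredentialsAll(true) explicitly opts in to forwarding them.
	g, err := getter.NewHTTPGetter(
		getter.WithURL("https://charts.example.com"),
		getter.WithBasicAuth("user", "s3cr3t"),
		getter.WithPassCredentialsAll(false),
	)
	if err != nil {
		panic(err)
	}

	// The download URL is compared against the repo URL above before the
	// Authorization header is attached.
	buf, err := g.Get("https://charts.example.com/foo-1.2.3.tgz")
	if err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "bytes")
}
```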
@@ -146,12 +153,13 @@ func (c *Client) WaitWithJobs(resources ResourceList, timeout time.Duration) err if err != nil { return err } + checker := NewReadyChecker(cs, c.Log, PausedAsReady(true), CheckJobs(true)) w := waiter{ - c: cs, + c: checker, log: c.Log, timeout: timeout, } - return w.waitForResources(resources, true) + return w.waitForResources(resources) } func (c *Client) namespace() string { @@ -204,7 +212,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err return err } - helper := resource.NewHelper(info.Client, info.Mapping) + helper := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()) if _, err := helper.Get(info.Namespace, info.Name); err != nil { if !apierrors.IsNotFound(err) { return errors.Wrap(err, "could not get information about the resource") @@ -322,7 +330,7 @@ func (c *Client) watchTimeout(t time.Duration) func(*resource.Info) error { // WatchUntilReady watches the resources given and waits until it is ready. // -// This function is mainly for hook implementations. It watches for a resource to +// This method is mainly for hook implementations. It watches for a resource to // hit a particular milestone. The milestone depends on the Kind. // // For most kinds, it checks to see if the resource is marked as Added or Modified @@ -357,6 +365,26 @@ func perform(infos ResourceList, fn func(*resource.Info) error) error { return nil } +// getManagedFieldsManager returns the manager string. If one was set it will be returned. +// Otherwise, one is calculated based on the name of the binary. +func getManagedFieldsManager() string { + + // When a manager is explicitly set use it + if ManagedFieldsManager != "" { + return ManagedFieldsManager + } + + // When no manager is set and no calling application can be found it is unknown + if len(os.Args[0]) == 0 { + return "unknown" + } + + // When there is an application that can be determined and no set manager + // use the base name. This is one of the ways Kubernetes libs handle figuring + // names out. 
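A sketch of both field-manager behaviors from an embedding application's point of view (the manager name is hypothetical):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"helm.sh/helm/v3/pkg/kube"
)

func main() {
	// Default: the field manager reported to the API server is the base name
	// of the running binary.
	fmt.Println(filepath.Base(os.Args[0]))

	// An embedding application can pin its own name instead; it will then
	// appear in each managed object's metadata.managedFields entries.
	kube.ManagedFieldsManager = "my-operator" // hypothetical manager name
}
```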
+ return filepath.Base(os.Args[0]) +} + func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- error) { var kind string var wg sync.WaitGroup @@ -375,7 +403,7 @@ func batchPerform(infos ResourceList, fn func(*resource.Info) error, errs chan<- } func createResource(info *resource.Info) error { - obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object) + obj, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).Create(info.Namespace, true, info.Object) if err != nil { return err } @@ -385,7 +413,7 @@ func createResource(info *resource.Info) error { func deleteResource(info *resource.Info) error { policy := metav1.DeletePropagationBackground opts := &metav1.DeleteOptions{PropagationPolicy: &policy} - _, err := resource.NewHelper(info.Client, info.Mapping).DeleteWithOptions(info.Namespace, info.Name, opts) + _, err := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(getManagedFieldsManager()).DeleteWithOptions(info.Namespace, info.Name, opts) return err } @@ -400,7 +428,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P } // Fetch the current object for the three way merge - helper := resource.NewHelper(target.Client, target.Mapping) + helper := resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) currentObj, err := helper.Get(target.Namespace, target.Name) if err != nil && !apierrors.IsNotFound(err) { return nil, types.StrategicMergePatchType, errors.Wrapf(err, "unable to get data for current object %s/%s", target.Namespace, target.Name) @@ -442,7 +470,7 @@ func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.P func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool) error { var ( obj runtime.Object - helper = resource.NewHelper(target.Client, target.Mapping) + helper = resource.NewHelper(target.Client, target.Mapping).WithFieldManager(getManagedFieldsManager()) kind = target.Mapping.GroupVersionKind.Kind ) diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go new file mode 100644 index 0000000000..ff800864c9 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go @@ -0,0 +1,107 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fake implements various fake KubeClients for use in testing +package fake + +import ( + "io" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/cli-runtime/pkg/resource" + + "helm.sh/helm/v3/pkg/kube" +) + +// FailingKubeClient implements KubeClient for testing purposes. 
It also has +// additional errors you can set to fail different functions, otherwise it +// delegates all its calls to `PrintingKubeClient` +type FailingKubeClient struct { + PrintingKubeClient + CreateError error + WaitError error + DeleteError error + WatchUntilReadyError error + UpdateError error + BuildError error + BuildUnstructuredError error + WaitAndGetCompletedPodPhaseError error +} + +// Create returns the configured error if set or prints +func (f *FailingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { + if f.CreateError != nil { + return nil, f.CreateError + } + return f.PrintingKubeClient.Create(resources) +} + +// Wait returns the configured error if set or prints +func (f *FailingKubeClient) Wait(resources kube.ResourceList, d time.Duration) error { + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.Wait(resources, d) +} + +// WaitWithJobs returns the configured error if set or prints +func (f *FailingKubeClient) WaitWithJobs(resources kube.ResourceList, d time.Duration) error { + if f.WaitError != nil { + return f.WaitError + } + return f.PrintingKubeClient.Wait(resources, d) +} + +// Delete returns the configured error if set or prints +func (f *FailingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) { + if f.DeleteError != nil { + return nil, []error{f.DeleteError} + } + return f.PrintingKubeClient.Delete(resources) +} + +// WatchUntilReady returns the configured error if set or prints +func (f *FailingKubeClient) WatchUntilReady(resources kube.ResourceList, d time.Duration) error { + if f.WatchUntilReadyError != nil { + return f.WatchUntilReadyError + } + return f.PrintingKubeClient.WatchUntilReady(resources, d) +} + +// Update returns the configured error if set or prints +func (f *FailingKubeClient) Update(r, modified kube.ResourceList, ignoreMe bool) (*kube.Result, error) { + if f.UpdateError != nil { + return &kube.Result{}, f.UpdateError + } + return f.PrintingKubeClient.Update(r, modified, ignoreMe) +} + +// Build returns the configured error if set or prints +func (f *FailingKubeClient) Build(r io.Reader, _ bool) (kube.ResourceList, error) { + if f.BuildError != nil { + return []*resource.Info{}, f.BuildError + } + return f.PrintingKubeClient.Build(r, false) +} + +// WaitAndGetCompletedPodPhase returns the configured error if set or prints +func (f *FailingKubeClient) WaitAndGetCompletedPodPhase(s string, d time.Duration) (v1.PodPhase, error) { + if f.WaitAndGetCompletedPodPhaseError != nil { + return v1.PodSucceeded, f.WaitAndGetCompletedPodPhaseError + } + return f.PrintingKubeClient.WaitAndGetCompletedPodPhase(s, d) +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go new file mode 100644 index 0000000000..e8bd1845b9 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go @@ -0,0 +1,105 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "io" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/cli-runtime/pkg/resource" + + "helm.sh/helm/v3/pkg/kube" +) + +// PrintingKubeClient implements KubeClient, but simply prints the reader to +// the given output. +type PrintingKubeClient struct { + Out io.Writer +} + +// IsReachable checks if the cluster is reachable +func (p *PrintingKubeClient) IsReachable() error { + return nil +} + +// Create prints the values of what would be created with a real KubeClient. +func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, err + } + return &kube.Result{Created: resources}, nil +} + +func (p *PrintingKubeClient) Wait(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +func (p *PrintingKubeClient) WaitWithJobs(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +// Delete implements KubeClient delete. +// +// It only prints out the content to be deleted. +func (p *PrintingKubeClient) Delete(resources kube.ResourceList) (*kube.Result, []error) { + _, err := io.Copy(p.Out, bufferize(resources)) + if err != nil { + return nil, []error{err} + } + return &kube.Result{Deleted: resources}, nil +} + +// WatchUntilReady implements KubeClient WatchUntilReady. +func (p *PrintingKubeClient) WatchUntilReady(resources kube.ResourceList, _ time.Duration) error { + _, err := io.Copy(p.Out, bufferize(resources)) + return err +} + +// Update implements KubeClient Update. +func (p *PrintingKubeClient) Update(_, modified kube.ResourceList, _ bool) (*kube.Result, error) { + _, err := io.Copy(p.Out, bufferize(modified)) + if err != nil { + return nil, err + } + // TODO: This doesn't completely mock out have some that get created, + // updated, and deleted. I don't think these are used in any unit tests, but + // we may want to refactor a way to handle future tests + return &kube.Result{Updated: modified}, nil +} + +// Build implements KubeClient Build. +func (p *PrintingKubeClient) Build(_ io.Reader, _ bool) (kube.ResourceList, error) { + return []*resource.Info{}, nil +} + +// WaitAndGetCompletedPodPhase implements KubeClient WaitAndGetCompletedPodPhase. +func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Duration) (v1.PodPhase, error) { + return v1.PodSucceeded, nil +} + +func bufferize(resources kube.ResourceList) io.Reader { + var builder strings.Builder + for _, info := range resources { + builder.WriteString(info.String() + "\n") + } + return strings.NewReader(builder.String()) +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/interface.go b/vendor/helm.sh/helm/v3/pkg/kube/interface.go index 545985996c..59a4cbdccd 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/interface.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/interface.go @@ -30,14 +30,19 @@ type Interface interface { // Create creates one or more resources. Create(resources ResourceList) (*Result, error) + // Wait waits up to the given timeout for the specified resources to be ready. Wait(resources ResourceList, timeout time.Duration) error + // WaitWithJobs wait up to the given timeout for the specified resources to be ready, including jobs. WaitWithJobs(resources ResourceList, timeout time.Duration) error // Delete destroys one or more resources. 
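Putting the two fakes together in a test could look like this sketch (the test and the injected error are illustrative):

```go
package kube_test

import (
	"strings"
	"testing"
	"time"

	"helm.sh/helm/v3/pkg/kube"
	"helm.sh/helm/v3/pkg/kube/fake"
)

func TestWaitFailure(t *testing.T) {
	// The failing client returns the injected error from Wait and delegates
	// everything else to the printing client.
	var out strings.Builder
	client := &fake.FailingKubeClient{
		PrintingKubeClient: fake.PrintingKubeClient{Out: &out},
		WaitError:          kube.ErrNoObjectsVisited, // any error works here
	}

	if err := client.Wait(kube.ResourceList{}, time.Second); err == nil {
		t.Fatal("expected the injected wait error")
	}
}
```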
Delete(resources ResourceList) (*Result, []error) - // Watch the resource in reader until it is "ready". This method + // WatchUntilReady watches the resources given and waits until it is ready. + // + // This method is mainly for hook implementations. It watches for a resource to + // hit a particular milestone. The milestone depends on the Kind. // // For Jobs, "ready" means the Job ran to completion (exited without error). // For Pods, "ready" means the Pod phase is marked "succeeded". @@ -49,9 +54,9 @@ type Interface interface { // if it doesn't exist. Update(original, target ResourceList, force bool) (*Result, error) - // Build creates a resource list from a Reader + // Build creates a resource list from a Reader. // - // reader must contain a YAML stream (one or more YAML documents separated + // Reader must contain a YAML stream (one or more YAML documents separated // by "\n---\n") // // Validates against OpenAPI schema if validate is true. @@ -61,7 +66,7 @@ type Interface interface { // and returns said phase (PodSucceeded or PodFailed qualify). WaitAndGetCompletedPodPhase(name string, timeout time.Duration) (v1.PodPhase, error) - // isReachable checks whether the client is able to connect to the cluster + // IsReachable checks whether the client is able to connect to the cluster. IsReachable() error } diff --git a/vendor/helm.sh/helm/v3/pkg/kube/ready.go b/vendor/helm.sh/helm/v3/pkg/kube/ready.go new file mode 100644 index 0000000000..19b93e3862 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/kube/ready.go @@ -0,0 +1,397 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube // import "helm.sh/helm/v3/pkg/kube" + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + + deploymentutil "helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util" +) + +// ReadyCheckerOption is a function that configures a ReadyChecker. +type ReadyCheckerOption func(*ReadyChecker) + +// PausedAsReady returns a ReadyCheckerOption that configures a ReadyChecker +// to consider paused resources to be ready. For example a Deployment +// with spec.paused equal to true would be considered ready. +func PausedAsReady(pausedAsReady bool) ReadyCheckerOption { + return func(c *ReadyChecker) { + c.pausedAsReady = pausedAsReady + } +} + +// CheckJobs returns a ReadyCheckerOption that configures a ReadyChecker +// to consider readiness of Job resources. 
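With the readiness logic now extracted into ReadyChecker, it can be driven outside the waiter as well; a sketch assuming a local kubeconfig and a resource list built elsewhere:

```go
package main

import (
	"context"
	"log"
	"time"

	"helm.sh/helm/v3/pkg/kube"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// Same configuration Client.Wait / WaitWithJobs now use internally.
	checker := kube.NewReadyChecker(cs, log.Printf, kube.PausedAsReady(true), kube.CheckJobs(true))

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	var resources kube.ResourceList // built elsewhere, e.g. via Client.Build
	for _, r := range resources {
		ready, err := checker.IsReady(ctx, r)
		log.Printf("%s ready=%v err=%v", r.Name, ready, err)
	}
}
```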
+func CheckJobs(checkJobs bool) ReadyCheckerOption { + return func(c *ReadyChecker) { + c.checkJobs = checkJobs + } +} + +// NewReadyChecker creates a new checker. Passed ReadyCheckerOptions can +// be used to override defaults. +func NewReadyChecker(cl kubernetes.Interface, log func(string, ...interface{}), opts ...ReadyCheckerOption) ReadyChecker { + c := ReadyChecker{ + client: cl, + log: log, + } + if c.log == nil { + c.log = nopLogger + } + for _, opt := range opts { + opt(&c) + } + return c +} + +// ReadyChecker is a type that can check core Kubernetes types for readiness. +type ReadyChecker struct { + client kubernetes.Interface + log func(string, ...interface{}) + checkJobs bool + pausedAsReady bool +} + +// IsReady checks if v is ready. It supports checking readiness for pods, +// deployments, persistent volume claims, services, daemon sets, custom +// resource definitions, stateful sets, replication controllers, and replica +// sets. All other resource kinds are always considered ready. +// +// IsReady will fetch the latest state of the object from the server prior to +// performing readiness checks, and it will return any error encountered. +func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) { + var ( + // This defaults to true, otherwise we get to a point where + // things will always return false unless one of the objects + // that manages pods has been hit + ok = true + err error + ) + switch value := AsVersioned(v).(type) { + case *corev1.Pod: + pod, err := c.client.CoreV1().Pods(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil || !c.isPodReady(pod) { + return false, err + } + case *batchv1.Job: + if c.checkJobs { + job, err := c.client.BatchV1().Jobs(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil || !c.jobReady(job) { + return false, err + } + } + case *appsv1.Deployment, *appsv1beta1.Deployment, *appsv1beta2.Deployment, *extensionsv1beta1.Deployment: + currentDeployment, err := c.client.AppsV1().Deployments(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + // If paused deployment will never be ready + if currentDeployment.Spec.Paused { + return c.pausedAsReady, nil + } + // Find RS associated with deployment + newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, c.client.AppsV1()) + if err != nil || newReplicaSet == nil { + return false, err + } + if !c.deploymentReady(newReplicaSet, currentDeployment) { + return false, nil + } + case *corev1.PersistentVolumeClaim: + claim, err := c.client.CoreV1().PersistentVolumeClaims(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.volumeReady(claim) { + return false, nil + } + case *corev1.Service: + svc, err := c.client.CoreV1().Services(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.serviceReady(svc) { + return false, nil + } + case *extensionsv1beta1.DaemonSet, *appsv1.DaemonSet, *appsv1beta2.DaemonSet: + ds, err := c.client.AppsV1().DaemonSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.daemonSetReady(ds) { + return false, nil + } + case *apiextv1beta1.CustomResourceDefinition: + if err := v.Get(); err != nil { + return false, err + } + crd := &apiextv1beta1.CustomResourceDefinition{} + if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { + return false, err + } + if !c.crdBetaReady(*crd) { + return false, nil + } + case 
*apiextv1.CustomResourceDefinition: + if err := v.Get(); err != nil { + return false, err + } + crd := &apiextv1.CustomResourceDefinition{} + if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { + return false, err + } + if !c.crdReady(*crd) { + return false, nil + } + case *appsv1.StatefulSet, *appsv1beta1.StatefulSet, *appsv1beta2.StatefulSet: + sts, err := c.client.AppsV1().StatefulSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.statefulSetReady(sts) { + return false, nil + } + case *corev1.ReplicationController, *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet: + ok, err = c.podsReadyForObject(ctx, v.Namespace, value) + } + if !ok || err != nil { + return false, err + } + return true, nil +} + +func (c *ReadyChecker) podsReadyForObject(ctx context.Context, namespace string, obj runtime.Object) (bool, error) { + pods, err := c.podsforObject(ctx, namespace, obj) + if err != nil { + return false, err + } + for _, pod := range pods { + if !c.isPodReady(&pod) { + return false, nil + } + } + return true, nil +} + +func (c *ReadyChecker) podsforObject(ctx context.Context, namespace string, obj runtime.Object) ([]corev1.Pod, error) { + selector, err := SelectorsForObject(obj) + if err != nil { + return nil, err + } + list, err := getPods(ctx, c.client, namespace, selector.String()) + return list, err +} + +// isPodReady returns true if a pod is ready; false otherwise. +func (c *ReadyChecker) isPodReady(pod *corev1.Pod) bool { + for _, c := range pod.Status.Conditions { + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return true + } + } + c.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName()) + return false +} + +func (c *ReadyChecker) jobReady(job *batchv1.Job) bool { + if job.Status.Failed > *job.Spec.BackoffLimit { + c.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName()) + return false + } + if job.Status.Succeeded < *job.Spec.Completions { + c.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName()) + return false + } + return true +} + +func (c *ReadyChecker) serviceReady(s *corev1.Service) bool { + // ExternalName Services are external to cluster so helm shouldn't be checking to see if they're 'ready' (i.e. 
have an IP Set) + if s.Spec.Type == corev1.ServiceTypeExternalName { + return true + } + + // Ensure that the service cluster IP is not empty + if s.Spec.ClusterIP == "" { + c.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName()) + return false + } + + // This checks if the service has a LoadBalancer and that balancer has an Ingress defined + if s.Spec.Type == corev1.ServiceTypeLoadBalancer { + // do not wait when at least 1 external IP is set + if len(s.Spec.ExternalIPs) > 0 { + c.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs) + return true + } + + if s.Status.LoadBalancer.Ingress == nil { + c.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName()) + return false + } + } + + return true +} + +func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { + if v.Status.Phase != corev1.ClaimBound { + c.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName()) + return false + } + return true +} + +func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool { + expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) + if !(rs.Status.ReadyReplicas >= expectedReady) { + c.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady) + return false + } + return true +} + +func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { + // If the update strategy is not a rolling update, there will be nothing to wait for + if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { + return true + } + + // Make sure all the updated pods have been scheduled + if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { + c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled) + return false + } + maxUnavailable, err := intstr.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) + if err != nil { + // If for some reason the value is invalid, set max unavailable to the + // number of desired replicas. This is the same behavior as the + // `MaxUnavailable` function in deploymentutil + maxUnavailable = int(ds.Status.DesiredNumberScheduled) + } + + expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable + if !(int(ds.Status.NumberReady) >= expectedReady) { + c.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady) + return false + } + return true +} + +// Because the v1 extensions API is not available on all supported k8s versions +// yet and because Go doesn't support generics, we need to have a duplicate +// function to support the v1beta1 types +func (c *ReadyChecker) crdBetaReady(crd apiextv1beta1.CustomResourceDefinition) bool { + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextv1beta1.Established: + if cond.Status == apiextv1beta1.ConditionTrue { + return true + } + case apiextv1beta1.NamesAccepted: + if cond.Status == apiextv1beta1.ConditionFalse { + // This indicates a naming conflict, but it's probably not the + // job of this function to fail because of that. 
Instead, + // we treat it as a success, since the process should be able to + // continue. + return true + } + } + } + return false +} + +func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextv1.Established: + if cond.Status == apiextv1.ConditionTrue { + return true + } + case apiextv1.NamesAccepted: + if cond.Status == apiextv1.ConditionFalse { + // This indicates a naming conflict, but it's probably not the + // job of this function to fail because of that. Instead, + // we treat it as a success, since the process should be able to + // continue. + return true + } + } + } + return false +} + +func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { + // If the update strategy is not a rolling update, there will be nothing to wait for + if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { + return true + } + + // Dereference all the pointers because StatefulSets like them + var partition int + // 1 is the default for replicas if not set + var replicas = 1 + // For some reason, even if the update strategy is a rolling update, the + // actual rollingUpdate field can be nil. If it is, we can safely assume + // there is no partition value + if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { + partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition) + } + if sts.Spec.Replicas != nil { + replicas = int(*sts.Spec.Replicas) + } + + // Because an update strategy can use partitioning, we need to calculate the + // number of updated replicas we should have. For example, if the replicas + // is set to 3 and the partition is 2, we'd expect only one pod to be + // updated + expectedReplicas := replicas - partition + + // Make sure all the updated pods have been scheduled + if int(sts.Status.UpdatedReplicas) != expectedReplicas { + c.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas) + return false + } + + if int(sts.Status.ReadyReplicas) != replicas { + c.log("StatefulSet is not ready: %s/%s. 
%d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) + return false + } + return true +} + +func getPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) { + list, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: selector, + }) + return list.Items, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/wait.go b/vendor/helm.sh/helm/v3/pkg/kube/wait.go index 40f7b7a6ef..f9ea2ea850 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/wait.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/wait.go @@ -28,339 +28,36 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - deploymentutil "helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util" + "k8s.io/apimachinery/pkg/util/wait" ) type waiter struct { - c kubernetes.Interface + c ReadyChecker timeout time.Duration log func(string, ...interface{}) } // waitForResources polls to get the current status of all pods, PVCs, Services and // Jobs(optional) until all are ready or a timeout is reached -func (w *waiter) waitForResources(created ResourceList, waitForJobsEnabled bool) error { +func (w *waiter) waitForResources(created ResourceList) error { w.log("beginning wait for %d resources with timeout of %v", len(created), w.timeout) - return wait.Poll(2*time.Second, w.timeout, func() (bool, error) { + ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + defer cancel() + + return wait.PollImmediateUntil(2*time.Second, func() (bool, error) { for _, v := range created { - var ( - // This defaults to true, otherwise we get to a point where - // things will always return false unless one of the objects - // that manages pods has been hit - ok = true - err error - ) - switch value := AsVersioned(v).(type) { - case *corev1.Pod: - pod, err := w.c.CoreV1().Pods(v.Namespace).Get(context.Background(), v.Name, metav1.GetOptions{}) - if err != nil || !w.isPodReady(pod) { - return false, err - } - case *batchv1.Job: - if waitForJobsEnabled { - job, err := w.c.BatchV1().Jobs(v.Namespace).Get(context.Background(), v.Name, metav1.GetOptions{}) - if err != nil || !w.jobReady(job) { - return false, err - } - } - case *appsv1.Deployment, *appsv1beta1.Deployment, *appsv1beta2.Deployment, *extensionsv1beta1.Deployment: - currentDeployment, err := w.c.AppsV1().Deployments(v.Namespace).Get(context.Background(), v.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - // If paused deployment will never be ready - if currentDeployment.Spec.Paused { - continue - } - // Find RS associated with deployment - newReplicaSet, err := deploymentutil.GetNewReplicaSet(currentDeployment, w.c.AppsV1()) - if err != nil || newReplicaSet == nil { - return false, err - } - if !w.deploymentReady(newReplicaSet, currentDeployment) { - return false, nil - } - case *corev1.PersistentVolumeClaim: - claim, err := w.c.CoreV1().PersistentVolumeClaims(v.Namespace).Get(context.Background(), v.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - if 
!w.volumeReady(claim) { - return false, nil - } - case *corev1.Service: - svc, err := w.c.CoreV1().Services(v.Namespace).Get(context.Background(), v.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - if !w.serviceReady(svc) { - return false, nil - } - case *extensionsv1beta1.DaemonSet, *appsv1.DaemonSet, *appsv1beta2.DaemonSet: - ds, err := w.c.AppsV1().DaemonSets(v.Namespace).Get(context.Background(), v.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - if !w.daemonSetReady(ds) { - return false, nil - } - case *apiextv1beta1.CustomResourceDefinition: - if err := v.Get(); err != nil { - return false, err - } - crd := &apiextv1beta1.CustomResourceDefinition{} - if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { - return false, err - } - if !w.crdBetaReady(*crd) { - return false, nil - } - case *apiextv1.CustomResourceDefinition: - if err := v.Get(); err != nil { - return false, err - } - crd := &apiextv1.CustomResourceDefinition{} - if err := scheme.Scheme.Convert(v.Object, crd, nil); err != nil { - return false, err - } - if !w.crdReady(*crd) { - return false, nil - } - case *appsv1.StatefulSet, *appsv1beta1.StatefulSet, *appsv1beta2.StatefulSet: - sts, err := w.c.AppsV1().StatefulSets(v.Namespace).Get(context.Background(), v.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - if !w.statefulSetReady(sts) { - return false, nil - } - case *corev1.ReplicationController, *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet: - ok, err = w.podsReadyForObject(v.Namespace, value) - } - if !ok || err != nil { + ready, err := w.c.IsReady(ctx, v) + if !ready || err != nil { return false, err } } return true, nil - }) -} - -func (w *waiter) podsReadyForObject(namespace string, obj runtime.Object) (bool, error) { - pods, err := w.podsforObject(namespace, obj) - if err != nil { - return false, err - } - for _, pod := range pods { - if !w.isPodReady(&pod) { - return false, nil - } - } - return true, nil -} - -func (w *waiter) podsforObject(namespace string, obj runtime.Object) ([]corev1.Pod, error) { - selector, err := SelectorsForObject(obj) - if err != nil { - return nil, err - } - list, err := getPods(w.c, namespace, selector.String()) - return list, err -} - -// isPodReady returns true if a pod is ready; false otherwise. -func (w *waiter) isPodReady(pod *corev1.Pod) bool { - for _, c := range pod.Status.Conditions { - if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { - return true - } - } - w.log("Pod is not ready: %s/%s", pod.GetNamespace(), pod.GetName()) - return false -} - -func (w *waiter) jobReady(job *batchv1.Job) bool { - if job.Status.Failed >= *job.Spec.BackoffLimit { - w.log("Job is failed: %s/%s", job.GetNamespace(), job.GetName()) - return false - } - if job.Status.Succeeded < *job.Spec.Completions { - w.log("Job is not completed: %s/%s", job.GetNamespace(), job.GetName()) - return false - } - return true -} - -func (w *waiter) serviceReady(s *corev1.Service) bool { - // ExternalName Services are external to cluster so helm shouldn't be checking to see if they're 'ready' (i.e. 
have an IP Set) - if s.Spec.Type == corev1.ServiceTypeExternalName { - return true - } - - // Ensure that the service cluster IP is not empty - if s.Spec.ClusterIP == "" { - w.log("Service does not have cluster IP address: %s/%s", s.GetNamespace(), s.GetName()) - return false - } - - // This checks if the service has a LoadBalancer and that balancer has an Ingress defined - if s.Spec.Type == corev1.ServiceTypeLoadBalancer { - // do not wait when at least 1 external IP is set - if len(s.Spec.ExternalIPs) > 0 { - w.log("Service %s/%s has external IP addresses (%v), marking as ready", s.GetNamespace(), s.GetName(), s.Spec.ExternalIPs) - return true - } - - if s.Status.LoadBalancer.Ingress == nil { - w.log("Service does not have load balancer ingress IP address: %s/%s", s.GetNamespace(), s.GetName()) - return false - } - } - - return true -} - -func (w *waiter) volumeReady(v *corev1.PersistentVolumeClaim) bool { - if v.Status.Phase != corev1.ClaimBound { - w.log("PersistentVolumeClaim is not bound: %s/%s", v.GetNamespace(), v.GetName()) - return false - } - return true -} - -func (w *waiter) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool { - expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) - if !(rs.Status.ReadyReplicas >= expectedReady) { - w.log("Deployment is not ready: %s/%s. %d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady) - return false - } - return true -} - -func (w *waiter) daemonSetReady(ds *appsv1.DaemonSet) bool { - // If the update strategy is not a rolling update, there will be nothing to wait for - if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { - return true - } - - // Make sure all the updated pods have been scheduled - if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled { - w.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled) - return false - } - maxUnavailable, err := intstr.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(ds.Status.DesiredNumberScheduled), true) - if err != nil { - // If for some reason the value is invalid, set max unavailable to the - // number of desired replicas. This is the same behavior as the - // `MaxUnavailable` function in deploymentutil - maxUnavailable = int(ds.Status.DesiredNumberScheduled) - } - - expectedReady := int(ds.Status.DesiredNumberScheduled) - maxUnavailable - if !(int(ds.Status.NumberReady) >= expectedReady) { - w.log("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready", ds.Namespace, ds.Name, ds.Status.NumberReady, expectedReady) - return false - } - return true -} - -// Because the v1 extensions API is not available on all supported k8s versions -// yet and because Go doesn't support generics, we need to have a duplicate -// function to support the v1beta1 types -func (w *waiter) crdBetaReady(crd apiextv1beta1.CustomResourceDefinition) bool { - for _, cond := range crd.Status.Conditions { - switch cond.Type { - case apiextv1beta1.Established: - if cond.Status == apiextv1beta1.ConditionTrue { - return true - } - case apiextv1beta1.NamesAccepted: - if cond.Status == apiextv1beta1.ConditionFalse { - // This indicates a naming conflict, but it's probably not the - // job of this function to fail because of that. Instead, - // we treat it as a success, since the process should be able to - // continue. 
- return true - } - } - } - return false -} - -func (w *waiter) crdReady(crd apiextv1.CustomResourceDefinition) bool { - for _, cond := range crd.Status.Conditions { - switch cond.Type { - case apiextv1.Established: - if cond.Status == apiextv1.ConditionTrue { - return true - } - case apiextv1.NamesAccepted: - if cond.Status == apiextv1.ConditionFalse { - // This indicates a naming conflict, but it's probably not the - // job of this function to fail because of that. Instead, - // we treat it as a success, since the process should be able to - // continue. - return true - } - } - } - return false -} - -func (w *waiter) statefulSetReady(sts *appsv1.StatefulSet) bool { - // If the update strategy is not a rolling update, there will be nothing to wait for - if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { - return true - } - - // Dereference all the pointers because StatefulSets like them - var partition int - // 1 is the default for replicas if not set - var replicas = 1 - // For some reason, even if the update strategy is a rolling update, the - // actual rollingUpdate field can be nil. If it is, we can safely assume - // there is no partition value - if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { - partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition) - } - if sts.Spec.Replicas != nil { - replicas = int(*sts.Spec.Replicas) - } - - // Because an update strategy can use partitioning, we need to calculate the - // number of updated replicas we should have. For example, if the replicas - // is set to 3 and the partition is 2, we'd expect only one pod to be - // updated - expectedReplicas := replicas - partition - - // Make sure all the updated pods have been scheduled - if int(sts.Status.UpdatedReplicas) != expectedReplicas { - w.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled", sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas) - return false - } - - if int(sts.Status.ReadyReplicas) != replicas { - w.log("StatefulSet is not ready: %s/%s. %d out of %d expected pods are ready", sts.Namespace, sts.Name, sts.Status.ReadyReplicas, replicas) - return false - } - return true -} - -func getPods(client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) { - list, err := client.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: selector, - }) - return list.Items, err + }, ctx.Done()) } // SelectorsForObject returns the pod label selector for a given object diff --git a/vendor/helm.sh/helm/v3/pkg/lint/lint.go b/vendor/helm.sh/helm/v3/pkg/lint/lint.go new file mode 100644 index 0000000000..67e76bd3d4 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/lint.go @@ -0,0 +1,37 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lint // import "helm.sh/helm/v3/pkg/lint" + +import ( + "path/filepath" + + "helm.sh/helm/v3/pkg/lint/rules" + "helm.sh/helm/v3/pkg/lint/support" +) + +// All runs all of the available linters on the given base directory. +func All(basedir string, values map[string]interface{}, namespace string, strict bool) support.Linter { + // Using abs path to get directory context + chartDir, _ := filepath.Abs(basedir) + + linter := support.Linter{ChartDir: chartDir} + rules.Chartfile(&linter) + rules.ValuesWithOverrides(&linter, values) + rules.Templates(&linter, values, namespace, strict) + rules.Dependencies(&linter) + return linter +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go new file mode 100644 index 0000000000..b49f2cec0b --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go @@ -0,0 +1,210 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/Masterminds/semver/v3" + "github.com/asaskevich/govalidator" + "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Chartfile runs a set of linter rules related to Chart.yaml file +func Chartfile(linter *support.Linter) { + chartFileName := "Chart.yaml" + chartPath := filepath.Join(linter.ChartDir, chartFileName) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlNotDirectory(chartPath)) + + chartFile, err := chartutil.LoadChartfile(chartPath) + validChartFile := linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartYamlFormat(err)) + + // Guard clause. Following linter rules require a parsable ChartFile + if !validChartFile { + return + } + + // type check for Chart.yaml . 
ignoring error as any parse + // errors would already be caught in the above load function + chartFileForTypeCheck, _ := loadChartFileForTypeCheck(chartPath) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartName(chartFile)) + + // Chart metadata + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAPIVersion(chartFile)) + + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartVersion(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartAppVersionType(chartFileForTypeCheck)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartMaintainer(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartSources(chartFile)) + linter.RunLinterRule(support.InfoSev, chartFileName, validateChartIconPresence(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartIconURL(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartType(chartFile)) + linter.RunLinterRule(support.ErrorSev, chartFileName, validateChartDependencies(chartFile)) +} + +func validateChartVersionType(data map[string]interface{}) error { + return isStringValue(data, "version") +} + +func validateChartAppVersionType(data map[string]interface{}) error { + return isStringValue(data, "appVersion") +} + +func isStringValue(data map[string]interface{}, key string) error { + value, ok := data[key] + if !ok { + return nil + } + valueType := fmt.Sprintf("%T", value) + if valueType != "string" { + return errors.Errorf("%s should be of type string but it's of type %s", key, valueType) + } + return nil +} + +func validateChartYamlNotDirectory(chartPath string) error { + fi, err := os.Stat(chartPath) + + if err == nil && fi.IsDir() { + return errors.New("should be a file, not a directory") + } + return nil +} + +func validateChartYamlFormat(chartFileError error) error { + if chartFileError != nil { + return errors.Errorf("unable to parse YAML\n\t%s", chartFileError.Error()) + } + return nil +} + +func validateChartName(cf *chart.Metadata) error { + if cf.Name == "" { + return errors.New("name is required") + } + return nil +} + +func validateChartAPIVersion(cf *chart.Metadata) error { + if cf.APIVersion == "" { + return errors.New("apiVersion is required. The value must be either \"v1\" or \"v2\"") + } + + if cf.APIVersion != chart.APIVersionV1 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("apiVersion '%s' is not valid. 
The value must be either \"v1\" or \"v2\"", cf.APIVersion) + } + + return nil +} + +func validateChartVersion(cf *chart.Metadata) error { + if cf.Version == "" { + return errors.New("version is required") + } + + version, err := semver.NewVersion(cf.Version) + + if err != nil { + return errors.Errorf("version '%s' is not a valid SemVer", cf.Version) + } + + c, err := semver.NewConstraint(">0.0.0-0") + if err != nil { + return err + } + valid, msg := c.Validate(version) + + if !valid && len(msg) > 0 { + return errors.Errorf("version %v", msg[0]) + } + + return nil +} + +func validateChartMaintainer(cf *chart.Metadata) error { + for _, maintainer := range cf.Maintainers { + if maintainer.Name == "" { + return errors.New("each maintainer requires a name") + } else if maintainer.Email != "" && !govalidator.IsEmail(maintainer.Email) { + return errors.Errorf("invalid email '%s' for maintainer '%s'", maintainer.Email, maintainer.Name) + } else if maintainer.URL != "" && !govalidator.IsURL(maintainer.URL) { + return errors.Errorf("invalid url '%s' for maintainer '%s'", maintainer.URL, maintainer.Name) + } + } + return nil +} + +func validateChartSources(cf *chart.Metadata) error { + for _, source := range cf.Sources { + if source == "" || !govalidator.IsRequestURL(source) { + return errors.Errorf("invalid source URL '%s'", source) + } + } + return nil +} + +func validateChartIconPresence(cf *chart.Metadata) error { + if cf.Icon == "" { + return errors.New("icon is recommended") + } + return nil +} + +func validateChartIconURL(cf *chart.Metadata) error { + if cf.Icon != "" && !govalidator.IsRequestURL(cf.Icon) { + return errors.Errorf("invalid icon URL '%s'", cf.Icon) + } + return nil +} + +func validateChartDependencies(cf *chart.Metadata) error { + if len(cf.Dependencies) > 0 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("dependencies are not valid in the Chart file with apiVersion '%s'. They are valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2) + } + return nil +} + +func validateChartType(cf *chart.Metadata) error { + if len(cf.Type) > 0 && cf.APIVersion != chart.APIVersionV2 { + return fmt.Errorf("chart type is not valid in apiVersion '%s'. It is valid in apiVersion '%s'", cf.APIVersion, chart.APIVersionV2) + } + return nil +} + +// loadChartFileForTypeCheck loads the Chart.yaml +// in a generic form of a map[string]interface{}, so that the type +// of the values can be checked +func loadChartFileForTypeCheck(filename string) (map[string]interface{}, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + y := make(map[string]interface{}) + err = yaml.Unmarshal(b, &y) + return y, err +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go new file mode 100644 index 0000000000..abecd1feb6 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go @@ -0,0 +1,82 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Dependencies runs lints against a chart's dependencies +// +// See https://github.com/helm/helm/issues/7910 +func Dependencies(linter *support.Linter) { + c, err := loader.LoadDir(linter.ChartDir) + if !linter.RunLinterRule(support.ErrorSev, "", validateChartFormat(err)) { + return + } + + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c)) + linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c)) +} + +func validateChartFormat(chartError error) error { + if chartError != nil { + return errors.Errorf("unable to load chart\n\t%s", chartError) + } + return nil +} + +func validateDependencyInChartsDir(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Dependencies() { + dependencies[dep.Metadata.Name] = struct{}{} + } + for _, dep := range c.Metadata.Dependencies { + if _, ok := dependencies[dep.Name]; !ok { + missing = append(missing, dep.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart directory is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} + +func validateDependencyInMetadata(c *chart.Chart) (err error) { + dependencies := map[string]struct{}{} + missing := []string{} + for _, dep := range c.Metadata.Dependencies { + dependencies[dep.Name] = struct{}{} + } + for _, dep := range c.Dependencies() { + if _, ok := dependencies[dep.Metadata.Name]; !ok { + missing = append(missing, dep.Metadata.Name) + } + } + if len(missing) > 0 { + err = fmt.Errorf("chart metadata is missing these dependencies: %s", strings.Join(missing, ",")) + } + return err +} diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go new file mode 100644 index 0000000000..384c179736 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go @@ -0,0 +1,84 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules // import "helm.sh/helm/v3/pkg/lint/rules" + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/endpoints/deprecation" + kscheme "k8s.io/client-go/kubernetes/scheme" +) + +const ( + // This should be set in the Makefile based on the version of client-go being imported. 
+	// These constants will be overwritten with LDFLAGS
+	k8sVersionMajor = 1
+	k8sVersionMinor = 20
+)
+
+// deprecatedAPIError indicates that an API is deprecated in Kubernetes
+type deprecatedAPIError struct {
+	Deprecated string
+	Message    string
+}
+
+func (e deprecatedAPIError) Error() string {
+	msg := e.Message
+	return msg
+}
+
+func validateNoDeprecations(resource *K8sYamlStruct) error {
+	// if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation
+	if resource.APIVersion == "" {
+		return nil
+	}
+	if resource.Kind == "" {
+		return nil
+	}
+
+	runtimeObject, err := resourceToRuntimeObject(resource)
+	if err != nil {
+		// do not error for non-kubernetes resources
+		if runtime.IsNotRegisteredError(err) {
+			return nil
+		}
+		return err
+	}
+	if !deprecation.IsDeprecated(runtimeObject, k8sVersionMajor, k8sVersionMinor) {
+		return nil
+	}
+	gvk := fmt.Sprintf("%s %s", resource.APIVersion, resource.Kind)
+	return deprecatedAPIError{
+		Deprecated: gvk,
+		Message:    deprecation.WarningMessage(runtimeObject),
+	}
+}
+
+func resourceToRuntimeObject(resource *K8sYamlStruct) (runtime.Object, error) {
+	scheme := runtime.NewScheme()
+	kscheme.AddToScheme(scheme)
+
+	gvk := schema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind)
+	out, err := scheme.New(gvk)
+	if err != nil {
+		return nil, err
+	}
+	out.GetObjectKind().SetGroupVersionKind(gvk)
+	return out, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go
new file mode 100644
index 0000000000..f70c997cf4
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go
@@ -0,0 +1,310 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rules
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/api/validation"
+	apipath "k8s.io/apimachinery/pkg/api/validation/path"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apimachinery/pkg/util/yaml"
+
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/engine"
+	"helm.sh/helm/v3/pkg/lint/support"
+)
+
+var (
+	crdHookSearch     = regexp.MustCompile(`"?helm\.sh/hook"?:\s+crd-install`)
+	releaseTimeSearch = regexp.MustCompile(`\.Release\.Time`)
+)
+
+// Templates lints the templates in the Linter.
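+//
+// A minimal usage sketch (illustrative only; the chart path and values below
+// are assumptions, not part of this change):
+//
+//	linter := support.Linter{ChartDir: "/abs/path/to/mychart"}
+//	Templates(&linter, map[string]interface{}{}, "default", false)
+//	for _, msg := range linter.Messages {
+//		fmt.Println(msg)
+//	}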
+func Templates(linter *support.Linter, values map[string]interface{}, namespace string, strict bool) {
+	fpath := "templates/"
+	templatesPath := filepath.Join(linter.ChartDir, fpath)
+
+	templatesDirExist := linter.RunLinterRule(support.WarningSev, fpath, validateTemplatesDir(templatesPath))
+
+	// Templates directory is optional for now
+	if !templatesDirExist {
+		return
+	}
+
+	// Load chart and parse templates
+	chart, err := loader.Load(linter.ChartDir)
+
+	chartLoaded := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+	if !chartLoaded {
+		return
+	}
+
+	options := chartutil.ReleaseOptions{
+		Name:      "test-release",
+		Namespace: namespace,
+	}
+
+	cvals, err := chartutil.CoalesceValues(chart, values)
+	if err != nil {
+		return
+	}
+	valuesToRender, err := chartutil.ToRenderValues(chart, cvals, options, nil)
+	if err != nil {
+		linter.RunLinterRule(support.ErrorSev, fpath, err)
+		return
+	}
+	var e engine.Engine
+	e.LintMode = true
+	renderedContentMap, err := e.Render(chart, valuesToRender)
+
+	renderOk := linter.RunLinterRule(support.ErrorSev, fpath, err)
+
+	if !renderOk {
+		return
+	}
+
+	/* Iterate over all the templates to check:
+	- It is a .yaml file
+	- All the values in the template file are defined
+	- {{}} include | quote
+	- Generated content is a valid YAML file
+	- Metadata.Namespace is not set
+	*/
+	for _, template := range chart.Templates {
+		fileName, data := template.Name, template.Data
+		fpath = fileName
+
+		linter.RunLinterRule(support.ErrorSev, fpath, validateAllowedExtension(fileName))
+		// These are v3 specific checks to warn people if their
+		// chart is not compatible with v3
+		linter.RunLinterRule(support.WarningSev, fpath, validateNoCRDHooks(data))
+		linter.RunLinterRule(support.ErrorSev, fpath, validateNoReleaseTime(data))
+
+		// We only apply the following lint rules to yaml files
+		if filepath.Ext(fileName) != ".yaml" || filepath.Ext(fileName) == ".yml" {
+			continue
+		}
+
+		// NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1463
+		// Check that all the templates have a matching value
+		//linter.RunLinterRule(support.WarningSev, fpath, validateNoMissingValues(templatesPath, valuesToRender, preExecutedTemplate))
+
+		// NOTE: disabled for now, Refs https://github.com/helm/helm/issues/1037
+		// linter.RunLinterRule(support.WarningSev, fpath, validateQuotes(string(preExecutedTemplate)))
+
+		renderedContent := renderedContentMap[path.Join(chart.Name(), fileName)]
+		if strings.TrimSpace(renderedContent) != "" {
+			linter.RunLinterRule(support.WarningSev, fpath, validateTopIndentLevel(renderedContent))
+
+			decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(renderedContent), 4096)
+
+			// Lint all resources if the file contains multiple documents separated by ---
+			for {
+				// Even though K8sYamlStruct only defines a few fields, an error in any other
+				// key will be raised as well
+				var yamlStruct *K8sYamlStruct
+
+				err := decoder.Decode(&yamlStruct)
+				if err == io.EOF {
+					break
+				}
+
+				// If YAML linting fails, we still progress. So we don't capture the returned state
+				// on this linter run.
+ linter.RunLinterRule(support.ErrorSev, fpath, validateYamlContent(err)) + + if yamlStruct != nil { + // NOTE: set to warnings to allow users to support out-of-date kubernetes + // Refs https://github.com/helm/helm/issues/8596 + linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct)) + linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct)) + + linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent)) + } + } + } + } +} + +// validateTopIndentLevel checks that the content does not start with an indent level > 0. +// +// This error can occur when a template accidentally inserts space. It can cause +// unpredictable errors depending on whether the text is normalized before being passed +// into the YAML parser. So we trap it here. +// +// See https://github.com/helm/helm/issues/8467 +func validateTopIndentLevel(content string) error { + // Read lines until we get to a non-empty one + scanner := bufio.NewScanner(bytes.NewBufferString(content)) + for scanner.Scan() { + line := scanner.Text() + // If line is empty, skip + if strings.TrimSpace(line) == "" { + continue + } + // If it starts with one or more spaces, this is an error + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + return fmt.Errorf("document starts with an illegal indent: %q, which may cause parsing problems", line) + } + // Any other condition passes. + return nil + } + return scanner.Err() +} + +// Validation functions +func validateTemplatesDir(templatesPath string) error { + if fi, err := os.Stat(templatesPath); err != nil { + return errors.New("directory not found") + } else if !fi.IsDir() { + return errors.New("not a directory") + } + return nil +} + +func validateAllowedExtension(fileName string) error { + ext := filepath.Ext(fileName) + validExtensions := []string{".yaml", ".yml", ".tpl", ".txt"} + + for _, b := range validExtensions { + if b == ext { + return nil + } + } + + return errors.Errorf("file extension '%s' not valid. Valid extensions are .yaml, .yml, .tpl, or .txt", ext) +} + +func validateYamlContent(err error) error { + return errors.Wrap(err, "unable to parse YAML") +} + +// validateMetadataName uses the correct validation function for the object +// Kind, or if not set, defaults to the standard definition of a subdomain in +// DNS (RFC 1123), used by most resources. +func validateMetadataName(obj *K8sYamlStruct) error { + fn := validateMetadataNameFunc(obj) + allErrs := field.ErrorList{} + for _, msg := range fn(obj.Metadata.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), obj.Metadata.Name, msg)) + } + if len(allErrs) > 0 { + return errors.Wrapf(allErrs.ToAggregate(), "object name does not conform to Kubernetes naming requirements: %q", obj.Metadata.Name) + } + return nil +} + +// validateMetadataNameFunc will return a name validation function for the +// object kind, if defined below. +// +// Rules should match those set in the various api validations: +// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/core/validation/validation.go#L205-L274 +// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/apps/validation/validation.go#L39 +// ... +// +// Implementing here to avoid importing k/k. +// +// If no mapping is defined, returns NameIsDNSSubdomain. This is used by object +// kinds that don't have special requirements, so is the most likely to work if +// new kinds are added. 
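+//
+// For example (illustrative, not part of this change): a Service named "3-web"
+// passes the generic DNS-1123 subdomain check but fails here, because Service
+// names must be valid DNS-1035 labels and cannot start with a digit:
+//
+//	fn := validateMetadataNameFunc(&K8sYamlStruct{Kind: "Service"})
+//	msgs := fn("3-web", false) // non-empty: not a valid DNS-1035 label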
+func validateMetadataNameFunc(obj *K8sYamlStruct) validation.ValidateNameFunc {
+	switch strings.ToLower(obj.Kind) {
+	case "pod", "node", "secret", "endpoints", "resourcequota", // core
+		"controllerrevision", "daemonset", "deployment", "replicaset", "statefulset", // apps
+		"autoscaler", // autoscaler
+		"cronjob", "job", // batch
+		"lease", // coordination
+		"endpointslice", // discovery
+		"networkpolicy", "ingress", // networking
+		"podsecuritypolicy", // policy
+		"priorityclass", // scheduling
+		"podpreset", // settings
+		"storageclass", "volumeattachment", "csinode": // storage
+		return validation.NameIsDNSSubdomain
+	case "service":
+		return validation.NameIsDNS1035Label
+	case "namespace":
+		return validation.ValidateNamespaceName
+	case "serviceaccount":
+		return validation.ValidateServiceAccountName
+	case "certificatesigningrequest":
+		// No validation.
+		// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/certificates/validation/validation.go#L137-L140
+		return func(name string, prefix bool) []string { return nil }
+	case "role", "clusterrole", "rolebinding", "clusterrolebinding":
+		// https://github.com/kubernetes/kubernetes/blob/v1.20.0/pkg/apis/rbac/validation/validation.go#L32-L34
+		return func(name string, prefix bool) []string {
+			return apipath.IsValidPathSegmentName(name)
+		}
+	default:
+		return validation.NameIsDNSSubdomain
+	}
+}
+
+func validateNoCRDHooks(manifest []byte) error {
+	if crdHookSearch.Match(manifest) {
+		return errors.New("manifest is a crd-install hook. This hook is no longer supported in v3 and all CRDs should also exist in the crds/ directory at the top level of the chart")
+	}
+	return nil
+}
+
+func validateNoReleaseTime(manifest []byte) error {
+	if releaseTimeSearch.Match(manifest) {
+		return errors.New(".Release.Time has been removed in v3, please replace with the `now` function in your templates")
+	}
+	return nil
+}
+
+// validateMatchSelector ensures that template specs have a selector declared.
+// See https://github.com/helm/helm/issues/1990
+func validateMatchSelector(yamlStruct *K8sYamlStruct, manifest string) error {
+	switch yamlStruct.Kind {
+	case "Deployment", "ReplicaSet", "DaemonSet", "StatefulSet":
+		// verify that matchLabels or matchExpressions is present
+		if !(strings.Contains(manifest, "matchLabels") || strings.Contains(manifest, "matchExpressions")) {
+			return fmt.Errorf("a %s must contain matchLabels or matchExpressions, and %q does not", yamlStruct.Kind, yamlStruct.Metadata.Name)
+		}
+	}
+	return nil
+}
+
+// K8sYamlStruct stubs a Kubernetes YAML file.
+//
+// DEPRECATED: In Helm 4, this will be made a private type, as it is for use only within
+// the rules package.
+type K8sYamlStruct struct {
+	APIVersion string `json:"apiVersion"`
+	Kind       string
+	Metadata   k8sYamlMetadata
+}
+
+type k8sYamlMetadata struct {
+	Namespace string
+	Name      string
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go
new file mode 100644
index 0000000000..79a294326b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/values.go
@@ -0,0 +1,87 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/pkg/errors" + + "helm.sh/helm/v3/pkg/chartutil" + "helm.sh/helm/v3/pkg/lint/support" +) + +// Values lints a chart's values.yaml file. +// +// This function is deprecated and will be removed in Helm 4. +func Values(linter *support.Linter) { + ValuesWithOverrides(linter, map[string]interface{}{}) +} + +// ValuesWithOverrides tests the values.yaml file. +// +// If a schema is present in the chart, values are tested against that. Otherwise, +// they are only tested for well-formedness. +// +// If additional values are supplied, they are coalesced into the values in values.yaml. +func ValuesWithOverrides(linter *support.Linter, values map[string]interface{}) { + file := "values.yaml" + vf := filepath.Join(linter.ChartDir, file) + fileExists := linter.RunLinterRule(support.InfoSev, file, validateValuesFileExistence(vf)) + + if !fileExists { + return + } + + linter.RunLinterRule(support.ErrorSev, file, validateValuesFile(vf, values)) +} + +func validateValuesFileExistence(valuesPath string) error { + _, err := os.Stat(valuesPath) + if err != nil { + return errors.Errorf("file does not exist") + } + return nil +} + +func validateValuesFile(valuesPath string, overrides map[string]interface{}) error { + values, err := chartutil.ReadValuesFile(valuesPath) + if err != nil { + return errors.Wrap(err, "unable to parse YAML") + } + + // Helm 3.0.0 carried over the values linting from Helm 2.x, which only tests the top + // level values against the top-level expectations. Subchart values are not linted. + // We could change that. For now, though, we retain that strategy, and thus can + // coalesce tables (like reuse-values does) instead of doing the full chart + // CoalesceValues + coalescedValues := chartutil.CoalesceTables(make(map[string]interface{}, len(overrides)), overrides) + coalescedValues = chartutil.CoalesceTables(coalescedValues, values) + + ext := filepath.Ext(valuesPath) + schemaPath := valuesPath[:len(valuesPath)-len(ext)] + ".schema.json" + schema, err := ioutil.ReadFile(schemaPath) + if len(schema) == 0 { + return nil + } + if err != nil { + return err + } + return chartutil.ValidateAgainstSingleSchema(coalescedValues, schema) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go similarity index 66% rename from vendor/k8s.io/apimachinery/pkg/util/version/doc.go rename to vendor/helm.sh/helm/v3/pkg/lint/support/doc.go index 5b2b22b6d0..b9a9d09189 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go +++ b/vendor/helm.sh/helm/v3/pkg/lint/support/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright The Helm Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,5 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package version provides utilities for version number comparisons -package version // import "k8s.io/apimachinery/pkg/util/version" +/*Package support contains tools for linting charts. + +Linting is the process of testing charts for errors or warnings regarding +formatting, compilation, or standards compliance. +*/ +package support // import "helm.sh/helm/v3/pkg/lint/support" diff --git a/vendor/helm.sh/helm/v3/pkg/lint/support/message.go b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go new file mode 100644 index 0000000000..5efbc7a61c --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/lint/support/message.go @@ -0,0 +1,76 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package support + +import "fmt" + +// Severity indicates the severity of a Message. +const ( + // UnknownSev indicates that the severity of the error is unknown, and should not stop processing. + UnknownSev = iota + // InfoSev indicates information, for example missing values.yaml file + InfoSev + // WarningSev indicates that something does not meet code standards, but will likely function. + WarningSev + // ErrorSev indicates that something will not likely function. + ErrorSev +) + +// sev matches the *Sev states. +var sev = []string{"UNKNOWN", "INFO", "WARNING", "ERROR"} + +// Linter encapsulates a linting run of a particular chart. +type Linter struct { + Messages []Message + // The highest severity of all the failing lint rules + HighestSeverity int + ChartDir string +} + +// Message describes an error encountered while linting. 
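+//
+// A Message renders through Error() as, for example (illustrative values,
+// errors package assumed):
+//
+//	NewMessage(ErrorSev, "templates/", errors.New("unable to parse YAML"))
+//	// -> [ERROR] templates/: unable to parse YAML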
+type Message struct {
+	// Severity is one of the *Sev constants
+	Severity int
+	Path     string
+	Err      error
+}
+
+func (m Message) Error() string {
+	return fmt.Sprintf("[%s] %s: %s", sev[m.Severity], m.Path, m.Err.Error())
+}
+
+// NewMessage creates a new Message struct
+func NewMessage(severity int, path string, err error) Message {
+	return Message{Severity: severity, Path: path, Err: err}
+}
+
+// RunLinterRule returns true if the validation passed
+func (l *Linter) RunLinterRule(severity int, path string, err error) bool {
+	// severity is out of bounds
+	if severity < 0 || severity >= len(sev) {
+		return false
+	}
+
+	if err != nil {
+		l.Messages = append(l.Messages, NewMessage(severity, path, err))
+
+		if severity > l.HighestSeverity {
+			l.HighestSeverity = severity
+		}
+	}
+	return err == nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go
index 93b5527a1b..9ead561b2b 100644
--- a/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go
+++ b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go
@@ -23,6 +23,7 @@ import (
 	"regexp"
 	"runtime"
 	"strings"
+	"unicode"
 
 	"github.com/pkg/errors"
 	"sigs.k8s.io/yaml"
@@ -175,10 +176,25 @@ func validatePluginData(plug *Plugin, filepath string) error {
 	if !validPluginName.MatchString(plug.Metadata.Name) {
 		return fmt.Errorf("invalid plugin name at %q", filepath)
 	}
+	plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage)
+
+	// We could also validate SemVer, executable, and other fields should we so choose.
 	return nil
 }
+
+// sanitizeString normalizes spaces and removes non-printable characters.
+func sanitizeString(str string) string {
+	return strings.Map(func(r rune) rune {
+		if unicode.IsSpace(r) {
+			return ' '
+		}
+		if unicode.IsPrint(r) {
+			return r
+		}
+		return -1
+	}, str)
+}
+
 func detectDuplicates(plugs []*Plugin) error {
 	names := map[string]string{}
diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/exec.go b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go
new file mode 100644
index 0000000000..1de70b0248
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/postrender/exec.go
@@ -0,0 +1,108 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package postrender
+
+import (
+	"bytes"
+	"io"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+type execRender struct {
+	binaryPath string
+}
+
+// NewExec returns a PostRenderer implementation that calls the provided binary.
+// It returns an error if the binary cannot be found.
If the path does not +// contain any separators, it will search in $PATH, otherwise it will resolve +// any relative paths to a fully qualified path +func NewExec(binaryPath string) (PostRenderer, error) { + fullPath, err := getFullPath(binaryPath) + if err != nil { + return nil, err + } + return &execRender{fullPath}, nil +} + +// Run the configured binary for the post render +func (p *execRender) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) { + cmd := exec.Command(p.binaryPath) + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + + var postRendered = &bytes.Buffer{} + var stderr = &bytes.Buffer{} + cmd.Stdout = postRendered + cmd.Stderr = stderr + + go func() { + defer stdin.Close() + io.Copy(stdin, renderedManifests) + }() + err = cmd.Run() + if err != nil { + return nil, errors.Wrapf(err, "error while running command %s. error output:\n%s", p.binaryPath, stderr.String()) + } + + return postRendered, nil +} + +// getFullPath returns the full filepath to the binary to execute. If the path +// does not contain any separators, it will search in $PATH, otherwise it will +// resolve any relative paths to a fully qualified path +func getFullPath(binaryPath string) (string, error) { + // NOTE(thomastaylor312): I am leaving this code commented out here. During + // the implementation of post-render, it was brought up that if we are + // relying on plugins, we should actually use the plugin system so it can + // properly handle multiple OSs. This will be a feature add in the future, + // so I left this code for reference. It can be deleted or reused once the + // feature is implemented + + // Manually check the plugin dir first + // if !strings.Contains(binaryPath, string(filepath.Separator)) { + // // First check the plugin dir + // pluginDir := helmpath.DataPath("plugins") // Default location + // // If location for plugins is explicitly set, check there + // if v, ok := os.LookupEnv("HELM_PLUGINS"); ok { + // pluginDir = v + // } + // // The plugins variable can actually contain multiple paths, so loop through those + // for _, p := range filepath.SplitList(pluginDir) { + // _, err := os.Stat(filepath.Join(p, binaryPath)) + // if err != nil && !os.IsNotExist(err) { + // return "", err + // } else if err == nil { + // binaryPath = filepath.Join(p, binaryPath) + // break + // } + // } + // } + + // Now check for the binary using the given path or check if it exists in + // the path and is executable + checkedPath, err := exec.LookPath(binaryPath) + if err != nil { + return "", errors.Wrapf(err, "unable to find binary at %s", binaryPath) + } + + return filepath.Abs(checkedPath) +} diff --git a/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go new file mode 100644 index 0000000000..3af3842907 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/postrender/postrender.go @@ -0,0 +1,29 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package postrender contains an interface that can be implemented for custom +// post-renderers and an exec implementation that can be used for arbitrary +// binaries and scripts +package postrender + +import "bytes" + +type PostRenderer interface { + // Run expects a single buffer filled with Helm rendered manifests. It + // expects the modified results to be returned on a separate buffer or an + // error if there was an issue or failure while running the post render step + Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error) +} diff --git a/vendor/helm.sh/helm/v3/pkg/release/hook.go b/vendor/helm.sh/helm/v3/pkg/release/hook.go index 662320f064..cb99555822 100644 --- a/vendor/helm.sh/helm/v3/pkg/release/hook.go +++ b/vendor/helm.sh/helm/v3/pkg/release/hook.go @@ -102,5 +102,5 @@ const ( HookPhaseFailed HookPhase = "Failed" ) -// Strng converts a hook phase to a printable string +// String converts a hook phase to a printable string func (x HookPhase) String() string { return string(x) } diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go new file mode 100644 index 0000000000..dbd0df8e2d --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go @@ -0,0 +1,78 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil" + +import rspb "helm.sh/helm/v3/pkg/release" + +// FilterFunc returns true if the release object satisfies +// the predicate of the underlying filter func. +type FilterFunc func(*rspb.Release) bool + +// Check applies the FilterFunc to the release object. +func (fn FilterFunc) Check(rls *rspb.Release) bool { + if rls == nil { + return false + } + return fn(rls) +} + +// Filter applies the filter(s) to the list of provided releases +// returning the list that satisfies the filtering predicate. +func (fn FilterFunc) Filter(rels []*rspb.Release) (rets []*rspb.Release) { + for _, rel := range rels { + if fn.Check(rel) { + rets = append(rets, rel) + } + } + return +} + +// Any returns a FilterFunc that filters a list of releases +// determined by the predicate 'f0 || f1 || ... || fn'. +func Any(filters ...FilterFunc) FilterFunc { + return func(rls *rspb.Release) bool { + for _, filter := range filters { + if filter(rls) { + return true + } + } + return false + } +} + +// All returns a FilterFunc that filters a list of releases +// determined by the predicate 'f0 && f1 && ... && fn'. +func All(filters ...FilterFunc) FilterFunc { + return func(rls *rspb.Release) bool { + for _, filter := range filters { + if !filter(rls) { + return false + } + } + return true + } +} + +// StatusFilter filters a set of releases by status code. 
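+//
+// A composition sketch (illustrative, not part of this change): keep only
+// releases that are either deployed or failed, using the rspb status constants:
+//
+//	keep := Any(StatusFilter(rspb.StatusDeployed), StatusFilter(rspb.StatusFailed))
+//	subset := keep.Filter(releases)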
+func StatusFilter(status rspb.Status) FilterFunc { + return FilterFunc(func(rls *rspb.Release) bool { + if rls == nil { + return true + } + return rls.Info.Status == status + }) +} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go new file mode 100644 index 0000000000..a340dfc291 --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go @@ -0,0 +1,156 @@ +/* +Copyright The Helm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package releaseutil + +import ( + "sort" + + "helm.sh/helm/v3/pkg/release" +) + +// KindSortOrder is an ordering of Kinds. +type KindSortOrder []string + +// InstallOrder is the order in which manifests should be installed (by Kind). +// +// Those occurring earlier in the list get installed before those occurring later in the list. +var InstallOrder KindSortOrder = []string{ + "Namespace", + "NetworkPolicy", + "ResourceQuota", + "LimitRange", + "PodSecurityPolicy", + "PodDisruptionBudget", + "ServiceAccount", + "Secret", + "SecretList", + "ConfigMap", + "StorageClass", + "PersistentVolume", + "PersistentVolumeClaim", + "CustomResourceDefinition", + "ClusterRole", + "ClusterRoleList", + "ClusterRoleBinding", + "ClusterRoleBindingList", + "Role", + "RoleList", + "RoleBinding", + "RoleBindingList", + "Service", + "DaemonSet", + "Pod", + "ReplicationController", + "ReplicaSet", + "Deployment", + "HorizontalPodAutoscaler", + "StatefulSet", + "Job", + "CronJob", + "Ingress", + "APIService", +} + +// UninstallOrder is the order in which manifests should be uninstalled (by Kind). +// +// Those occurring earlier in the list get uninstalled before those occurring later in the list. +var UninstallOrder KindSortOrder = []string{ + "APIService", + "Ingress", + "Service", + "CronJob", + "Job", + "StatefulSet", + "HorizontalPodAutoscaler", + "Deployment", + "ReplicaSet", + "ReplicationController", + "Pod", + "DaemonSet", + "RoleBindingList", + "RoleBinding", + "RoleList", + "Role", + "ClusterRoleBindingList", + "ClusterRoleBinding", + "ClusterRoleList", + "ClusterRole", + "CustomResourceDefinition", + "PersistentVolumeClaim", + "PersistentVolume", + "StorageClass", + "ConfigMap", + "SecretList", + "Secret", + "ServiceAccount", + "PodDisruptionBudget", + "PodSecurityPolicy", + "LimitRange", + "ResourceQuota", + "NetworkPolicy", + "Namespace", +} + +// sort manifests by kind. +// +// Results are sorted by 'ordering', keeping order of items with equal kind/priority +func sortManifestsByKind(manifests []Manifest, ordering KindSortOrder) []Manifest { + sort.SliceStable(manifests, func(i, j int) bool { + return lessByKind(manifests[i], manifests[j], manifests[i].Head.Kind, manifests[j].Head.Kind, ordering) + }) + + return manifests +} + +// sort hooks by kind, using an out-of-place sort to preserve the input parameters. 
+//
+// Results are sorted by 'ordering', keeping order of items with equal kind/priority
+func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.Hook {
+	h := hooks
+	sort.SliceStable(h, func(i, j int) bool {
+		return lessByKind(h[i], h[j], h[i].Kind, h[j].Kind, ordering)
+	})
+
+	return h
+}
+
+func lessByKind(a interface{}, b interface{}, kindA string, kindB string, o KindSortOrder) bool {
+	ordering := make(map[string]int, len(o))
+	for v, k := range o {
+		ordering[k] = v
+	}
+
+	first, aok := ordering[kindA]
+	second, bok := ordering[kindB]
+
+	if !aok && !bok {
+		// if both are unknown then sort alphabetically by kind, keep original order if same kind
+		if kindA != kindB {
+			return kindA < kindB
+		}
+		return first < second
+	}
+	// unknown kind is last
+	if !aok {
+		return false
+	}
+	if !bok {
+		return true
+	}
+	// sort different kinds, keep original order if same priority
+	return first < second
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go
new file mode 100644
index 0000000000..0b04a4599b
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go
@@ -0,0 +1,72 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// SimpleHead defines the structure of the head of a manifest file
+type SimpleHead struct {
+	Version  string `json:"apiVersion"`
+	Kind     string `json:"kind,omitempty"`
+	Metadata *struct {
+		Name        string            `json:"name"`
+		Annotations map[string]string `json:"annotations"`
+	} `json:"metadata,omitempty"`
+}
+
+var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*")
+
+// SplitManifests takes a string of manifests and returns a map containing the individual manifests
+func SplitManifests(bigFile string) map[string]string {
+	// Basically, we're quickly splitting a stream of YAML documents into an
+	// array of YAML docs. The file name is just a place holder, but should be
+	// integer-sortable so that manifests get output in the same order as the
+	// input (see `BySplitManifestsOrder`).
+	tpl := "manifest-%d"
+	res := map[string]string{}
+	// Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly.
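+	// Editor's note (illustrative, not upstream): e.g. "a: 1\n---\nb: 2"
+	// splits into {"manifest-0": "a: 1", "manifest-1": "b: 2"} below.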
+	bigFileTmp := strings.TrimSpace(bigFile)
+	docs := sep.Split(bigFileTmp, -1)
+	var count int
+	for _, d := range docs {
+		if d == "" {
+			continue
+		}
+
+		d = strings.TrimSpace(d)
+		res[fmt.Sprintf(tpl, count)] = d
+		count = count + 1
+	}
+	return res
+}
+
+// BySplitManifestsOrder sorts by in-file manifest order, as provided in function `SplitManifests`
+type BySplitManifestsOrder []string
+
+func (a BySplitManifestsOrder) Len() int { return len(a) }
+func (a BySplitManifestsOrder) Less(i, j int) bool {
+	// Split `manifest-%d`
+	anum, _ := strconv.ParseInt(a[i][len("manifest-"):], 10, 0)
+	bnum, _ := strconv.ParseInt(a[j][len("manifest-"):], 10, 0)
+	return anum < bnum
+}
+func (a BySplitManifestsOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
new file mode 100644
index 0000000000..e834145000
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go
@@ -0,0 +1,233 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil
+
+import (
+	"log"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+	"sigs.k8s.io/yaml"
+
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/release"
+)
+
+// Manifest represents a manifest file, which has a name and some content.
+type Manifest struct {
+	Name    string
+	Content string
+	Head    *SimpleHead
+}
+
+// manifestFile represents a file that contains a manifest.
+type manifestFile struct {
+	entries map[string]string
+	path    string
+	apis    chartutil.VersionSet
+}
+
+// result is an intermediate structure used during sorting.
+type result struct {
+	hooks   []*release.Hook
+	generic []Manifest
+}
+
+// TODO: Refactor this out. It's here because naming conventions were not followed through.
+// So fix the Test hook names and then remove this.
+var events = map[string]release.HookEvent{
+	release.HookPreInstall.String():   release.HookPreInstall,
+	release.HookPostInstall.String():  release.HookPostInstall,
+	release.HookPreDelete.String():    release.HookPreDelete,
+	release.HookPostDelete.String():   release.HookPostDelete,
+	release.HookPreUpgrade.String():   release.HookPreUpgrade,
+	release.HookPostUpgrade.String():  release.HookPostUpgrade,
+	release.HookPreRollback.String():  release.HookPreRollback,
+	release.HookPostRollback.String(): release.HookPostRollback,
+	release.HookTest.String():         release.HookTest,
+	// Support test-success for backward compatibility with Helm 2 tests
+	"test-success": release.HookTest,
+}
+
+// SortManifests takes a map of filename/YAML contents, splits the file
+// by manifest entries, and sorts the entries into hook types.
+//
+// The resulting hooks struct will be populated with all of the generated hooks.
+// Any file that does not declare one of the hook types will be placed in the
+// 'generic' bucket.
+//
+// Files that do not parse into the expected format are simply placed into a map and
+// returned.
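+//
+// Illustrative call (editor's sketch, not upstream code; "rendered" and "caps"
+// stand in for a map of rendered templates and a *chartutil.Capabilities):
+//
+//	hooks, manifests, err := SortManifests(rendered, caps.APIVersions, InstallOrder)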
+func SortManifests(files map[string]string, apis chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) {
+	result := &result{}
+
+	var sortedFilePaths []string
+	for filePath := range files {
+		sortedFilePaths = append(sortedFilePaths, filePath)
+	}
+	sort.Strings(sortedFilePaths)
+
+	for _, filePath := range sortedFilePaths {
+		content := files[filePath]
+
+		// Skip partials. We could return these as a separate map, but there doesn't
+		// seem to be any need for that at this time.
+		if strings.HasPrefix(path.Base(filePath), "_") {
+			continue
+		}
+		// Skip empty files and log this.
+		if strings.TrimSpace(content) == "" {
+			continue
+		}
+
+		manifestFile := &manifestFile{
+			entries: SplitManifests(content),
+			path:    filePath,
+			apis:    apis,
+		}
+
+		if err := manifestFile.sort(result); err != nil {
+			return result.hooks, result.generic, err
+		}
+	}
+
+	return sortHooksByKind(result.hooks, ordering), sortManifestsByKind(result.generic, ordering), nil
+}
+
+// sort takes a manifestFile object which may contain multiple resource definition
+// entries and sorts each entry by hook types, and saves the resulting hooks and
+// generic manifests (or non-hooks) to the result struct.
+//
+// To determine hook type, it looks for a YAML structure like this:
+//
+//  kind: SomeKind
+//  apiVersion: v1
+//  metadata:
+//    annotations:
+//      helm.sh/hook: pre-install
+//
+// To determine the policy to delete the hook, it looks for a YAML structure like this:
+//
+//  kind: SomeKind
+//  apiVersion: v1
+//  metadata:
+//    annotations:
+//      helm.sh/hook-delete-policy: hook-succeeded
+func (file *manifestFile) sort(result *result) error {
+	// Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys)
+	var sortedEntryKeys []string
+	for entryKey := range file.entries {
+		sortedEntryKeys = append(sortedEntryKeys, entryKey)
+	}
+	sort.Sort(BySplitManifestsOrder(sortedEntryKeys))
+
+	for _, entryKey := range sortedEntryKeys {
+		m := file.entries[entryKey]
+
+		var entry SimpleHead
+		if err := yaml.Unmarshal([]byte(m), &entry); err != nil {
+			return errors.Wrapf(err, "YAML parse error on %s", file.path)
+		}
+
+		if !hasAnyAnnotation(entry) {
+			result.generic = append(result.generic, Manifest{
+				Name:    file.path,
+				Content: m,
+				Head:    &entry,
+			})
+			continue
+		}
+
+		hookTypes, ok := entry.Metadata.Annotations[release.HookAnnotation]
+		if !ok {
+			result.generic = append(result.generic, Manifest{
+				Name:    file.path,
+				Content: m,
+				Head:    &entry,
+			})
+			continue
+		}
+
+		hw := calculateHookWeight(entry)
+
+		h := &release.Hook{
+			Name:           entry.Metadata.Name,
+			Kind:           entry.Kind,
+			Path:           file.path,
+			Manifest:       m,
+			Events:         []release.HookEvent{},
+			Weight:         hw,
+			DeletePolicies: []release.HookDeletePolicy{},
+		}
+
+		isUnknownHook := false
+		for _, hookType := range strings.Split(hookTypes, ",") {
+			hookType = strings.ToLower(strings.TrimSpace(hookType))
+			e, ok := events[hookType]
+			if !ok {
+				isUnknownHook = true
+				break
+			}
+			h.Events = append(h.Events, e)
+		}
+
+		if isUnknownHook {
+			log.Printf("info: skipping unknown hook: %q", hookTypes)
+			continue
+		}
+
+		result.hooks = append(result.hooks, h)
+
+		operateAnnotationValues(entry, release.HookDeleteAnnotation, func(value string) {
+			h.DeletePolicies = append(h.DeletePolicies, release.HookDeletePolicy(value))
+		})
+	}
+
+	return nil
+}
+
+// hasAnyAnnotation returns true if the given entry has any annotations at all.
+func hasAnyAnnotation(entry SimpleHead) bool {
+	return entry.Metadata != nil &&
+		entry.Metadata.Annotations != nil &&
+		len(entry.Metadata.Annotations) != 0
+}
+
+// calculateHookWeight finds the weight in the hook weight annotation.
+//
+// If no weight is found, the assigned weight is 0
+func calculateHookWeight(entry SimpleHead) int {
+	hws := entry.Metadata.Annotations[release.HookWeightAnnotation]
+	hw, err := strconv.Atoi(hws)
+	if err != nil {
+		hw = 0
+	}
+	return hw
+}
+
+// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation
+func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) {
+	if dps, ok := entry.Metadata.Annotations[annotation]; ok {
+		for _, dp := range strings.Split(dps, ",") {
+			dp = strings.ToLower(strings.TrimSpace(dp))
+			operate(dp)
+		}
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go
new file mode 100644
index 0000000000..1a8aa78a62
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil"
+
+import (
+	"sort"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+type list []*rspb.Release
+
+func (s list) Len() int      { return len(s) }
+func (s list) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// ByName sorts releases by name
+type ByName struct{ list }
+
+// Less compares two releases
+func (s ByName) Less(i, j int) bool { return s.list[i].Name < s.list[j].Name }
+
+// ByDate sorts releases by date
+type ByDate struct{ list }
+
+// Less compares two releases
+func (s ByDate) Less(i, j int) bool {
+	ti := s.list[i].Info.LastDeployed.Unix()
+	tj := s.list[j].Info.LastDeployed.Unix()
+	return ti < tj
+}
+
+// ByRevision sorts releases by revision number
+type ByRevision struct{ list }
+
+// Less compares two releases
+func (s ByRevision) Less(i, j int) bool {
+	return s.list[i].Version < s.list[j].Version
+}
+
+// Reverse reverses the list of releases sorted by the sort func.
+func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) {
+	sortFn(list)
+	for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+		list[i], list[j] = list[j], list[i]
+	}
+}
+
+// SortByName returns the list of releases sorted
+// in lexicographical order.
+func SortByName(list []*rspb.Release) {
+	sort.Sort(ByName{list})
+}
+
+// SortByDate returns the list of releases sorted by a
+// release's last deployed time (in seconds).
+func SortByDate(list []*rspb.Release) {
+	sort.Sort(ByDate{list})
+}
+
+// SortByRevision returns the list of releases sorted by a
+// release's revision number (release.Version).
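+//
+// Editor's illustration (not upstream): combined with Reverse, this gives a
+// newest-first ordering:
+//
+//	SortByRevision(rels)          // oldest revision first
+//	Reverse(rels, SortByRevision) // newest revision first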
+func SortByRevision(list []*rspb.Release) {
+	sort.Sort(ByRevision{list})
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go
index 92892bb858..956997cc9f 100644
--- a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go
+++ b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go
@@ -48,6 +48,7 @@ type Entry struct {
 	KeyFile               string `json:"keyFile"`
 	CAFile                string `json:"caFile"`
 	InsecureSkipTLSverify bool   `json:"insecure_skip_tls_verify"`
+	PassCredentialsAll    bool   `json:"pass_credentials_all"`
 }
 
 // ChartRepository represents a chart repository
@@ -82,6 +83,8 @@ func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository,
 // Load loads a directory of charts as if it were a repository.
 //
 // It requires the presence of an index.yaml file in the directory.
+//
+// Deprecated: remove in Helm 4.
 func (r *ChartRepository) Load() error {
 	dirInfo, err := os.Stat(r.Config.Name)
 	if err != nil {
@@ -99,7 +102,7 @@
 		if strings.Contains(f.Name(), "-index.yaml") {
 			i, err := LoadIndexFile(path)
 			if err != nil {
-				return nil
+				return err
 			}
 			r.IndexFile = i
 		} else if strings.HasSuffix(f.Name(), ".tgz") {
@@ -127,6 +130,7 @@ func (r *ChartRepository) DownloadIndexFile() (string, error) {
 		getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSverify),
 		getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile),
 		getter.WithBasicAuth(r.Config.Username, r.Config.Password),
+		getter.WithPassCredentialsAll(r.Config.PassCredentialsAll),
 	)
 	if err != nil {
 		return "", err
@@ -137,7 +141,7 @@ func (r *ChartRepository) DownloadIndexFile() (string, error) {
 		return "", err
 	}
 
-	indexFile, err := loadIndex(index)
+	indexFile, err := loadIndex(index, r.Config.URL)
 	if err != nil {
 		return "", err
 	}
@@ -187,7 +191,9 @@
 		}
 
 		if !r.IndexFile.Has(ch.Name(), ch.Metadata.Version) {
-			r.IndexFile.Add(ch.Metadata, path, r.Config.URL, digest)
+			if err := r.IndexFile.MustAdd(ch.Metadata, path, r.Config.URL, digest); err != nil {
+				return errors.Wrapf(err, "failed adding %s to index", path)
+			}
 		}
 		// TODO: If a chart exists, but has a different Digest, should we error?
 	}
@@ -213,6 +219,15 @@ func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion
 // but it also receives credentials and TLS verify flag for the chart repository.
 // TODO Helm 4, FindChartInAuthAndTLSRepoURL should be integrated into FindChartInAuthRepoURL.
 func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) {
+	return FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, insecureSkipTLSverify, false, getters)
+}
+
+// FindChartInAuthAndTLSAndPassRepoURL finds chart in chart repository pointed by repoURL
+// without adding repo to repositories, like FindChartInRepoURL,
+// but it also receives credentials, TLS verify flag, and if credentials should
+// be passed on to other domains.
+// TODO Helm 4, FindChartInAuthAndTLSAndPassRepoURL should be integrated into FindChartInAuthRepoURL.
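+//
+// Illustrative call (editor's sketch; URL, chart and credential values are
+// placeholders, and "settings" is an assumed *cli.EnvSettings):
+//
+//	u, err := FindChartInAuthAndTLSAndPassRepoURL(
+//		"https://charts.example.com", "user", "pass", "mychart", "1.2.3",
+//		"", "", "", false, false, getter.All(settings))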
+func FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify, passCredentialsAll bool, getters getter.Providers) (string, error) {
 	// Download and write the index file to a temporary location
 	buf := make([]byte, 20)
@@ -223,6 +238,7 @@ func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartV
 		URL:                repoURL,
 		Username:           username,
 		Password:           password,
+		PassCredentialsAll: passCredentialsAll,
 		CertFile:           certFile,
 		KeyFile:            keyFile,
 		CAFile:             caFile,
@@ -270,7 +286,8 @@
 // ResolveReferenceURL resolves refURL relative to baseURL.
 // If refURL is absolute, it simply returns refURL.
 func ResolveReferenceURL(baseURL, refURL string) (string, error) {
-	parsedBaseURL, err := url.Parse(baseURL)
+	// We need a trailing slash for ResolveReference to work, but make sure there isn't already one
+	parsedBaseURL, err := url.Parse(strings.TrimSuffix(baseURL, "/") + "/")
 	if err != nil {
 		return "", errors.Wrapf(err, "failed to parse %s as URL", baseURL)
 	}
@@ -280,8 +297,6 @@
 		return "", errors.Wrapf(err, "failed to parse %s as URL", refURL)
 	}
 
-	// We need a trailing slash for ResolveReference to work, but make sure there isn't already one
-	parsedBaseURL.Path = strings.TrimSuffix(parsedBaseURL.Path, "/") + "/"
 	return parsedBaseURL.ResolveReference(parsedRefURL).String(), nil
 }
diff --git a/vendor/helm.sh/helm/v3/pkg/repo/index.go b/vendor/helm.sh/helm/v3/pkg/repo/index.go
index 43f1e1c879..e86b17349f 100644
--- a/vendor/helm.sh/helm/v3/pkg/repo/index.go
+++ b/vendor/helm.sh/helm/v3/pkg/repo/index.go
@@ -19,6 +19,7 @@ package repo
 import (
 	"bytes"
 	"io/ioutil"
+	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -105,16 +106,27 @@ func LoadIndexFile(path string) (*IndexFile, error) {
 	if err != nil {
 		return nil, err
 	}
-	return loadIndex(b)
+	i, err := loadIndex(b, path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading %s", path)
+	}
+	return i, nil
 }
 
-// Add adds a file to the index
+// MustAdd adds a file to the index
 // This can leave the index in an unsorted state
-func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) {
+func (i IndexFile) MustAdd(md *chart.Metadata, filename, baseURL, digest string) error {
+	if md.APIVersion == "" {
+		md.APIVersion = chart.APIVersionV1
+	}
+	if err := md.Validate(); err != nil {
+		return errors.Wrapf(err, "validate failed for %s", filename)
+	}
+
 	u := filename
 	if baseURL != "" {
-		var err error
 		_, file := filepath.Split(filename)
+		var err error
 		u, err = urlutil.URLJoin(baseURL, file)
 		if err != nil {
 			u = path.Join(baseURL, file)
@@ -126,10 +138,17 @@
 		Digest:  digest,
 		Created: time.Now(),
 	}
-	if ee, ok := i.Entries[md.Name]; !ok {
-		i.Entries[md.Name] = ChartVersions{cr}
-	} else {
-		i.Entries[md.Name] = append(ee, cr)
+	ee := i.Entries[md.Name]
+	i.Entries[md.Name] = append(ee, cr)
+	return nil
+}
+
+// Add adds a file to the index and logs an error.
+//
+// Deprecated: Use index.MustAdd instead.
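+//
+// Editor's note (illustrative): new callers should use MustAdd and handle the
+// error themselves, e.g. (idx and md are hypothetical placeholders):
+//
+//	if err := idx.MustAdd(md, "mychart-0.1.0.tgz", baseURL, digest); err != nil {
+//		return err
+//	}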
+func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) {
+	if err := i.MustAdd(md, filename, baseURL, digest); err != nil {
+		log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", md.Name, md.Version, filename, err)
+	}
 }
@@ -248,7 +267,7 @@ type ChartVersion struct {
 	// YAML parser enabled, this field must be present.
 	TillerVersionDeprecated string `json:"tillerVersion,omitempty"`
 
-	// URLDeprecated is deprectaed in Helm 3, superseded by URLs. It is ignored. However,
+	// URLDeprecated is deprecated in Helm 3, superseded by URLs. It is ignored. However,
 	// with a strict YAML parser enabled, this must be present on the struct.
 	URLDeprecated string `json:"url,omitempty"`
 }
@@ -294,19 +313,34 @@ func IndexDirectory(dir, baseURL string) (*IndexFile, error) {
 		if err != nil {
 			return index, err
 		}
-		index.Add(c.Metadata, fname, parentURL, hash)
+		if err := index.MustAdd(c.Metadata, fname, parentURL, hash); err != nil {
+			return index, errors.Wrapf(err, "failed adding %s to index", fname)
+		}
 	}
 	return index, nil
 }
 
 // loadIndex loads an index file and does minimal validity checking.
 //
+// The source parameter is only used for logging.
 // This will fail if API Version is not set (ErrNoAPIVersion) or if the unmarshal fails.
-func loadIndex(data []byte) (*IndexFile, error) {
+func loadIndex(data []byte, source string) (*IndexFile, error) {
 	i := &IndexFile{}
 	if err := yaml.UnmarshalStrict(data, i); err != nil {
 		return i, err
 	}
+
+	for name, cvs := range i.Entries {
+		for idx := len(cvs) - 1; idx >= 0; idx-- {
+			if cvs[idx].APIVersion == "" {
+				cvs[idx].APIVersion = chart.APIVersionV1
+			}
+			if err := cvs[idx].Validate(); err != nil {
+				log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err)
+				cvs = append(cvs[:idx], cvs[idx+1:]...)
+			}
+		}
+	}
 	i.SortEntries()
 	if i.APIVersion == "" {
 		return i, ErrNoAPIVersion
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go
new file mode 100644
index 0000000000..94c278875e
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/cfgmaps.go
@@ -0,0 +1,257 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kblabels "k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/validation"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*ConfigMaps)(nil)
+
+// ConfigMapsDriverName is the string name of the driver.
+const ConfigMapsDriverName = "ConfigMap"
+
+// ConfigMaps is a wrapper around an implementation of a kubernetes
+// ConfigMapsInterface.
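+//
+// Editor's sketch (not upstream docs): a driver is typically constructed from
+// a client-go ConfigMap client for the release namespace, e.g.
+//
+//	d := NewConfigMaps(clientset.CoreV1().ConfigMaps("my-namespace"))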
+type ConfigMaps struct {
+	impl corev1.ConfigMapInterface
+	Log  func(string, ...interface{})
+}
+
+// NewConfigMaps initializes a new ConfigMaps wrapping an implementation of
+// the kubernetes ConfigMapsInterface.
+func NewConfigMaps(impl corev1.ConfigMapInterface) *ConfigMaps {
+	return &ConfigMaps{
+		impl: impl,
+		Log:  func(_ string, _ ...interface{}) {},
+	}
+}
+
+// Name returns the name of the driver.
+func (cfgmaps *ConfigMaps) Name() string {
+	return ConfigMapsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned
+// or error if not found.
+func (cfgmaps *ConfigMaps) Get(key string) (*rspb.Release, error) {
+	// fetch the configmap holding the release named by key
+	obj, err := cfgmaps.impl.Get(context.Background(), key, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, ErrReleaseNotFound
+		}
+
+		cfgmaps.Log("get: failed to get %q: %s", key, err)
+		return nil, err
+	}
+	// found the configmap, decode the base64 data string
+	r, err := decodeRelease(obj.Data["release"])
+	if err != nil {
+		cfgmaps.Log("get: failed to decode data %q: %s", key, err)
+		return nil, err
+	}
+	// return the release object
+	return r, nil
+}
+
+// List fetches all releases and returns the list of releases such
+// that filter(release) == true. An error is returned if the
+// configmap fails to retrieve the releases.
+func (cfgmaps *ConfigMaps) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
+	lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+	opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+	list, err := cfgmaps.impl.List(context.Background(), opts)
+	if err != nil {
+		cfgmaps.Log("list: failed to list: %s", err)
+		return nil, err
+	}
+
+	var results []*rspb.Release
+
+	// iterate over the configmaps object list
+	// and decode each release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(item.Data["release"])
+		if err != nil {
+			cfgmaps.Log("list: failed to decode release: %v: %s", item, err)
+			continue
+		}
+
+		rls.Labels = item.ObjectMeta.Labels
+
+		if filter(rls) {
+			results = append(results, rls)
+		}
+	}
+	return results, nil
+}
+
+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the configmap fails to retrieve the releases.
+func (cfgmaps *ConfigMaps) Query(labels map[string]string) ([]*rspb.Release, error) {
+	ls := kblabels.Set{}
+	for k, v := range labels {
+		if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+			return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+		}
+		ls[k] = v
+	}
+
+	opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()}
+
+	list, err := cfgmaps.impl.List(context.Background(), opts)
+	if err != nil {
+		cfgmaps.Log("query: failed to query with labels: %s", err)
+		return nil, err
+	}
+
+	if len(list.Items) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	var results []*rspb.Release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(item.Data["release"])
+		if err != nil {
+			cfgmaps.Log("query: failed to decode release: %s", err)
+			continue
+		}
+		results = append(results, rls)
+	}
+	return results, nil
+}
+
+// Create creates a new ConfigMap holding the release. If the
+// ConfigMap already exists, ErrReleaseExists is returned.
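+//
+// Illustrative use (editor's example; Helm itself derives the key as
+// "sh.helm.release.v1.<name>.v<version>"):
+//
+//	err := d.Create("sh.helm.release.v1.myapp.v1", rel)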
+func (cfgmaps *ConfigMaps) Create(key string, rls *rspb.Release) error {
+	// set labels for configmaps object meta data
+	var lbs labels
+
+	lbs.init()
+	lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix())))
+
+	// create a new configmap to hold the release
+	obj, err := newConfigMapsObject(key, rls, lbs)
+	if err != nil {
+		cfgmaps.Log("create: failed to encode release %q: %s", rls.Name, err)
+		return err
+	}
+	// push the configmap object out into the kubiverse
+	if _, err := cfgmaps.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
+		if apierrors.IsAlreadyExists(err) {
+			return ErrReleaseExists
+		}
+
+		cfgmaps.Log("create: failed to create: %s", err)
+		return err
+	}
+	return nil
+}
+
+// Update updates the ConfigMap holding the release. If not found
+// the ConfigMap is created to hold the release.
+func (cfgmaps *ConfigMaps) Update(key string, rls *rspb.Release) error {
+	// set labels for configmaps object meta data
+	var lbs labels
+
+	lbs.init()
+	lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix())))
+
+	// create a new configmap object to hold the release
+	obj, err := newConfigMapsObject(key, rls, lbs)
+	if err != nil {
+		cfgmaps.Log("update: failed to encode release %q: %s", rls.Name, err)
+		return err
+	}
+	// push the configmap object out into the kubiverse
+	_, err = cfgmaps.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
+	if err != nil {
+		cfgmaps.Log("update: failed to update: %s", err)
+		return err
+	}
+	return nil
+}
+
+// Delete deletes the ConfigMap holding the release named by key.
+func (cfgmaps *ConfigMaps) Delete(key string) (rls *rspb.Release, err error) {
+	// fetch the release to check existence
+	if rls, err = cfgmaps.Get(key); err != nil {
+		return nil, err
+	}
+	// delete the release
+	if err = cfgmaps.impl.Delete(context.Background(), key, metav1.DeleteOptions{}); err != nil {
+		return rls, err
+	}
+	return rls, nil
+}
+
+// newConfigMapsObject constructs a kubernetes ConfigMap object
+// to store a release. Each configmap data entry is the base64
+// encoded gzipped string of a release.
+//
+// The following labels are used within each configmap:
+//
+//	"modifiedAt" - timestamp indicating when this configmap was last modified. (set in Update)
+//	"createdAt" - timestamp indicating when this configmap was created. (set in Create)
+//	"version" - version of the release.
+//	"status" - status of the release (see pkg/release/status.go for variants)
+//	"owner" - owner of the configmap, currently "helm".
+//	"name" - name of the release.
+//
+func newConfigMapsObject(key string, rls *rspb.Release, lbs labels) (*v1.ConfigMap, error) {
+	const owner = "helm"
+
+	// encode the release
+	s, err := encodeRelease(rls)
+	if err != nil {
+		return nil, err
+	}
+
+	if lbs == nil {
+		lbs.init()
+	}
+
+	// apply labels
+	lbs.set("name", rls.Name)
+	lbs.set("owner", owner)
+	lbs.set("status", rls.Info.Status.String())
+	lbs.set("version", strconv.Itoa(rls.Version))
+
+	// create and return configmap object
+	return &v1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   key,
+			Labels: lbs.toMap(),
+		},
+		Data: map[string]string{"release": s},
+	}, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go
new file mode 100644
index 0000000000..9c01f37660
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/driver.go
@@ -0,0 +1,105 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var (
+	// ErrReleaseNotFound indicates that a release is not found.
+	ErrReleaseNotFound = errors.New("release: not found")
+	// ErrReleaseExists indicates that a release already exists.
+	ErrReleaseExists = errors.New("release: already exists")
+	// ErrInvalidKey indicates that a release key could not be parsed.
+	ErrInvalidKey = errors.New("release: invalid key")
+	// ErrNoDeployedReleases indicates that there are no releases with the given key in the deployed state
+	ErrNoDeployedReleases = errors.New("has no deployed releases")
+)
+
+// StorageDriverError records an error and the release name that caused it
+type StorageDriverError struct {
+	ReleaseName string
+	Err         error
+}
+
+func (e *StorageDriverError) Error() string {
+	return fmt.Sprintf("%q %s", e.ReleaseName, e.Err.Error())
+}
+
+func (e *StorageDriverError) Unwrap() error { return e.Err }
+
+func NewErrNoDeployedReleases(releaseName string) error {
+	return &StorageDriverError{
+		ReleaseName: releaseName,
+		Err:         ErrNoDeployedReleases,
+	}
+}
+
+// Creator is the interface that wraps the Create method.
+//
+// Create stores the release or returns ErrReleaseExists
+// if an identical release already exists.
+type Creator interface {
+	Create(key string, rls *rspb.Release) error
+}
+
+// Updator is the interface that wraps the Update method.
+//
+// Update updates an existing release or returns
+// ErrReleaseNotFound if the release does not exist.
+type Updator interface {
+	Update(key string, rls *rspb.Release) error
+}
+
+// Deletor is the interface that wraps the Delete method.
+//
+// Delete deletes the release named by key or returns
+// ErrReleaseNotFound if the release does not exist.
+type Deletor interface {
+	Delete(key string) (*rspb.Release, error)
+}
+
+// Queryor is the interface that wraps the Get and List methods.
+//
+// Get returns the release named by key or returns ErrReleaseNotFound
+// if the release does not exist.
+//
+// List returns the set of all releases that satisfy the filter predicate.
+//
+// Query returns the set of all releases that match the provided label set.
+type Queryor interface {
+	Get(key string) (*rspb.Release, error)
+	List(filter func(*rspb.Release) bool) ([]*rspb.Release, error)
+	Query(labels map[string]string) ([]*rspb.Release, error)
+}
+
+// Driver is the interface composed of Creator, Updator, Deletor, and Queryor
+// interfaces. It defines the behavior for storing, updating, deleting,
+// and retrieving Helm releases from some underlying storage mechanism,
+// e.g. memory, configmaps.
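+//
+// Editor's illustration (not upstream): implementations are interchangeable
+// behind this interface, e.g.
+//
+//	var d Driver = NewMemory() // or NewSecrets(...), NewConfigMaps(...)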
+type Driver interface {
+	Creator
+	Updator
+	Deletor
+	Queryor
+	Name() string
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go
new file mode 100644
index 0000000000..eb7118fe5f
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/labels.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+// labels is a map of key value pairs to be included as metadata in a configmap object.
+type labels map[string]string
+
+func (lbs *labels) init()                { *lbs = labels(make(map[string]string)) }
+func (lbs labels) get(key string) string { return lbs[key] }
+func (lbs labels) set(key, val string)   { lbs[key] = val }
+
+func (lbs labels) keys() (ls []string) {
+	for key := range lbs {
+		ls = append(ls, key)
+	}
+	return
+}
+
+func (lbs labels) match(set labels) bool {
+	for _, key := range set.keys() {
+		if lbs.get(key) != set.get(key) {
+			return false
+		}
+	}
+	return true
+}
+
+func (lbs labels) toMap() map[string]string { return lbs }
+
+func (lbs *labels) fromMap(kvs map[string]string) {
+	for k, v := range kvs {
+		lbs.set(k, v)
+	}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go
new file mode 100644
index 0000000000..91378f5885
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/memory.go
@@ -0,0 +1,240 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+	"strconv"
+	"strings"
+	"sync"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*Memory)(nil)
+
+const (
+	// MemoryDriverName is the string name of this driver.
+	MemoryDriverName = "Memory"
+
+	defaultNamespace = "default"
+)
+
+// A map of release names to list of release records
+type memReleases map[string]records
+
+// Memory is the in-memory storage driver implementation.
+type Memory struct {
+	sync.RWMutex
+	namespace string
+	// A map of namespaces to releases
+	cache map[string]memReleases
+}
+
+// NewMemory initializes a new memory driver.
+func NewMemory() *Memory {
+	return &Memory{cache: map[string]memReleases{}, namespace: "default"}
+}
+
+// SetNamespace sets a specific namespace in which releases will be accessed.
+// An empty string indicates all namespaces (for the list operation)
+func (mem *Memory) SetNamespace(ns string) {
+	mem.namespace = ns
+}
+
+// Name returns the name of the driver.
+func (mem *Memory) Name() string {
+	return MemoryDriverName
+}
+
+// Get returns the release named by key or returns ErrReleaseNotFound.
+func (mem *Memory) Get(key string) (*rspb.Release, error) {
+	defer unlock(mem.rlock())
+
+	keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.")
+	switch elems := strings.Split(keyWithoutPrefix, ".v"); len(elems) {
+	case 2:
+		name, ver := elems[0], elems[1]
+		if _, err := strconv.Atoi(ver); err != nil {
+			return nil, ErrInvalidKey
+		}
+		if recs, ok := mem.cache[mem.namespace][name]; ok {
+			if r := recs.Get(key); r != nil {
+				return r.rls, nil
+			}
+		}
+		return nil, ErrReleaseNotFound
+	default:
+		return nil, ErrInvalidKey
+	}
+}
+
+// List returns the list of all releases such that filter(release) == true
+func (mem *Memory) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
+	defer unlock(mem.rlock())
+
+	var ls []*rspb.Release
+	for namespace := range mem.cache {
+		if mem.namespace != "" {
+			// Should only list releases of this namespace
+			namespace = mem.namespace
+		}
+		for _, recs := range mem.cache[namespace] {
+			recs.Iter(func(_ int, rec *record) bool {
+				if filter(rec.rls) {
+					ls = append(ls, rec.rls)
+				}
+				return true
+			})
+		}
+		if mem.namespace != "" {
+			// Should only list releases of this namespace
+			break
+		}
+	}
+	return ls, nil
+}
+
+// Query returns the set of releases that match the provided set of labels
+func (mem *Memory) Query(keyvals map[string]string) ([]*rspb.Release, error) {
+	defer unlock(mem.rlock())
+
+	var lbs labels
+
+	lbs.init()
+	lbs.fromMap(keyvals)
+
+	var ls []*rspb.Release
+	for namespace := range mem.cache {
+		if mem.namespace != "" {
+			// Should only query releases of this namespace
+			namespace = mem.namespace
+		}
+		for _, recs := range mem.cache[namespace] {
+			recs.Iter(func(_ int, rec *record) bool {
+				// A query for a release name that doesn't exist (has been deleted)
+				// can cause rec to be nil.
+				if rec == nil {
+					return false
+				}
+				if rec.lbs.match(lbs) {
+					ls = append(ls, rec.rls)
+				}
+				return true
+			})
+		}
+		if mem.namespace != "" {
+			// Should only query releases of this namespace
+			break
+		}
+	}
+
+	if len(ls) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	return ls, nil
+}
+
+// Create creates a new release or returns ErrReleaseExists.
+func (mem *Memory) Create(key string, rls *rspb.Release) error {
+	defer unlock(mem.wlock())
+
+	// For backwards compatibility, we protect against an unset namespace
+	namespace := rls.Namespace
+	if namespace == "" {
+		namespace = defaultNamespace
+	}
+	mem.SetNamespace(namespace)
+
+	if _, ok := mem.cache[namespace]; !ok {
+		mem.cache[namespace] = memReleases{}
+	}
+
+	if recs, ok := mem.cache[namespace][rls.Name]; ok {
+		if err := recs.Add(newRecord(key, rls)); err != nil {
+			return err
+		}
+		mem.cache[namespace][rls.Name] = recs
+		return nil
+	}
+	mem.cache[namespace][rls.Name] = records{newRecord(key, rls)}
+	return nil
+}
+
+// Update updates a release or returns ErrReleaseNotFound.
+func (mem *Memory) Update(key string, rls *rspb.Release) error {
+	defer unlock(mem.wlock())
+
+	// For backwards compatibility, we protect against an unset namespace
+	namespace := rls.Namespace
+	if namespace == "" {
+		namespace = defaultNamespace
+	}
+	mem.SetNamespace(namespace)
+
+	if _, ok := mem.cache[namespace]; ok {
+		if rs, ok := mem.cache[namespace][rls.Name]; ok && rs.Exists(key) {
+			rs.Replace(key, newRecord(key, rls))
+			return nil
+		}
+	}
+	return ErrReleaseNotFound
+}
+
+// Delete deletes a release or returns ErrReleaseNotFound.
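+//
+// Editor's note (illustrative): the key must match the release-key format
+// parsed below, e.g. "myapp.v3" or "sh.helm.release.v1.myapp.v3"; anything
+// else returns ErrInvalidKey.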
+func (mem *Memory) Delete(key string) (*rspb.Release, error) {
+	defer unlock(mem.wlock())
+
+	keyWithoutPrefix := strings.TrimPrefix(key, "sh.helm.release.v1.")
+	elems := strings.Split(keyWithoutPrefix, ".v")
+
+	if len(elems) != 2 {
+		return nil, ErrInvalidKey
+	}
+
+	name, ver := elems[0], elems[1]
+	if _, err := strconv.Atoi(ver); err != nil {
+		return nil, ErrInvalidKey
+	}
+	if _, ok := mem.cache[mem.namespace]; ok {
+		if recs, ok := mem.cache[mem.namespace][name]; ok {
+			if r := recs.Remove(key); r != nil {
+				// recs.Remove changes the slice reference, so we have to re-assign it.
+				mem.cache[mem.namespace][name] = recs
+				return r.rls, nil
+			}
+		}
+	}
+	return nil, ErrReleaseNotFound
+}
+
+// wlock locks mem for writing
+func (mem *Memory) wlock() func() {
+	mem.Lock()
+	return func() { mem.Unlock() }
+}
+
+// rlock locks mem for reading
+func (mem *Memory) rlock() func() {
+	mem.RLock()
+	return func() { mem.RUnlock() }
+}
+
+// unlock calls fn which reverses a mem.rlock or mem.wlock. e.g:
+// ```defer unlock(mem.rlock())```, locks mem for reading at the
+// call point of defer and unlocks upon exiting the block.
+func unlock(fn func()) { fn() }
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go
new file mode 100644
index 0000000000..9df173384c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/records.go
@@ -0,0 +1,124 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"sort"
+	"strconv"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+// records holds a list of in-memory release records
+type records []*record
+
+func (rs records) Len() int           { return len(rs) }
+func (rs records) Swap(i, j int)      { rs[i], rs[j] = rs[j], rs[i] }
+func (rs records) Less(i, j int) bool { return rs[i].rls.Version < rs[j].rls.Version }
+
+func (rs *records) Add(r *record) error {
+	if r == nil {
+		return nil
+	}
+
+	if rs.Exists(r.key) {
+		return ErrReleaseExists
+	}
+
+	*rs = append(*rs, r)
+	sort.Sort(*rs)
+
+	return nil
+}
+
+func (rs records) Get(key string) *record {
+	if i, ok := rs.Index(key); ok {
+		return rs[i]
+	}
+	return nil
+}
+
+func (rs *records) Iter(fn func(int, *record) bool) {
+	cp := make([]*record, len(*rs))
+	copy(cp, *rs)
+
+	for i, r := range cp {
+		if !fn(i, r) {
+			return
+		}
+	}
+}
+
+func (rs *records) Index(key string) (int, bool) {
+	for i, r := range *rs {
+		if r.key == key {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+func (rs records) Exists(key string) bool {
+	_, ok := rs.Index(key)
+	return ok
+}
+
+func (rs *records) Remove(key string) (r *record) {
+	if i, ok := rs.Index(key); ok {
+		return rs.removeAt(i)
+	}
+	return nil
+}
+
+func (rs *records) Replace(key string, rec *record) *record {
+	if i, ok := rs.Index(key); ok {
+		old := (*rs)[i]
+		(*rs)[i] = rec
+		return old
+	}
+	return nil
+}
+
+func (rs *records) removeAt(index int) *record {
+	r := (*rs)[index]
+	(*rs)[index] = nil
+	copy((*rs)[index:], (*rs)[index+1:])
+	*rs = (*rs)[:len(*rs)-1]
+	return r
+}
+
+// record is the data structure used to cache releases
+// for the in-memory storage driver
+type record struct {
+	key string
+	lbs labels
+	rls *rspb.Release
+}
+
+// newRecord creates a new in-memory release record
+func newRecord(key string, rls *rspb.Release) *record {
+	var lbs labels
+
+	lbs.init()
+	lbs.set("name", rls.Name)
+	lbs.set("owner", "helm")
+	lbs.set("status", rls.Info.Status.String())
+	lbs.set("version", strconv.Itoa(rls.Version))
+
+	// return &record{key: key, lbs: lbs, rls: proto.Clone(rls).(*rspb.Release)}
+	return &record{key: key, lbs: lbs, rls: rls}
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go
new file mode 100644
index 0000000000..2e8530d0ca
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/secrets.go
@@ -0,0 +1,250 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kblabels "k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/validation"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*Secrets)(nil)
+
+// SecretsDriverName is the string name of the driver.
+const SecretsDriverName = "Secret"
+
+// Secrets is a wrapper around an implementation of a kubernetes
+// SecretsInterface.
+type Secrets struct {
+	impl corev1.SecretInterface
+	Log  func(string, ...interface{})
+}
+
+// NewSecrets initializes a new Secrets wrapping an implementation of
+// the kubernetes SecretsInterface.
+func NewSecrets(impl corev1.SecretInterface) *Secrets {
+	return &Secrets{
+		impl: impl,
+		Log:  func(_ string, _ ...interface{}) {},
+	}
+}
+
+// Name returns the name of the driver.
+func (secrets *Secrets) Name() string {
+	return SecretsDriverName
+}
+
+// Get fetches the release named by key. The corresponding release is returned
+// or error if not found.
+func (secrets *Secrets) Get(key string) (*rspb.Release, error) {
+	// fetch the secret holding the release named by key
+	obj, err := secrets.impl.Get(context.Background(), key, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil, ErrReleaseNotFound
+		}
+		return nil, errors.Wrapf(err, "get: failed to get %q", key)
+	}
+	// found the secret, decode the base64 data string
+	r, err := decodeRelease(string(obj.Data["release"]))
+	return r, errors.Wrapf(err, "get: failed to decode data %q", key)
+}
+
+// List fetches all releases and returns the list of releases such
+// that filter(release) == true. An error is returned if the
+// secret fails to retrieve the releases.
+func (secrets *Secrets) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) {
+	lsel := kblabels.Set{"owner": "helm"}.AsSelector()
+	opts := metav1.ListOptions{LabelSelector: lsel.String()}
+
+	list, err := secrets.impl.List(context.Background(), opts)
+	if err != nil {
+		return nil, errors.Wrap(err, "list: failed to list")
+	}
+
+	var results []*rspb.Release
+
+	// iterate over the secrets object list
+	// and decode each release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(string(item.Data["release"]))
+		if err != nil {
+			secrets.Log("list: failed to decode release: %v: %s", item, err)
+			continue
+		}
+
+		rls.Labels = item.ObjectMeta.Labels
+
+		if filter(rls) {
+			results = append(results, rls)
+		}
+	}
+	return results, nil
+}
+
+// Query fetches all releases that match the provided map of labels.
+// An error is returned if the secret fails to retrieve the releases.
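+//
+// Illustrative call (editor's sketch, not upstream code):
+//
+//	rels, err := d.Query(map[string]string{"name": "myapp", "owner": "helm"})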
+func (secrets *Secrets) Query(labels map[string]string) ([]*rspb.Release, error) {
+	ls := kblabels.Set{}
+	for k, v := range labels {
+		if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+			return nil, errors.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; "))
+		}
+		ls[k] = v
+	}
+
+	opts := metav1.ListOptions{LabelSelector: ls.AsSelector().String()}
+
+	list, err := secrets.impl.List(context.Background(), opts)
+	if err != nil {
+		return nil, errors.Wrap(err, "query: failed to query with labels")
+	}
+
+	if len(list.Items) == 0 {
+		return nil, ErrReleaseNotFound
+	}
+
+	var results []*rspb.Release
+	for _, item := range list.Items {
+		rls, err := decodeRelease(string(item.Data["release"]))
+		if err != nil {
+			secrets.Log("query: failed to decode release: %s", err)
+			continue
+		}
+		results = append(results, rls)
+	}
+	return results, nil
+}
+
+// Create creates a new Secret holding the release. If the
+// Secret already exists, ErrReleaseExists is returned.
+func (secrets *Secrets) Create(key string, rls *rspb.Release) error {
+	// set labels for secrets object meta data
+	var lbs labels
+
+	lbs.init()
+	lbs.set("createdAt", strconv.Itoa(int(time.Now().Unix())))
+
+	// create a new secret to hold the release
+	obj, err := newSecretsObject(key, rls, lbs)
+	if err != nil {
+		return errors.Wrapf(err, "create: failed to encode release %q", rls.Name)
+	}
+	// push the secret object out into the kubiverse
+	if _, err := secrets.impl.Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
+		if apierrors.IsAlreadyExists(err) {
+			return ErrReleaseExists
+		}
+
+		return errors.Wrap(err, "create: failed to create")
+	}
+	return nil
+}
+
+// Update updates the Secret holding the release. If not found
+// the Secret is created to hold the release.
+func (secrets *Secrets) Update(key string, rls *rspb.Release) error {
+	// set labels for secrets object meta data
+	var lbs labels
+
+	lbs.init()
+	lbs.set("modifiedAt", strconv.Itoa(int(time.Now().Unix())))
+
+	// create a new secret object to hold the release
+	obj, err := newSecretsObject(key, rls, lbs)
+	if err != nil {
+		return errors.Wrapf(err, "update: failed to encode release %q", rls.Name)
+	}
+	// push the secret object out into the kubiverse
+	_, err = secrets.impl.Update(context.Background(), obj, metav1.UpdateOptions{})
+	return errors.Wrap(err, "update: failed to update")
+}
+
+// Delete deletes the Secret holding the release named by key.
+func (secrets *Secrets) Delete(key string) (rls *rspb.Release, err error) {
+	// fetch the release to check existence
+	if rls, err = secrets.Get(key); err != nil {
+		return nil, err
+	}
+	// delete the release
+	err = secrets.impl.Delete(context.Background(), key, metav1.DeleteOptions{})
+	return rls, err
+}
+
+// newSecretsObject constructs a kubernetes Secret object
+// to store a release. Each secret data entry is the base64
+// encoded gzipped string of a release.
+//
+// The following labels are used within each secret:
+//
+//	"modifiedAt" - timestamp indicating when this secret was last modified. (set in Update)
+//	"createdAt" - timestamp indicating when this secret was created. (set in Create)
+//	"version" - version of the release.
+//	"status" - status of the release (see pkg/release/status.go for variants)
+//	"owner" - owner of the secret, currently "helm".
+//	"name" - name of the release.
+//
+func newSecretsObject(key string, rls *rspb.Release, lbs labels) (*v1.Secret, error) {
+	const owner = "helm"
+
+	// encode the release
+	s, err := encodeRelease(rls)
+	if err != nil {
+		return nil, err
+	}
+
+	if lbs == nil {
+		lbs.init()
+	}
+
+	// apply labels
+	lbs.set("name", rls.Name)
+	lbs.set("owner", owner)
+	lbs.set("status", rls.Info.Status.String())
+	lbs.set("version", strconv.Itoa(rls.Version))
+
+	// create and return secret object.
+	// Helm 3 introduced setting the 'Type' field
+	// in the Kubernetes storage object.
+	// Helm defines the field content as follows:
+	// <helm_domain>/<helm_object>.v<helm_object_version>
+	// Type field for Helm 3: helm.sh/release.v1
+	// Note: Version starts at 'v1' for Helm 3 and
+	// should be incremented if the release object
+	// metadata is modified.
+	// This would potentially be a breaking change
+	// and should only happen between major versions.
+	return &v1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   key,
+			Labels: lbs.toMap(),
+		},
+		Type: "helm.sh/release.v1",
+		Data: map[string][]byte{"release": []byte(s)},
+	}, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
new file mode 100644
index 0000000000..c8a6ae04fb
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go
@@ -0,0 +1,496 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+	"fmt"
+	"sort"
+	"time"
+
+	"github.com/jmoiron/sqlx"
+	migrate "github.com/rubenv/sql-migrate"
+
+	sq "github.com/Masterminds/squirrel"
+
+	// Import pq for postgres dialect
+	_ "github.com/lib/pq"
+
+	rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var _ Driver = (*SQL)(nil)
+
+var labelMap = map[string]struct{}{
+	"modifiedAt": {},
+	"createdAt":  {},
+	"version":    {},
+	"status":     {},
+	"owner":      {},
+	"name":       {},
+}
+
+const postgreSQLDialect = "postgres"
+
+// SQLDriverName is the string name of this driver.
+const SQLDriverName = "SQL"
+
+const sqlReleaseTableName = "releases_v1"
+
+const (
+	sqlReleaseTableKeyColumn        = "key"
+	sqlReleaseTableTypeColumn       = "type"
+	sqlReleaseTableBodyColumn       = "body"
+	sqlReleaseTableNameColumn       = "name"
+	sqlReleaseTableNamespaceColumn  = "namespace"
+	sqlReleaseTableVersionColumn    = "version"
+	sqlReleaseTableStatusColumn     = "status"
+	sqlReleaseTableOwnerColumn      = "owner"
+	sqlReleaseTableCreatedAtColumn  = "createdAt"
+	sqlReleaseTableModifiedAtColumn = "modifiedAt"
+)
+
+const (
+	sqlReleaseDefaultOwner = "helm"
+	sqlReleaseDefaultType  = "helm.sh/release.v1"
+)
+
+// SQL is the sql storage driver implementation.
+type SQL struct {
+	db               *sqlx.DB
+	namespace        string
+	statementBuilder sq.StatementBuilderType
+
+	Log func(string, ...interface{})
+}
+
+// Name returns the name of the driver.
+func (s *SQL) Name() string {
+	return SQLDriverName
+}
+
+func (s *SQL) ensureDBSetup() error {
+	// Populate the database with the relations we need if they don't exist yet
+	migrations := &migrate.MemoryMigrationSource{
+		Migrations: []*migrate.Migration{
+			{
+				Id: "init",
+				Up: []string{
+					fmt.Sprintf(`
+						CREATE TABLE %s (
+							%s VARCHAR(67),
+							%s VARCHAR(64) NOT NULL,
+							%s TEXT NOT NULL,
+							%s VARCHAR(64) NOT NULL,
+							%s VARCHAR(64) NOT NULL,
+							%s INTEGER NOT NULL,
+							%s TEXT NOT NULL,
+							%s TEXT NOT NULL,
+							%s INTEGER NOT NULL,
+							%s INTEGER NOT NULL DEFAULT 0,
+							PRIMARY KEY(%s, %s)
+						);
+						CREATE INDEX ON %s (%s, %s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+						CREATE INDEX ON %s (%s);
+
+						GRANT ALL ON %s TO PUBLIC;
+
+						ALTER TABLE %s ENABLE ROW LEVEL SECURITY;
+					`,
+						sqlReleaseTableName,
+						sqlReleaseTableKeyColumn,
+						sqlReleaseTableTypeColumn,
+						sqlReleaseTableBodyColumn,
+						sqlReleaseTableNameColumn,
+						sqlReleaseTableNamespaceColumn,
+						sqlReleaseTableVersionColumn,
+						sqlReleaseTableStatusColumn,
+						sqlReleaseTableOwnerColumn,
+						sqlReleaseTableCreatedAtColumn,
+						sqlReleaseTableModifiedAtColumn,
+						sqlReleaseTableKeyColumn,
+						sqlReleaseTableNamespaceColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableKeyColumn,
+						sqlReleaseTableNamespaceColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableVersionColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableStatusColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableOwnerColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableCreatedAtColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableModifiedAtColumn,
+						sqlReleaseTableName,
+						sqlReleaseTableName,
+					),
+				},
+				Down: []string{
+					fmt.Sprintf(`
+						DROP TABLE %s;
+					`, sqlReleaseTableName),
+				},
+			},
+		},
+	}
+
+	_, err := migrate.Exec(s.db.DB, postgreSQLDialect, migrations, migrate.Up)
+	return err
+}
+
+// SQLReleaseWrapper describes how Helm releases are stored in an SQL database
+type SQLReleaseWrapper struct {
+	// The primary key, made of {release-name}.{release-version}
+	Key string `db:"key"`
+
+	// See https://github.com/helm/helm/blob/c9fe3d118caec699eb2565df9838673af379ce12/pkg/storage/driver/secrets.go#L231
+	Type string `db:"type"`
+
+	// The rspb.Release body, as a base64-encoded string
+	Body string `db:"body"`
+
+	// Release "labels" that can be used as filters in the storage.Query(labels map[string]string)
+	// we implemented. Note that allowing Helm users to filter against new dimensions will require a
+	// new migration to be added, and the Create and/or update functions to be updated accordingly.
+	Name       string `db:"name"`
+	Namespace  string `db:"namespace"`
+	Version    int    `db:"version"`
+	Status     string `db:"status"`
+	Owner      string `db:"owner"`
+	CreatedAt  int    `db:"createdAt"`
+	ModifiedAt int    `db:"modifiedAt"`
+}
+
+// NewSQL initializes a new sql driver.
+func NewSQL(connectionString string, logger func(string, ...interface{}), namespace string) (*SQL, error) {
+	db, err := sqlx.Connect(postgreSQLDialect, connectionString)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &SQL{
+		db:               db,
+		Log:              logger,
+		statementBuilder: sq.StatementBuilder.PlaceholderFormat(sq.Dollar),
+	}
+
+	if err := driver.ensureDBSetup(); err != nil {
+		return nil, err
+	}
+
+	driver.namespace = namespace
+
+	return driver, nil
+}
+
+// Get returns the release named by key.
+func (s *SQL) Get(key string) (*rspb.Release, error) {
+	var record SQLReleaseWrapper
+
+	qb := s.statementBuilder.
+		Select(sqlReleaseTableBodyColumn).
+		From(sqlReleaseTableName).
+ Where(sq.Eq{sqlReleaseTableKeyColumn: key}). + Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}) + + query, args, err := qb.ToSql() + if err != nil { + s.Log("failed to build query: %v", err) + return nil, err + } + + // Get will return an error if the result is empty + if err := s.db.Get(&record, query, args...); err != nil { + s.Log("got SQL error when getting release %s: %v", key, err) + return nil, ErrReleaseNotFound + } + + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("get: failed to decode data %q: %v", key, err) + return nil, err + } + + return release, nil +} + +// List returns the list of all releases such that filter(release) == true +func (s *SQL) List(filter func(*rspb.Release) bool) ([]*rspb.Release, error) { + sb := s.statementBuilder. + Select(sqlReleaseTableBodyColumn). + From(sqlReleaseTableName). + Where(sq.Eq{sqlReleaseTableOwnerColumn: sqlReleaseDefaultOwner}) + + // If a namespace was specified, we only list releases from that namespace + if s.namespace != "" { + sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}) + } + + query, args, err := sb.ToSql() + if err != nil { + s.Log("failed to build query: %v", err) + return nil, err + } + + var records = []SQLReleaseWrapper{} + if err := s.db.Select(&records, query, args...); err != nil { + s.Log("list: failed to list: %v", err) + return nil, err + } + + var releases []*rspb.Release + for _, record := range records { + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("list: failed to decode release: %v: %v", record, err) + continue + } + if filter(release) { + releases = append(releases, release) + } + } + + return releases, nil +} + +// Query returns the set of releases that match the provided set of labels. +func (s *SQL) Query(labels map[string]string) ([]*rspb.Release, error) { + sb := s.statementBuilder. + Select(sqlReleaseTableBodyColumn). + From(sqlReleaseTableName) + + keys := make([]string, 0, len(labels)) + for key := range labels { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + if _, ok := labelMap[key]; ok { + sb = sb.Where(sq.Eq{key: labels[key]}) + } else { + s.Log("unknown label %s", key) + return nil, fmt.Errorf("unknown label %s", key) + } + } + + // If a namespace was specified, we only list releases from that namespace + if s.namespace != "" { + sb = sb.Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}) + } + + // Build our query + query, args, err := sb.ToSql() + if err != nil { + s.Log("failed to build query: %v", err) + return nil, err + } + + var records = []SQLReleaseWrapper{} + if err := s.db.Select(&records, query, args...); err != nil { + s.Log("list: failed to query with labels: %v", err) + return nil, err + } + + if len(records) == 0 { + return nil, ErrReleaseNotFound + } + + var releases []*rspb.Release + for _, record := range records { + release, err := decodeRelease(record.Body) + if err != nil { + s.Log("list: failed to decode release: %v: %v", record, err) + continue + } + releases = append(releases, release) + } + + if len(releases) == 0 { + return nil, ErrReleaseNotFound + } + + return releases, nil +} + +// Create creates a new release. 
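A minimal usage sketch of the driver defined above (editorial, not vendored code), assuming a reachable Postgres instance; the DSN is illustrative. NewSQL runs the ensureDBSetup migration on connect, and Query accepts only the whitelisted labelMap keys.

    package main

    import (
        "log"

        "helm.sh/helm/v3/pkg/storage/driver"
    )

    func main() {
        // Illustrative DSN; NewSQL connects and then applies the migration
        // so the releases_v1 table and its indexes exist.
        sqlDriver, err := driver.NewSQL(
            "postgresql://helm:changeme@localhost:5432/helm?sslmode=disable",
            log.Printf,
            "default", // namespace every query is scoped to
        )
        if err != nil {
            log.Fatal(err)
        }

        // Only keys present in labelMap (name, owner, status, version,
        // createdAt, modifiedAt) are legal filters; anything else errors.
        deployed, err := sqlDriver.Query(map[string]string{
            "name":   "my-release",
            "owner":  "helm",
            "status": "deployed",
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("found %d deployed revision(s)", len(deployed))
    }

Note that namespace scoping is enforced in the query layer; the row-level security enabled by the migration appears aimed at deployments that also want database-side isolation.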
+func (s *SQL) Create(key string, rls *rspb.Release) error { + namespace := rls.Namespace + if namespace == "" { + namespace = defaultNamespace + } + s.namespace = namespace + + body, err := encodeRelease(rls) + if err != nil { + s.Log("failed to encode release: %v", err) + return err + } + + transaction, err := s.db.Beginx() + if err != nil { + s.Log("failed to start SQL transaction: %v", err) + return fmt.Errorf("error beginning transaction: %v", err) + } + + insertQuery, args, err := s.statementBuilder. + Insert(sqlReleaseTableName). + Columns( + sqlReleaseTableKeyColumn, + sqlReleaseTableTypeColumn, + sqlReleaseTableBodyColumn, + sqlReleaseTableNameColumn, + sqlReleaseTableNamespaceColumn, + sqlReleaseTableVersionColumn, + sqlReleaseTableStatusColumn, + sqlReleaseTableOwnerColumn, + sqlReleaseTableCreatedAtColumn, + ). + Values( + key, + sqlReleaseDefaultType, + body, + rls.Name, + namespace, + int(rls.Version), + rls.Info.Status.String(), + sqlReleaseDefaultOwner, + int(time.Now().Unix()), + ).ToSql() + if err != nil { + s.Log("failed to build insert query: %v", err) + return err + } + + if _, err := transaction.Exec(insertQuery, args...); err != nil { + defer transaction.Rollback() + + selectQuery, args, buildErr := s.statementBuilder. + Select(sqlReleaseTableKeyColumn). + From(sqlReleaseTableName). + Where(sq.Eq{sqlReleaseTableKeyColumn: key}). + Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). + ToSql() + if buildErr != nil { + s.Log("failed to build select query: %v", buildErr) + return err + } + + var record SQLReleaseWrapper + if err := transaction.Get(&record, selectQuery, args...); err == nil { + s.Log("release %s already exists", key) + return ErrReleaseExists + } + + s.Log("failed to store release %s in SQL database: %v", key, err) + return err + } + defer transaction.Commit() + + return nil +} + +// Update updates a release. +func (s *SQL) Update(key string, rls *rspb.Release) error { + namespace := rls.Namespace + if namespace == "" { + namespace = defaultNamespace + } + s.namespace = namespace + + body, err := encodeRelease(rls) + if err != nil { + s.Log("failed to encode release: %v", err) + return err + } + + query, args, err := s.statementBuilder. + Update(sqlReleaseTableName). + Set(sqlReleaseTableBodyColumn, body). + Set(sqlReleaseTableNameColumn, rls.Name). + Set(sqlReleaseTableVersionColumn, int(rls.Version)). + Set(sqlReleaseTableStatusColumn, rls.Info.Status.String()). + Set(sqlReleaseTableOwnerColumn, sqlReleaseDefaultOwner). + Set(sqlReleaseTableModifiedAtColumn, int(time.Now().Unix())). + Where(sq.Eq{sqlReleaseTableKeyColumn: key}). + Where(sq.Eq{sqlReleaseTableNamespaceColumn: namespace}). + ToSql() + + if err != nil { + s.Log("failed to build update query: %v", err) + return err + } + + if _, err := s.db.Exec(query, args...); err != nil { + s.Log("failed to update release %s in SQL database: %v", key, err) + return err + } + + return nil +} + +// Delete deletes a release or returns ErrReleaseNotFound. +func (s *SQL) Delete(key string) (*rspb.Release, error) { + transaction, err := s.db.Beginx() + if err != nil { + s.Log("failed to start SQL transaction: %v", err) + return nil, fmt.Errorf("error beginning transaction: %v", err) + } + + selectQuery, args, err := s.statementBuilder. + Select(sqlReleaseTableBodyColumn). + From(sqlReleaseTableName). + Where(sq.Eq{sqlReleaseTableKeyColumn: key}). + Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}). 
+        ToSql()
+    if err != nil {
+        s.Log("failed to build select query: %v", err)
+        return nil, err
+    }
+
+    var record SQLReleaseWrapper
+    err = transaction.Get(&record, selectQuery, args...)
+    if err != nil {
+        s.Log("release %s not found: %v", key, err)
+        return nil, ErrReleaseNotFound
+    }
+
+    release, err := decodeRelease(record.Body)
+    if err != nil {
+        s.Log("failed to decode release %s: %v", key, err)
+        transaction.Rollback()
+        return nil, err
+    }
+    defer transaction.Commit()
+
+    deleteQuery, args, err := s.statementBuilder.
+        Delete(sqlReleaseTableName).
+        Where(sq.Eq{sqlReleaseTableKeyColumn: key}).
+        Where(sq.Eq{sqlReleaseTableNamespaceColumn: s.namespace}).
+        ToSql()
+    if err != nil {
+        s.Log("failed to build delete query: %v", err)
+        return nil, err
+    }
+
+    _, err = transaction.Exec(deleteQuery, args...)
+    return release, err
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go
new file mode 100644
index 0000000000..e5b846163c
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/util.go
@@ -0,0 +1,85 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver // import "helm.sh/helm/v3/pkg/storage/driver"
+
+import (
+    "bytes"
+    "compress/gzip"
+    "encoding/base64"
+    "encoding/json"
+    "io/ioutil"
+
+    rspb "helm.sh/helm/v3/pkg/release"
+)
+
+var b64 = base64.StdEncoding
+
+var magicGzip = []byte{0x1f, 0x8b, 0x08}
+
+// encodeRelease encodes a release returning a base64 encoded
+// gzipped string representation, or error.
+func encodeRelease(rls *rspb.Release) (string, error) {
+    b, err := json.Marshal(rls)
+    if err != nil {
+        return "", err
+    }
+    var buf bytes.Buffer
+    w, err := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+    if err != nil {
+        return "", err
+    }
+    if _, err = w.Write(b); err != nil {
+        return "", err
+    }
+    w.Close()
+
+    return b64.EncodeToString(buf.Bytes()), nil
+}
+
+// decodeRelease decodes the bytes of data into a release
+// type. Data must contain a base64 encoded gzipped string of a
+// valid release, otherwise an error is returned.
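The wire format used by encodeRelease, reproduced standalone (editorial sketch; the helpers are unexported, and the payload below is a hypothetical stand-in for a release): JSON, gzipped at BestCompression, then standard base64.

    package main

    import (
        "bytes"
        "compress/gzip"
        "encoding/base64"
        "encoding/json"
        "fmt"
        "log"
    )

    func main() {
        // Stand-in payload; the real driver marshals a *rspb.Release.
        payload := map[string]interface{}{"name": "my-release", "version": 1}

        raw, err := json.Marshal(payload)
        if err != nil {
            log.Fatal(err)
        }

        var buf bytes.Buffer
        w, err := gzip.NewWriterLevel(&buf, gzip.BestCompression)
        if err != nil {
            log.Fatal(err)
        }
        if _, err := w.Write(raw); err != nil {
            log.Fatal(err)
        }
        w.Close()

        fmt.Println(base64.StdEncoding.EncodeToString(buf.Bytes()))

        // On read, decodeRelease only gunzips when the payload starts with
        // the gzip magic bytes {0x1f, 0x8b, 0x08}, which keeps uncompressed
        // records written by older Helm versions readable.
    }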
+func decodeRelease(data string) (*rspb.Release, error) {
+    // base64 decode string
+    b, err := b64.DecodeString(data)
+    if err != nil {
+        return nil, err
+    }
+
+    // For backwards compatibility with releases that were stored before
+    // compression was introduced we skip decompression if the
+    // gzip magic header is not found
+    if bytes.Equal(b[0:3], magicGzip) {
+        r, err := gzip.NewReader(bytes.NewReader(b))
+        if err != nil {
+            return nil, err
+        }
+        defer r.Close()
+        b2, err := ioutil.ReadAll(r)
+        if err != nil {
+            return nil, err
+        }
+        b = b2
+    }
+
+    var rls rspb.Release
+    // unmarshal release object bytes
+    if err := json.Unmarshal(b, &rls); err != nil {
+        return nil, err
+    }
+    return &rls, nil
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/storage/storage.go b/vendor/helm.sh/helm/v3/pkg/storage/storage.go
new file mode 100644
index 0000000000..d836a412ae
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/storage/storage.go
@@ -0,0 +1,266 @@
+/*
+Copyright The Helm Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage // import "helm.sh/helm/v3/pkg/storage"
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/pkg/errors"
+
+    rspb "helm.sh/helm/v3/pkg/release"
+    relutil "helm.sh/helm/v3/pkg/releaseutil"
+    "helm.sh/helm/v3/pkg/storage/driver"
+)
+
+// HelmStorageType is the type field of the Kubernetes storage object which stores the Helm release
+// version. It is the storage object's Type field, helm.sh/release.v1, with the domain
+// reversed and the '/' replaced by a '.': sh.helm.release.v1
+// Note: The version 'v1' is incremented if the release object metadata is
+// modified between major releases.
+// This constant is used as a prefix for the Kubernetes storage object name.
+const HelmStorageType = "sh.helm.release.v1"
+
+// Storage represents a storage engine for a Release.
+type Storage struct {
+    driver.Driver
+
+    // MaxHistory specifies the maximum number of historical releases that will
+    // be retained, including the most recent release. Values of 0 or less are
+    // ignored (meaning no limits are imposed).
+    MaxHistory int
+
+    Log func(string, ...interface{})
+}
+
+// Get retrieves the release from storage. An error is returned
+// if the storage driver failed to fetch the release, or the
+// release identified by the key, version pair does not exist.
+func (s *Storage) Get(name string, version int) (*rspb.Release, error) {
+    s.Log("getting release %q", makeKey(name, version))
+    return s.Driver.Get(makeKey(name, version))
+}
+
+// Create creates a new storage entry holding the release. An
+// error is returned if the storage driver failed to store the
+// release, or a release with an identical key already exists.
+func (s *Storage) Create(rls *rspb.Release) error {
+    s.Log("creating release %q", makeKey(rls.Name, rls.Version))
+    if s.MaxHistory > 0 {
+        // Want to make space for one more release.
+        if err := s.removeLeastRecent(rls.Name, s.MaxHistory-1); err != nil &&
+            !errors.Is(err, driver.ErrReleaseNotFound) {
+            return err
+        }
+    }
+    return s.Driver.Create(makeKey(rls.Name, rls.Version), rls)
+}
+
+// Update updates the release in storage.
An error is returned if the +// storage backend fails to update the release or if the release +// does not exist. +func (s *Storage) Update(rls *rspb.Release) error { + s.Log("updating release %q", makeKey(rls.Name, rls.Version)) + return s.Driver.Update(makeKey(rls.Name, rls.Version), rls) +} + +// Delete deletes the release from storage. An error is returned if +// the storage backend fails to delete the release or if the release +// does not exist. +func (s *Storage) Delete(name string, version int) (*rspb.Release, error) { + s.Log("deleting release %q", makeKey(name, version)) + return s.Driver.Delete(makeKey(name, version)) +} + +// ListReleases returns all releases from storage. An error is returned if the +// storage backend fails to retrieve the releases. +func (s *Storage) ListReleases() ([]*rspb.Release, error) { + s.Log("listing all releases in storage") + return s.Driver.List(func(_ *rspb.Release) bool { return true }) +} + +// ListUninstalled returns all releases with Status == UNINSTALLED. An error is returned +// if the storage backend fails to retrieve the releases. +func (s *Storage) ListUninstalled() ([]*rspb.Release, error) { + s.Log("listing uninstalled releases in storage") + return s.Driver.List(func(rls *rspb.Release) bool { + return relutil.StatusFilter(rspb.StatusUninstalled).Check(rls) + }) +} + +// ListDeployed returns all releases with Status == DEPLOYED. An error is returned +// if the storage backend fails to retrieve the releases. +func (s *Storage) ListDeployed() ([]*rspb.Release, error) { + s.Log("listing all deployed releases in storage") + return s.Driver.List(func(rls *rspb.Release) bool { + return relutil.StatusFilter(rspb.StatusDeployed).Check(rls) + }) +} + +// Deployed returns the last deployed release with the provided release name, or +// returns ErrReleaseNotFound if not found. +func (s *Storage) Deployed(name string) (*rspb.Release, error) { + ls, err := s.DeployedAll(name) + if err != nil { + return nil, err + } + + if len(ls) == 0 { + return nil, driver.NewErrNoDeployedReleases(name) + } + + // If executed concurrently, Helm's database gets corrupted + // and multiple releases are DEPLOYED. Take the latest. + relutil.Reverse(ls, relutil.SortByRevision) + + return ls[0], nil +} + +// DeployedAll returns all deployed releases with the provided name, or +// returns ErrReleaseNotFound if not found. +func (s *Storage) DeployedAll(name string) ([]*rspb.Release, error) { + s.Log("getting deployed releases from %q history", name) + + ls, err := s.Driver.Query(map[string]string{ + "name": name, + "owner": "helm", + "status": "deployed", + }) + if err == nil { + return ls, nil + } + if strings.Contains(err.Error(), "not found") { + return nil, driver.NewErrNoDeployedReleases(name) + } + return nil, err +} + +// History returns the revision history for the release with the provided name, or +// returns ErrReleaseNotFound if no such release name exists. +func (s *Storage) History(name string) ([]*rspb.Release, error) { + s.Log("getting release history for %q", name) + + return s.Driver.Query(map[string]string{"name": name, "owner": "helm"}) +} + +// removeLeastRecent removes items from history until the length number of releases +// does not exceed max. +// +// We allow max to be set explicitly so that calling functions can "make space" +// for the new records they are going to write. 
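An editorial sketch of the Storage facade against the in-memory driver (Init appears near the end of this file; the release name and statuses are illustrative). It exercises Create, Deployed, and History; the pruning helper that follows below only kicks in when MaxHistory is set.

    package main

    import (
        "log"

        rspb "helm.sh/helm/v3/pkg/release"
        "helm.sh/helm/v3/pkg/storage"
        "helm.sh/helm/v3/pkg/storage/driver"
    )

    func main() {
        // Init falls back to the in-memory driver when given nil;
        // we pass it explicitly for clarity.
        s := storage.Init(driver.NewMemory())
        s.Log = log.Printf

        // Two revisions of a hypothetical release: v1 superseded, v2 deployed.
        for _, rls := range []*rspb.Release{
            {Name: "my-release", Version: 1, Info: &rspb.Info{Status: rspb.StatusSuperseded}},
            {Name: "my-release", Version: 2, Info: &rspb.Info{Status: rspb.StatusDeployed}},
        } {
            if err := s.Create(rls); err != nil {
                log.Fatal(err)
            }
        }

        dep, err := s.Deployed("my-release")
        if err != nil {
            log.Fatal(err)
        }
        h, _ := s.History("my-release")
        log.Printf("deployed revision %d of %d total", dep.Version, len(h)) // 2 of 2
    }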
+func (s *Storage) removeLeastRecent(name string, max int) error {
+    if max < 0 {
+        return nil
+    }
+    h, err := s.History(name)
+    if err != nil {
+        return err
+    }
+    if len(h) <= max {
+        return nil
+    }
+
+    // We want oldest to newest
+    relutil.SortByRevision(h)
+
+    lastDeployed, err := s.Deployed(name)
+    if err != nil {
+        return err
+    }
+
+    var toDelete []*rspb.Release
+    for _, rel := range h {
+        // once we have enough releases to delete to reach the max, stop
+        if len(h)-len(toDelete) == max {
+            break
+        }
+        if lastDeployed != nil {
+            if rel.Version != lastDeployed.Version {
+                toDelete = append(toDelete, rel)
+            }
+        } else {
+            toDelete = append(toDelete, rel)
+        }
+    }
+
+    // Delete as many as possible. In the case of API throughput limitations,
+    // multiple invocations of this function will eventually delete them all.
+    errs := []error{}
+    for _, rel := range toDelete {
+        err = s.deleteReleaseVersion(name, rel.Version)
+        if err != nil {
+            errs = append(errs, err)
+        }
+    }
+
+    s.Log("Pruned %d record(s) from %s with %d error(s)", len(toDelete), name, len(errs))
+    switch c := len(errs); c {
+    case 0:
+        return nil
+    case 1:
+        return errs[0]
+    default:
+        return errors.Errorf("encountered %d deletion errors. First is: %s", c, errs[0])
+    }
+}
+
+func (s *Storage) deleteReleaseVersion(name string, version int) error {
+    key := makeKey(name, version)
+    _, err := s.Delete(name, version)
+    if err != nil {
+        s.Log("error pruning %s from release history: %s", key, err)
+        return err
+    }
+    return nil
+}
+
+// Last fetches the last revision of the named release.
+func (s *Storage) Last(name string) (*rspb.Release, error) {
+    s.Log("getting last revision of %q", name)
+    h, err := s.History(name)
+    if err != nil {
+        return nil, err
+    }
+    if len(h) == 0 {
+        return nil, errors.Errorf("no revision for release %q", name)
+    }
+
+    relutil.Reverse(h, relutil.SortByRevision)
+    return h[0], nil
+}
+
+// makeKey concatenates the Kubernetes storage object type, a release name and version
+// into a string with format:```<helm_storage_type>.<release_name>.v<release_version>```.
+// The storage type is prepended to keep name uniqueness between different
+// release storage types. An example of clash when not using the type:
+// https://github.com/helm/helm/issues/6435.
+// This key is used to uniquely identify storage objects.
+func makeKey(rlsname string, version int) string {
+    return fmt.Sprintf("%s.%s.v%d", HelmStorageType, rlsname, version)
+}
+
+// Init initializes a new storage backend with the driver d.
+// If d is nil, the default in-memory driver is used.
+func Init(d driver.Driver) *Storage {
+    // default driver is in memory
+    if d == nil {
+        d = driver.NewMemory()
+    }
+    return &Storage{
+        Driver: d,
+        Log:    func(_ string, _ ...interface{}) {},
+    }
+}
diff --git a/vendor/helm.sh/helm/v3/pkg/strvals/doc.go b/vendor/helm.sh/helm/v3/pkg/strvals/doc.go
new file mode 100644
index 0000000000..f172905871
--- /dev/null
+++ b/vendor/helm.sh/helm/v3/pkg/strvals/doc.go
@@ -0,0 +1,32 @@
+/*
+Copyright The Helm Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +/*Package strvals provides tools for working with strval lines. + +Helm supports a compressed format for YAML settings which we call strvals. +The format is roughly like this: + + name=value,topname.subname=value + +The above is equivalent to the YAML document + + name: value + topname: + subname: value + +This package provides a parser and utilities for converting the strvals format +to other formats. +*/ +package strvals diff --git a/vendor/helm.sh/helm/v3/pkg/strvals/parser.go b/vendor/helm.sh/helm/v3/pkg/strvals/parser.go new file mode 100644 index 0000000000..457b99f94a --- /dev/null +++ b/vendor/helm.sh/helm/v3/pkg/strvals/parser.go @@ -0,0 +1,446 @@ +/* +Copyright The Helm Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strvals + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/pkg/errors" + "sigs.k8s.io/yaml" +) + +// ErrNotList indicates that a non-list was treated as a list. +var ErrNotList = errors.New("not a list") + +// ToYAML takes a string of arguments and converts to a YAML document. +func ToYAML(s string) (string, error) { + m, err := Parse(s) + if err != nil { + return "", err + } + d, err := yaml.Marshal(m) + return strings.TrimSuffix(string(d), "\n"), err +} + +// Parse parses a set line. +// +// A set line is of the form name1=value1,name2=value2 +func Parse(s string) (map[string]interface{}, error) { + vals := map[string]interface{}{} + scanner := bytes.NewBufferString(s) + t := newParser(scanner, vals, false) + err := t.parse() + return vals, err +} + +// ParseString parses a set line and forces a string value. +// +// A set line is of the form name1=value1,name2=value2 +func ParseString(s string) (map[string]interface{}, error) { + vals := map[string]interface{}{} + scanner := bytes.NewBufferString(s) + t := newParser(scanner, vals, true) + err := t.parse() + return vals, err +} + +// ParseInto parses a strvals line and merges the result into dest. +// +// If the strval string has a key that exists in dest, it overwrites the +// dest version. +func ParseInto(s string, dest map[string]interface{}) error { + scanner := bytes.NewBufferString(s) + t := newParser(scanner, dest, false) + return t.parse() +} + +// ParseFile parses a set line, but its final value is loaded from the file at the path specified by the original value. +// +// A set line is of the form name1=path1,name2=path2 +// +// When the files at path1 and path2 contained "val1" and "val2" respectively, the set line is consumed as +// name1=val1,name2=val2 +func ParseFile(s string, reader RunesValueReader) (map[string]interface{}, error) { + vals := map[string]interface{}{} + scanner := bytes.NewBufferString(s) + t := newFileParser(scanner, vals, reader) + err := t.parse() + return vals, err +} + +// ParseIntoString parses a strvals line and merges the result into dest. +// +// This method always returns a string as the value. 
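A short round trip through the parser's public entry points (editorial sketch, mirroring the example from doc.go): Parse builds the nested map, ToYAML renders it back out, and bracketed indices address list items.

    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/strvals"
    )

    func main() {
        // The line from the package comment, plus a list index.
        vals, err := strvals.Parse("name=value,topname.subname=value,servers[0].port=8080")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(vals["topname"]) // map[subname:value]
        fmt.Println(vals["servers"]) // [map[port:8080]]

        // ToYAML is Parse followed by a YAML re-marshal.
        y, err := strvals.ToYAML("name=value,topname.subname=value")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(y)
        // name: value
        // topname:
        //   subname: value
    }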
+func ParseIntoString(s string, dest map[string]interface{}) error { + scanner := bytes.NewBufferString(s) + t := newParser(scanner, dest, true) + return t.parse() +} + +// ParseIntoFile parses a filevals line and merges the result into dest. +// +// This method always returns a string as the value. +func ParseIntoFile(s string, dest map[string]interface{}, reader RunesValueReader) error { + scanner := bytes.NewBufferString(s) + t := newFileParser(scanner, dest, reader) + return t.parse() +} + +// RunesValueReader is a function that takes the given value (a slice of runes) +// and returns the parsed value +type RunesValueReader func([]rune) (interface{}, error) + +// parser is a simple parser that takes a strvals line and parses it into a +// map representation. +// +// where sc is the source of the original data being parsed +// where data is the final parsed data from the parses with correct types +type parser struct { + sc *bytes.Buffer + data map[string]interface{} + reader RunesValueReader +} + +func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser { + stringConverter := func(rs []rune) (interface{}, error) { + return typedVal(rs, stringBool), nil + } + return &parser{sc: sc, data: data, reader: stringConverter} +} + +func newFileParser(sc *bytes.Buffer, data map[string]interface{}, reader RunesValueReader) *parser { + return &parser{sc: sc, data: data, reader: reader} +} + +func (t *parser) parse() error { + for { + err := t.key(t.data) + if err == nil { + continue + } + if err == io.EOF { + return nil + } + return err + } +} + +func runeSet(r []rune) map[rune]bool { + s := make(map[rune]bool, len(r)) + for _, rr := range r { + s[rr] = true + } + return s +} + +func (t *parser) key(data map[string]interface{}) (reterr error) { + defer func() { + if r := recover(); r != nil { + reterr = fmt.Errorf("unable to parse key: %s", r) + } + }() + stop := runeSet([]rune{'=', '[', ',', '.'}) + for { + switch k, last, err := runesUntil(t.sc, stop); { + case err != nil: + if len(k) == 0 { + return err + } + return errors.Errorf("key %q has no value", string(k)) + //set(data, string(k), "") + //return err + case last == '[': + // We are in a list index context, so we need to set an index. + i, err := t.keyIndex() + if err != nil { + return errors.Wrap(err, "error parsing index") + } + kk := string(k) + // Find or create target list + list := []interface{}{} + if _, ok := data[kk]; ok { + list = data[kk].([]interface{}) + } + + // Now we need to get the value after the ]. + list, err = t.listItem(list, i) + set(data, kk, list) + return err + case last == '=': + //End of key. Consume =, Get value. + // FIXME: Get value list first + vl, e := t.valList() + switch e { + case nil: + set(data, string(k), vl) + return nil + case io.EOF: + set(data, string(k), "") + return e + case ErrNotList: + rs, e := t.val() + if e != nil && e != io.EOF { + return e + } + v, e := t.reader(rs) + set(data, string(k), v) + return e + default: + return e + } + + case last == ',': + // No value given. Set the value to empty string. Return error. + set(data, string(k), "") + return errors.Errorf("key %q has no value (cannot end with ,)", string(k)) + case last == '.': + // First, create or find the target map. 
+ inner := map[string]interface{}{} + if _, ok := data[string(k)]; ok { + inner = data[string(k)].(map[string]interface{}) + } + + // Recurse + e := t.key(inner) + if len(inner) == 0 { + return errors.Errorf("key map %q has no value", string(k)) + } + set(data, string(k), inner) + return e + } + } +} + +func set(data map[string]interface{}, key string, val interface{}) { + // If key is empty, don't set it. + if len(key) == 0 { + return + } + data[key] = val +} + +func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) { + // There are possible index values that are out of range on a target system + // causing a panic. This will catch the panic and return an error instead. + // The value of the index that causes a panic varies from system to system. + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("error processing index %d: %s", index, r) + } + }() + + if index < 0 { + return list, fmt.Errorf("negative %d index not allowed", index) + } + if len(list) <= index { + newlist := make([]interface{}, index+1) + copy(newlist, list) + list = newlist + } + list[index] = val + return list, nil +} + +func (t *parser) keyIndex() (int, error) { + // First, get the key. + stop := runeSet([]rune{']'}) + v, _, err := runesUntil(t.sc, stop) + if err != nil { + return 0, err + } + // v should be the index + return strconv.Atoi(string(v)) + +} +func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) { + if i < 0 { + return list, fmt.Errorf("negative %d index not allowed", i) + } + stop := runeSet([]rune{'[', '.', '='}) + switch k, last, err := runesUntil(t.sc, stop); { + case len(k) > 0: + return list, errors.Errorf("unexpected data at end of array index: %q", k) + case err != nil: + return list, err + case last == '=': + vl, e := t.valList() + switch e { + case nil: + return setIndex(list, i, vl) + case io.EOF: + return setIndex(list, i, "") + case ErrNotList: + rs, e := t.val() + if e != nil && e != io.EOF { + return list, e + } + v, e := t.reader(rs) + if e != nil { + return list, e + } + return setIndex(list, i, v) + default: + return list, e + } + case last == '[': + // now we have a nested list. Read the index and handle. + nextI, err := t.keyIndex() + if err != nil { + return list, errors.Wrap(err, "error parsing index") + } + var crtList []interface{} + if len(list) > i { + // If nested list already exists, take the value of list to next cycle. + existed := list[i] + if existed != nil { + crtList = list[i].([]interface{}) + } + } + // Now we need to get the value after the ]. + list2, err := t.listItem(crtList, nextI) + if err != nil { + return list, err + } + return setIndex(list, i, list2) + case last == '.': + // We have a nested object. Send to t.key + inner := map[string]interface{}{} + if len(list) > i { + var ok bool + inner, ok = list[i].(map[string]interface{}) + if !ok { + // We have indices out of order. Initialize empty value. 
+ list[i] = map[string]interface{}{} + inner = list[i].(map[string]interface{}) + } + } + + // Recurse + e := t.key(inner) + if e != nil { + return list, e + } + return setIndex(list, i, inner) + default: + return nil, errors.Errorf("parse error: unexpected token %v", last) + } +} + +func (t *parser) val() ([]rune, error) { + stop := runeSet([]rune{','}) + v, _, err := runesUntil(t.sc, stop) + return v, err +} + +func (t *parser) valList() ([]interface{}, error) { + r, _, e := t.sc.ReadRune() + if e != nil { + return []interface{}{}, e + } + + if r != '{' { + t.sc.UnreadRune() + return []interface{}{}, ErrNotList + } + + list := []interface{}{} + stop := runeSet([]rune{',', '}'}) + for { + switch rs, last, err := runesUntil(t.sc, stop); { + case err != nil: + if err == io.EOF { + err = errors.New("list must terminate with '}'") + } + return list, err + case last == '}': + // If this is followed by ',', consume it. + if r, _, e := t.sc.ReadRune(); e == nil && r != ',' { + t.sc.UnreadRune() + } + v, e := t.reader(rs) + list = append(list, v) + return list, e + case last == ',': + v, e := t.reader(rs) + if e != nil { + return list, e + } + list = append(list, v) + } + } +} + +func runesUntil(in io.RuneReader, stop map[rune]bool) ([]rune, rune, error) { + v := []rune{} + for { + switch r, _, e := in.ReadRune(); { + case e != nil: + return v, r, e + case inMap(r, stop): + return v, r, nil + case r == '\\': + next, _, e := in.ReadRune() + if e != nil { + return v, next, e + } + v = append(v, next) + default: + v = append(v, r) + } + } +} + +func inMap(k rune, m map[rune]bool) bool { + _, ok := m[k] + return ok +} + +func typedVal(v []rune, st bool) interface{} { + val := string(v) + + if st { + return val + } + + if strings.EqualFold(val, "true") { + return true + } + + if strings.EqualFold(val, "false") { + return false + } + + if strings.EqualFold(val, "null") { + return nil + } + + if strings.EqualFold(val, "0") { + return int64(0) + } + + // If this value does not start with zero, try parsing it to an int + if len(val) != 0 && val[0] != '0' { + if iv, err := strconv.ParseInt(val, 10, 64); err == nil { + return iv + } + } + + return val +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go deleted file mode 100644 index 8c997ec450..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ /dev/null @@ -1,325 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package version - -import ( - "bytes" - "fmt" - "regexp" - "strconv" - "strings" -) - -// Version is an opaque representation of a version number -type Version struct { - components []uint - semver bool - preRelease string - buildMetadata string -} - -var ( - // versionMatchRE splits a version string into numeric and "extra" parts - versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`) - // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release - extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`) -) - -func parse(str string, semver bool) (*Version, error) { - parts := versionMatchRE.FindStringSubmatch(str) - if parts == nil { - return nil, fmt.Errorf("could not parse %q as version", str) - } - numbers, extra := parts[1], parts[2] - - components := strings.Split(numbers, ".") - if (semver && len(components) != 3) || (!semver && len(components) < 2) { - return nil, fmt.Errorf("illegal version string %q", str) - } - - v := &Version{ - components: make([]uint, len(components)), - semver: semver, - } - for i, comp := range components { - if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" { - return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) - } - num, err := strconv.ParseUint(comp, 10, 0) - if err != nil { - return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err) - } - v.components[i] = uint(num) - } - - if semver && extra != "" { - extraParts := extraMatchRE.FindStringSubmatch(extra) - if extraParts == nil { - return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str) - } - v.preRelease, v.buildMetadata = extraParts[1], extraParts[2] - - for _, comp := range strings.Split(v.preRelease, ".") { - if _, err := strconv.ParseUint(comp, 10, 0); err == nil { - if strings.HasPrefix(comp, "0") && comp != "0" { - return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) - } - } - } - } - - return v, nil -} - -// ParseGeneric parses a "generic" version string. The version string must consist of two -// or more dot-separated numeric fields (the first of which can't have leading zeroes), -// followed by arbitrary uninterpreted data (which need not be separated from the final -// numeric field by punctuation). For convenience, leading and trailing whitespace is -// ignored, and the version can be preceded by the letter "v". See also ParseSemantic. -func ParseGeneric(str string) (*Version, error) { - return parse(str, false) -} - -// MustParseGeneric is like ParseGeneric except that it panics on error -func MustParseGeneric(str string) *Version { - v, err := ParseGeneric(str) - if err != nil { - panic(err) - } - return v -} - -// ParseSemantic parses a version string that exactly obeys the syntax and semantics of -// the "Semantic Versioning" specification (http://semver.org/) (although it ignores -// leading and trailing whitespace, and allows the version to be preceded by "v"). For -// version strings that are not guaranteed to obey the Semantic Versioning syntax, use -// ParseGeneric. 
-func ParseSemantic(str string) (*Version, error) { - return parse(str, true) -} - -// MustParseSemantic is like ParseSemantic except that it panics on error -func MustParseSemantic(str string) *Version { - v, err := ParseSemantic(str) - if err != nil { - panic(err) - } - return v -} - -// Major returns the major release number -func (v *Version) Major() uint { - return v.components[0] -} - -// Minor returns the minor release number -func (v *Version) Minor() uint { - return v.components[1] -} - -// Patch returns the patch release number if v is a Semantic Version, or 0 -func (v *Version) Patch() uint { - if len(v.components) < 3 { - return 0 - } - return v.components[2] -} - -// BuildMetadata returns the build metadata, if v is a Semantic Version, or "" -func (v *Version) BuildMetadata() string { - return v.buildMetadata -} - -// PreRelease returns the prerelease metadata, if v is a Semantic Version, or "" -func (v *Version) PreRelease() string { - return v.preRelease -} - -// Components returns the version number components -func (v *Version) Components() []uint { - return v.components -} - -// WithMajor returns copy of the version object with requested major number -func (v *Version) WithMajor(major uint) *Version { - result := *v - result.components = []uint{major, v.Minor(), v.Patch()} - return &result -} - -// WithMinor returns copy of the version object with requested minor number -func (v *Version) WithMinor(minor uint) *Version { - result := *v - result.components = []uint{v.Major(), minor, v.Patch()} - return &result -} - -// WithPatch returns copy of the version object with requested patch number -func (v *Version) WithPatch(patch uint) *Version { - result := *v - result.components = []uint{v.Major(), v.Minor(), patch} - return &result -} - -// WithPreRelease returns copy of the version object with requested prerelease -func (v *Version) WithPreRelease(preRelease string) *Version { - result := *v - result.components = []uint{v.Major(), v.Minor(), v.Patch()} - result.preRelease = preRelease - return &result -} - -// WithBuildMetadata returns copy of the version object with requested buildMetadata -func (v *Version) WithBuildMetadata(buildMetadata string) *Version { - result := *v - result.components = []uint{v.Major(), v.Minor(), v.Patch()} - result.buildMetadata = buildMetadata - return &result -} - -// String converts a Version back to a string; note that for versions parsed with -// ParseGeneric, this will not include the trailing uninterpreted portion of the version -// number. 
-func (v *Version) String() string { - if v == nil { - return "" - } - var buffer bytes.Buffer - - for i, comp := range v.components { - if i > 0 { - buffer.WriteString(".") - } - buffer.WriteString(fmt.Sprintf("%d", comp)) - } - if v.preRelease != "" { - buffer.WriteString("-") - buffer.WriteString(v.preRelease) - } - if v.buildMetadata != "" { - buffer.WriteString("+") - buffer.WriteString(v.buildMetadata) - } - - return buffer.String() -} - -// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0 -// if they are equal -func (v *Version) compareInternal(other *Version) int { - - vLen := len(v.components) - oLen := len(other.components) - for i := 0; i < vLen && i < oLen; i++ { - switch { - case other.components[i] < v.components[i]: - return 1 - case other.components[i] > v.components[i]: - return -1 - } - } - - // If components are common but one has more items and they are not zeros, it is bigger - switch { - case oLen < vLen && !onlyZeros(v.components[oLen:]): - return 1 - case oLen > vLen && !onlyZeros(other.components[vLen:]): - return -1 - } - - if !v.semver || !other.semver { - return 0 - } - - switch { - case v.preRelease == "" && other.preRelease != "": - return 1 - case v.preRelease != "" && other.preRelease == "": - return -1 - case v.preRelease == other.preRelease: // includes case where both are "" - return 0 - } - - vPR := strings.Split(v.preRelease, ".") - oPR := strings.Split(other.preRelease, ".") - for i := 0; i < len(vPR) && i < len(oPR); i++ { - vNum, err := strconv.ParseUint(vPR[i], 10, 0) - if err == nil { - oNum, err := strconv.ParseUint(oPR[i], 10, 0) - if err == nil { - switch { - case oNum < vNum: - return 1 - case oNum > vNum: - return -1 - default: - continue - } - } - } - if oPR[i] < vPR[i] { - return 1 - } else if oPR[i] > vPR[i] { - return -1 - } - } - - switch { - case len(oPR) < len(vPR): - return 1 - case len(oPR) > len(vPR): - return -1 - } - - return 0 -} - -// returns false if array contain any non-zero element -func onlyZeros(array []uint) bool { - for _, num := range array { - if num != 0 { - return false - } - } - return true -} - -// AtLeast tests if a version is at least equal to a given minimum version. If both -// Versions are Semantic Versions, this will use the Semantic Version comparison -// algorithm. Otherwise, it will compare only the numeric components, with non-present -// components being considered "0" (ie, "1.4" is equal to "1.4.0"). -func (v *Version) AtLeast(min *Version) bool { - return v.compareInternal(min) != -1 -} - -// LessThan tests if a version is less than a given version. (It is exactly the opposite -// of AtLeast, for situations where asking "is v too old?" makes more sense than asking -// "is v new enough?".) -func (v *Version) LessThan(other *Version) bool { - return v.compareInternal(other) == -1 -} - -// Compare compares v against a version string (which will be parsed as either Semantic -// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if -// it is greater than other, or 0 if they are equal. 
-func (v *Version) Compare(other string) (int, error) { - ov, err := parse(other, v.semver) - if err != nil { - return 0, err - } - return v.compareInternal(ov), nil -} diff --git a/vendor/k8s.io/client-go/discovery/cached/legacy.go b/vendor/k8s.io/client-go/discovery/cached/legacy.go new file mode 100644 index 0000000000..725900a91c --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached/legacy.go @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memory + +import ( + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" +) + +// NewMemCacheClient is DEPRECATED. Use memory.NewMemCacheClient directly. +func NewMemCacheClient(delegate discovery.DiscoveryInterface) discovery.CachedDiscoveryInterface { + return memory.NewMemCacheClient(delegate) +} + +// ErrCacheNotFound is DEPRECATED. Use memory.ErrCacheNotFound directly. +var ErrCacheNotFound = memory.ErrCacheNotFound diff --git a/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go new file mode 100644 index 0000000000..9de389fa7e --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go @@ -0,0 +1,233 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memory + +import ( + "errors" + "fmt" + "sync" + "syscall" + + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + + errorsutil "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + restclient "k8s.io/client-go/rest" +) + +type cacheEntry struct { + resourceList *metav1.APIResourceList + err error +} + +// memCacheClient can Invalidate() to stay up-to-date with discovery +// information. +// +// TODO: Switch to a watch interface. Right now it will poll after each +// Invalidate() call. +type memCacheClient struct { + delegate discovery.DiscoveryInterface + + lock sync.RWMutex + groupToServerResources map[string]*cacheEntry + groupList *metav1.APIGroupList + cacheValid bool +} + +// Error Constants +var ( + ErrCacheNotFound = errors.New("not found") +) + +var _ discovery.CachedDiscoveryInterface = &memCacheClient{} + +// isTransientConnectionError checks whether given error is "Connection refused" or +// "Connection reset" error which usually means that apiserver is temporarily +// unavailable. 
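An editorial sketch of wiring the cache up (the kubeconfig path is illustrative): wrap a real discovery client, let the first call populate the cache, and call Invalidate when staleness matters. As the constructor's note further down says, misses do not fall back to live lookups.

    package main

    import (
        "log"

        "k8s.io/client-go/discovery"
        "k8s.io/client-go/discovery/cached/memory"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
        if err != nil {
            log.Fatal(err)
        }
        dc, err := discovery.NewDiscoveryClientForConfig(cfg)
        if err != nil {
            log.Fatal(err)
        }
        cachedDC := memory.NewMemCacheClient(dc)

        // First call polls the API server and fills the cache.
        groups, err := cachedDC.ServerGroups()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("discovered %d API group(s), fresh=%v",
            len(groups.Groups), cachedDC.Fresh())

        // Drop cached data; the next discovery call re-polls the server.
        cachedDC.Invalidate()
    }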
+func isTransientConnectionError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == syscall.ECONNREFUSED || errno == syscall.ECONNRESET + } + return false +} + +func isTransientError(err error) bool { + if isTransientConnectionError(err) { + return true + } + + if t, ok := err.(errorsutil.APIStatus); ok && t.Status().Code >= 500 { + return true + } + + return errorsutil.IsTooManyRequests(err) +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *memCacheClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + cachedVal, ok := d.groupToServerResources[groupVersion] + if !ok { + return nil, ErrCacheNotFound + } + + if cachedVal.err != nil && isTransientError(cachedVal.err) { + r, err := d.serverResourcesForGroupVersion(groupVersion) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", groupVersion, err)) + } + cachedVal = &cacheEntry{r, err} + d.groupToServerResources[groupVersion] = cachedVal + } + + return cachedVal.resourceList, cachedVal.err +} + +// ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. +func (d *memCacheClient) ServerResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerResources(d) +} + +// ServerGroupsAndResources returns the groups and supported resources for all groups and versions. +func (d *memCacheClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return discovery.ServerGroupsAndResources(d) +} + +func (d *memCacheClient) ServerGroups() (*metav1.APIGroupList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + return d.groupList, nil +} + +func (d *memCacheClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *memCacheClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredResources(d) +} + +func (d *memCacheClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredNamespacedResources(d) +} + +func (d *memCacheClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *memCacheClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *memCacheClient) Fresh() bool { + d.lock.RLock() + defer d.lock.RUnlock() + // Return whether the cache is populated at all. It is still possible that + // a single entry is missing due to transient errors and the attempt to read + // that entry will trigger retry. + return d.cacheValid +} + +// Invalidate enforces that no cached data that is older than the current time +// is used. +func (d *memCacheClient) Invalidate() { + d.lock.Lock() + defer d.lock.Unlock() + d.cacheValid = false + d.groupToServerResources = nil + d.groupList = nil +} + +// refreshLocked refreshes the state of cache. The caller must hold d.lock for +// writing. +func (d *memCacheClient) refreshLocked() error { + // TODO: Could this multiplicative set of calls be replaced by a single call + // to ServerResources? 
If it's possible for more than one resulting + // APIResourceList to have the same GroupVersion, the lists would need merged. + gl, err := d.delegate.ServerGroups() + if err != nil || len(gl.Groups) == 0 { + utilruntime.HandleError(fmt.Errorf("couldn't get current server API group list: %v", err)) + return err + } + + wg := &sync.WaitGroup{} + resultLock := &sync.Mutex{} + rl := map[string]*cacheEntry{} + for _, g := range gl.Groups { + for _, v := range g.Versions { + gv := v.GroupVersion + wg.Add(1) + go func() { + defer wg.Done() + defer utilruntime.HandleCrash() + + r, err := d.serverResourcesForGroupVersion(gv) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", gv, err)) + } + + resultLock.Lock() + defer resultLock.Unlock() + rl[gv] = &cacheEntry{r, err} + }() + } + } + wg.Wait() + + d.groupToServerResources, d.groupList = rl, gl + d.cacheValid = true + return nil +} + +func (d *memCacheClient) serverResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + r, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return r, err + } + if len(r.APIResources) == 0 { + return r, fmt.Errorf("Got empty response for: %v", groupVersion) + } + return r, nil +} + +// NewMemCacheClient creates a new CachedDiscoveryInterface which caches +// discovery information in memory and will stay up-to-date if Invalidate is +// called with regularity. +// +// NOTE: The client will NOT resort to live lookups on cache misses. +func NewMemCacheClient(delegate discovery.DiscoveryInterface) discovery.CachedDiscoveryInterface { + return &memCacheClient{ + delegate: delegate, + groupToServerResources: map[string]*cacheEntry{}, + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6119893ccb..b11df00bd1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -9,9 +9,15 @@ github.com/BurntSushi/toml # github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e => github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e ## explicit github.com/MakeNowJust/heredoc +# github.com/Masterminds/goutils v1.1.0 => github.com/Masterminds/goutils v1.1.0 +github.com/Masterminds/goutils # github.com/Masterminds/semver/v3 v3.1.1 => github.com/Masterminds/semver/v3 v3.0.1 ## explicit github.com/Masterminds/semver/v3 +# github.com/Masterminds/sprig/v3 v3.2.2 => github.com/Masterminds/sprig/v3 v3.0.0 +github.com/Masterminds/sprig/v3 +# github.com/Masterminds/squirrel v1.5.0 => github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 +github.com/Masterminds/squirrel # github.com/Microsoft/go-winio v0.5.0 => github.com/Microsoft/go-winio v0.4.12 github.com/Microsoft/go-winio # github.com/Microsoft/hcsshim v0.8.7 => github.com/Microsoft/hcsshim v0.8.6 @@ -154,7 +160,7 @@ github.com/cyphar/filepath-securejoin github.com/davecgh/go-spew/spew # github.com/deckarep/golang-set v1.7.1 => github.com/deckarep/golang-set v1.7.1 ## explicit -# github.com/deislabs/oras v0.8.1 => github.com/deislabs/oras v0.8.1 +# github.com/deislabs/oras v0.11.1 => github.com/deislabs/oras v0.8.1 github.com/deislabs/oras/pkg/artifact github.com/deislabs/oras/pkg/auth github.com/deislabs/oras/pkg/auth/docker @@ -456,6 +462,8 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token +# github.com/huandu/xstrings v1.2.0 => github.com/huandu/xstrings v1.2.0 +github.com/huandu/xstrings # github.com/imdario/mergo v0.3.12 => 
github.com/imdario/mergo v0.3.9 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 => github.com/inconshreveable/mousetrap v1.0.0 @@ -464,6 +472,9 @@ github.com/inconshreveable/mousetrap github.com/jbenet/go-context/io # github.com/jmespath/go-jmespath v0.3.0 => github.com/jmespath/go-jmespath v0.3.0 github.com/jmespath/go-jmespath +# github.com/jmoiron/sqlx v1.3.1 => github.com/jmoiron/sqlx v1.2.0 +github.com/jmoiron/sqlx +github.com/jmoiron/sqlx/reflectx # github.com/json-iterator/go v1.1.11 => github.com/json-iterator/go v1.1.10 ## explicit github.com/json-iterator/go @@ -492,6 +503,16 @@ github.com/kubernetes-csi/external-snapshotter/client/v3/listers/volumesnapshot/ # github.com/kubesphere/sonargo v0.0.2 => github.com/kubesphere/sonargo v0.0.2 ## explicit github.com/kubesphere/sonargo/sonar +# github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 => github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 +## explicit +github.com/lann/builder +# github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 => github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 +## explicit +github.com/lann/ps +# github.com/lib/pq v1.10.0 => github.com/lib/pq v1.2.0 +github.com/lib/pq +github.com/lib/pq/oid +github.com/lib/pq/scram # github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de => github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de github.com/liggitt/tabwriter # github.com/magiconair/properties v1.8.0 => github.com/magiconair/properties v1.8.0 @@ -509,7 +530,7 @@ github.com/mattn/go-isatty github.com/mattn/go-runewidth # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 => github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/mitchellh/copystructure v1.0.0 => github.com/mitchellh/copystructure v1.0.0 +# github.com/mitchellh/copystructure v1.1.1 => github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 => github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir @@ -567,7 +588,7 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.14.0 => github.com/onsi/gomega v1.10.1 +# github.com/onsi/gomega v1.15.0 => github.com/onsi/gomega v1.10.1 ## explicit github.com/onsi/gomega github.com/onsi/gomega/format @@ -631,6 +652,25 @@ github.com/opencontainers/runc/libcontainer/system github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +# github.com/operator-framework/helm-operator-plugins v0.0.8-0.20210810182245-240cc447b3de => github.com/operator-framework/helm-operator-plugins v0.0.8-0.20210810182245-240cc447b3de +## explicit +github.com/operator-framework/helm-operator-plugins/pkg/annotation +github.com/operator-framework/helm-operator-plugins/pkg/client +github.com/operator-framework/helm-operator-plugins/pkg/hook +github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/controllerutil +github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/predicate +github.com/operator-framework/helm-operator-plugins/pkg/internal/sdk/status +github.com/operator-framework/helm-operator-plugins/pkg/manifestutil +github.com/operator-framework/helm-operator-plugins/pkg/reconciler 
+github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/conditions +github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/hook +github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/updater +github.com/operator-framework/helm-operator-plugins/pkg/reconciler/internal/values +github.com/operator-framework/helm-operator-plugins/pkg/values +github.com/operator-framework/helm-operator-plugins/pkg/watches +# github.com/operator-framework/operator-lib v0.3.0 => github.com/operator-framework/operator-lib v0.3.0 +github.com/operator-framework/operator-lib/handler +github.com/operator-framework/operator-lib/handler/internal/metrics # github.com/patrickmn/go-cache v2.1.0+incompatible => github.com/patrickmn/go-cache v2.1.0+incompatible ## explicit github.com/patrickmn/go-cache @@ -752,6 +792,9 @@ github.com/prometheus/prometheus/util/teststorage github.com/prometheus/prometheus/util/testutil # github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a => github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a github.com/rcrowley/go-metrics +# github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 => github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 +github.com/rubenv/sql-migrate +github.com/rubenv/sql-migrate/sqlparse # github.com/russross/blackfriday v1.5.2 => github.com/russross/blackfriday v1.5.2 github.com/russross/blackfriday # github.com/sergi/go-diff v1.1.0 => github.com/sergi/go-diff v1.0.0 @@ -899,6 +942,7 @@ golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/poly1305 golang.org/x/crypto/salsa20/salsa +golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf @@ -1075,6 +1119,8 @@ gopkg.in/asn1-ber.v1 # gopkg.in/cas.v2 v2.2.0 => gopkg.in/cas.v2 v2.2.0 ## explicit gopkg.in/cas.v2 +# gopkg.in/gorp.v1 v1.7.2 => gopkg.in/gorp.v1 v1.7.2 +gopkg.in/gorp.v1 # gopkg.in/inf.v0 v0.9.1 => gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 # gopkg.in/natefinch/lumberjack.v2 v2.0.0 => gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -1093,7 +1139,7 @@ gopkg.in/src-d/go-billy.v4/helper/chroot gopkg.in/src-d/go-billy.v4/helper/polyfill gopkg.in/src-d/go-billy.v4/osfs gopkg.in/src-d/go-billy.v4/util -# gopkg.in/src-d/go-git.v4 v4.11.0 => gopkg.in/src-d/go-git.v4 v4.11.0 +# gopkg.in/src-d/go-git.v4 v4.13.1 => gopkg.in/src-d/go-git.v4 v4.11.0 ## explicit gopkg.in/src-d/go-git.v4 gopkg.in/src-d/go-git.v4/config @@ -1153,29 +1199,43 @@ gotest.tools/assert/cmp gotest.tools/internal/difflib gotest.tools/internal/format gotest.tools/internal/source -# helm.sh/helm/v3 v3.5.0 => helm.sh/helm/v3 v3.5.0 +# helm.sh/helm/v3 v3.6.3 => helm.sh/helm/v3 v3.6.3 ## explicit helm.sh/helm/v3/internal/experimental/registry helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/ignore +helm.sh/helm/v3/internal/resolver helm.sh/helm/v3/internal/sympath helm.sh/helm/v3/internal/third_party/dep/fs helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util helm.sh/helm/v3/internal/tlsutil helm.sh/helm/v3/internal/urlutil helm.sh/helm/v3/internal/version +helm.sh/helm/v3/pkg/action helm.sh/helm/v3/pkg/chart helm.sh/helm/v3/pkg/chart/loader helm.sh/helm/v3/pkg/chartutil helm.sh/helm/v3/pkg/cli +helm.sh/helm/v3/pkg/downloader +helm.sh/helm/v3/pkg/engine +helm.sh/helm/v3/pkg/gates helm.sh/helm/v3/pkg/getter helm.sh/helm/v3/pkg/helmpath helm.sh/helm/v3/pkg/helmpath/xdg helm.sh/helm/v3/pkg/kube +helm.sh/helm/v3/pkg/kube/fake 
+helm.sh/helm/v3/pkg/lint
+helm.sh/helm/v3/pkg/lint/rules
+helm.sh/helm/v3/pkg/lint/support
 helm.sh/helm/v3/pkg/plugin
+helm.sh/helm/v3/pkg/postrender
 helm.sh/helm/v3/pkg/provenance
 helm.sh/helm/v3/pkg/release
+helm.sh/helm/v3/pkg/releaseutil
 helm.sh/helm/v3/pkg/repo
+helm.sh/helm/v3/pkg/storage
+helm.sh/helm/v3/pkg/storage/driver
+helm.sh/helm/v3/pkg/strvals
 helm.sh/helm/v3/pkg/time
 # honnef.co/go/tools v0.0.1-2020.1.3 => honnef.co/go/tools v0.0.1-2020.1.3
 honnef.co/go/tools/arg
@@ -1378,7 +1438,6 @@ k8s.io/apimachinery/pkg/util/strategicpatch
 k8s.io/apimachinery/pkg/util/uuid
 k8s.io/apimachinery/pkg/util/validation
 k8s.io/apimachinery/pkg/util/validation/field
-k8s.io/apimachinery/pkg/util/version
 k8s.io/apimachinery/pkg/util/wait
 k8s.io/apimachinery/pkg/util/waitgroup
 k8s.io/apimachinery/pkg/util/yaml
@@ -1561,7 +1620,9 @@ k8s.io/client-go/applyconfigurations/storage/v1
 k8s.io/client-go/applyconfigurations/storage/v1alpha1
 k8s.io/client-go/applyconfigurations/storage/v1beta1
 k8s.io/client-go/discovery
+k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
+k8s.io/client-go/discovery/cached/memory
 k8s.io/client-go/discovery/fake
 k8s.io/client-go/dynamic
 k8s.io/client-go/informers
@@ -1848,7 +1909,7 @@ k8s.io/klog/klogr
 # k8s.io/klog/v2 v2.8.0 => k8s.io/klog/v2 v2.8.0
 ## explicit
 k8s.io/klog/v2
-# k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d => k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
+# k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e => k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
 ## explicit
 k8s.io/kube-openapi/cmd/openapi-gen
 k8s.io/kube-openapi/cmd/openapi-gen/args
@@ -2059,7 +2120,7 @@ sigs.k8s.io/kustomize/api/resid
 sigs.k8s.io/kustomize/api/resmap
 sigs.k8s.io/kustomize/api/resource
 sigs.k8s.io/kustomize/api/types
-# sigs.k8s.io/kustomize/kyaml v0.10.17 => sigs.k8s.io/kustomize/kyaml v0.10.17
+# sigs.k8s.io/kustomize/kyaml v0.10.21 => sigs.k8s.io/kustomize/kyaml v0.10.17
 sigs.k8s.io/kustomize/kyaml/comments
 sigs.k8s.io/kustomize/kyaml/errors
 sigs.k8s.io/kustomize/kyaml/ext
@@ -2098,6 +2159,7 @@ sigs.k8s.io/structured-merge-diff/v4/value
 # sigs.k8s.io/yaml v1.2.0 => sigs.k8s.io/yaml v1.2.0
 ## explicit
 sigs.k8s.io/yaml
+# bitbucket.org/liamstask/goose => bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c
 # cloud.google.com/go => cloud.google.com/go v0.56.0
 # cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.4.0
 # cloud.google.com/go/bigtable => cloud.google.com/go/bigtable v1.2.0
@@ -2121,6 +2183,8 @@ sigs.k8s.io/yaml
 # github.com/BurntSushi/toml => github.com/BurntSushi/toml v0.3.1
 # github.com/DATA-DOG/go-sqlmock => github.com/DATA-DOG/go-sqlmock v1.3.3
 # github.com/DataDog/datadog-go => github.com/DataDog/datadog-go v3.2.0+incompatible
+# github.com/GeertJohan/go.incremental => github.com/GeertJohan/go.incremental v1.0.0
+# github.com/GeertJohan/go.rice => github.com/GeertJohan/go.rice v1.0.0
 # github.com/Knetic/govaluate => github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible
 # github.com/MakeNowJust/heredoc => github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e
 # github.com/Masterminds/goutils => github.com/Masterminds/goutils v1.1.0
@@ -2143,6 +2207,7 @@ sigs.k8s.io/yaml
 # github.com/afex/hystrix-go => github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5
 # github.com/agnivade/levenshtein => github.com/agnivade/levenshtein v1.0.1
 # github.com/ajstarks/svgo => github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af
+# github.com/akavel/rsrc => github.com/akavel/rsrc v0.8.0
 # github.com/alcortesm/tgz => github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7
 # github.com/alecthomas/template => github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751
 # github.com/alecthomas/units => github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d
@@ -2170,6 +2235,7 @@ sigs.k8s.io/yaml
 # github.com/bgentry/speakeasy => github.com/bgentry/speakeasy v0.1.0
 # github.com/bitly/go-hostpool => github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932
 # github.com/blang/semver => github.com/blang/semver v3.5.0+incompatible
+# github.com/blang/semver/v4 => github.com/blang/semver/v4 v4.0.0
 # github.com/bmizerany/assert => github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869
 # github.com/bmizerany/pat => github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
 # github.com/boltdb/bolt => github.com/boltdb/bolt v1.3.1
@@ -2183,6 +2249,7 @@ sigs.k8s.io/yaml
 # github.com/casbin/casbin/v2 => github.com/casbin/casbin/v2 v2.1.2
 # github.com/cenkalti/backoff => github.com/cenkalti/backoff v2.2.1+incompatible
 # github.com/census-instrumentation/opencensus-proto => github.com/census-instrumentation/opencensus-proto v0.2.1
+# github.com/certifi/gocertifi => github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261
 # github.com/cespare/xxhash => github.com/cespare/xxhash v1.1.0
 # github.com/cespare/xxhash/v2 => github.com/cespare/xxhash/v2 v2.1.1
 # github.com/chai2010/gettext-go => github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5
@@ -2194,6 +2261,10 @@ sigs.k8s.io/yaml
 # github.com/circonus-labs/circonus-gometrics => github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
 # github.com/circonus-labs/circonusllhist => github.com/circonus-labs/circonusllhist v0.1.3
 # github.com/clbanning/x2j => github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec
+# github.com/cloudflare/backoff => github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a
+# github.com/cloudflare/cfssl => github.com/cloudflare/cfssl v1.5.0
+# github.com/cloudflare/go-metrics => github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41
+# github.com/cloudflare/redoctober => github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c
 # github.com/cockroachdb/apd => github.com/cockroachdb/apd v1.1.0
 # github.com/cockroachdb/cockroach-go => github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
 # github.com/cockroachdb/datadriven => github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa
@@ -2222,6 +2293,7 @@ sigs.k8s.io/yaml
 # github.com/cznic/sortutil => github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65
 # github.com/cznic/strutil => github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186
 # github.com/cznic/zappy => github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc
+# github.com/daaku/go.zipexe => github.com/daaku/go.zipexe v1.0.0
 # github.com/dave/jennifer => github.com/dave/jennifer v1.2.0
 # github.com/davecgh/go-spew => github.com/davecgh/go-spew v1.1.1
 # github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd
@@ -2279,10 +2351,12 @@ sigs.k8s.io/yaml
 # github.com/fsouza/fake-gcs-server => github.com/fsouza/fake-gcs-server v1.7.0
 # github.com/fvbommel/sortorder => github.com/fvbommel/sortorder v1.0.1
 # github.com/garyburd/redigo => github.com/garyburd/redigo v1.6.0
+# github.com/getsentry/raven-go => github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7
 # github.com/ghodss/yaml => github.com/ghodss/yaml v1.0.0
 # github.com/gliderlabs/ssh => github.com/gliderlabs/ssh v0.1.1
 # github.com/glycerine/go-unsnap-stream => github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd
 # github.com/glycerine/goconvey => github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31
+# github.com/go-bindata/go-bindata/v3 => github.com/go-bindata/go-bindata/v3 v3.1.3
 # github.com/go-errors/errors => github.com/go-errors/errors v1.0.1
 # github.com/go-kit/kit => github.com/go-kit/kit v0.10.0
 # github.com/go-ldap/ldap => github.com/go-ldap/ldap v3.0.3+incompatible
@@ -2345,6 +2419,7 @@ sigs.k8s.io/yaml
 # github.com/gomodule/redigo => github.com/gomodule/redigo v2.0.0+incompatible
 # github.com/google/addlicense => github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76
 # github.com/google/btree => github.com/google/btree v1.0.0
+# github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.21
 # github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0
 # github.com/google/go-cmp => github.com/google/go-cmp v0.4.0
 # github.com/google/go-containerregistry => github.com/google/go-containerregistry v0.6.0
@@ -2394,6 +2469,7 @@ sigs.k8s.io/yaml
 # github.com/hodgesds/perf-utils => github.com/hodgesds/perf-utils v0.0.8
 # github.com/huandu/xstrings => github.com/huandu/xstrings v1.2.0
 # github.com/hudl/fargo => github.com/hudl/fargo v1.3.0
+# github.com/iancoleman/strcase => github.com/iancoleman/strcase v0.1.2
 # github.com/ianlancetaylor/demangle => github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6
 # github.com/imdario/mergo => github.com/imdario/mergo v0.3.9
 # github.com/inconshreveable/mousetrap => github.com/inconshreveable/mousetrap v1.0.0
@@ -2411,7 +2487,9 @@ sigs.k8s.io/yaml
 # github.com/jbenet/go-context => github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
 # github.com/jessevdk/go-flags => github.com/jessevdk/go-flags v1.4.0
 # github.com/jmespath/go-jmespath => github.com/jmespath/go-jmespath v0.3.0
+# github.com/jmhodges/clock => github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548
 # github.com/jmoiron/sqlx => github.com/jmoiron/sqlx v1.2.0
+# github.com/joelanford/go-apidiff => github.com/joelanford/go-apidiff v0.1.0
 # github.com/joeshaw/multierror => github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901
 # github.com/joho/godotenv => github.com/joho/godotenv v1.3.0
 # github.com/jonboulle/clockwork => github.com/jonboulle/clockwork v0.1.0
@@ -2431,6 +2509,8 @@ sigs.k8s.io/yaml
 # github.com/kevinburke/ssh_config => github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e
 # github.com/kisielk/errcheck => github.com/kisielk/errcheck v1.2.0
 # github.com/kisielk/gotool => github.com/kisielk/gotool v1.0.0
+# github.com/kisielk/sqlstruct => github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49
+# github.com/kisom/goutils => github.com/kisom/goutils v1.1.0
 # github.com/klauspost/compress => github.com/klauspost/compress v1.9.5
 # github.com/klauspost/cpuid => github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5
 # github.com/klauspost/crc32 => github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6
@@ -2443,7 +2523,10 @@ sigs.k8s.io/yaml
 # github.com/kshvakov/clickhouse => github.com/kshvakov/clickhouse v1.3.5
 # github.com/kubernetes-csi/external-snapshotter/client/v3 => github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0
 # github.com/kubesphere/sonargo => github.com/kubesphere/sonargo v0.0.2
+# github.com/kylelemons/go-gypsy => github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28
 # github.com/kylelemons/godebug => github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb
+# github.com/lann/builder => github.com/lann/builder v0.0.0-20180802200727-47ae307949d0
+# github.com/lann/ps => github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0
 # github.com/leanovate/gopter => github.com/leanovate/gopter v0.2.4
 # github.com/leodido/go-urn => github.com/leodido/go-urn v0.0.0-20181204092800-a67a23e1c1af
 # github.com/lib/pq => github.com/lib/pq v1.2.0
@@ -2473,6 +2556,8 @@ sigs.k8s.io/yaml
 # github.com/mdlayher/wifi => github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee
 # github.com/mgutz/ansi => github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b
 # github.com/miekg/dns => github.com/miekg/dns v1.1.29
+# github.com/mikefarah/yaml/v2 => github.com/mikefarah/yaml/v2 v2.4.0
+# github.com/mikefarah/yq/v2 => github.com/mikefarah/yq/v2 v2.4.1
 # github.com/minio/md5-simd => github.com/minio/md5-simd v1.1.0
 # github.com/minio/minio-go/v7 => github.com/minio/minio-go/v7 v7.0.2
 # github.com/minio/sha256-simd => github.com/minio/sha256-simd v0.1.1
@@ -2494,6 +2579,7 @@ sigs.k8s.io/yaml
 # github.com/morikuni/aec => github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c
 # github.com/mozillazg/go-cos => github.com/mozillazg/go-cos v0.13.0
 # github.com/mozillazg/go-httpheader => github.com/mozillazg/go-httpheader v0.2.1
+# github.com/mreiferson/go-httpclient => github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474
 # github.com/mschoch/smat => github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae
 # github.com/munnerz/goautoneg => github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 # github.com/mwitkow/go-conntrack => github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@@ -2506,6 +2592,7 @@ sigs.k8s.io/yaml
 # github.com/nats-io/nuid => github.com/nats-io/nuid v1.0.1
 # github.com/ncw/swift => github.com/ncw/swift v1.0.50
 # github.com/niemeyer/pretty => github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e
+# github.com/nkovacs/streamquote => github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229
 # github.com/nxadm/tail => github.com/nxadm/tail v1.4.4
 # github.com/oklog/oklog => github.com/oklog/oklog v0.3.2
 # github.com/oklog/run => github.com/oklog/run v1.1.0
@@ -2525,6 +2612,9 @@ sigs.k8s.io/yaml
 # github.com/opentracing/opentracing-go => github.com/opentracing/opentracing-go v1.1.0
 # github.com/openzipkin-contrib/zipkin-go-opentracing => github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5
 # github.com/openzipkin/zipkin-go => github.com/openzipkin/zipkin-go v0.2.2
+# github.com/operator-framework/api => github.com/operator-framework/api v0.4.0
+# github.com/operator-framework/helm-operator-plugins => github.com/operator-framework/helm-operator-plugins v0.0.8-0.20210810182245-240cc447b3de
+# github.com/operator-framework/operator-lib => github.com/operator-framework/operator-lib v0.3.0
 # github.com/pact-foundation/pact-go => github.com/pact-foundation/pact-go v1.0.4
 # github.com/pascaldekloe/goe => github.com/pascaldekloe/goe v0.1.0
 # github.com/patrickmn/go-cache => github.com/patrickmn/go-cache v2.1.0+incompatible
@@ -2611,9 +2701,12 @@ sigs.k8s.io/yaml
 # github.com/uber/jaeger-lib => github.com/uber/jaeger-lib v2.2.0+incompatible
 # github.com/ugorji/go => github.com/ugorji/go v1.1.4
 # github.com/urfave/cli => github.com/urfave/cli v1.20.0
+# github.com/valyala/bytebufferpool => github.com/valyala/bytebufferpool v1.0.0
+# github.com/valyala/fasttemplate => github.com/valyala/fasttemplate v1.0.1
 # github.com/vektah/gqlparser => github.com/vektah/gqlparser v1.1.2
 # github.com/weaveworks/common => github.com/weaveworks/common v0.0.0-20200820123129-280614068c5e
 # github.com/weaveworks/promrus => github.com/weaveworks/promrus v1.2.0
+# github.com/weppos/publicsuffix-go => github.com/weppos/publicsuffix-go v0.13.0
 # github.com/willf/bitset => github.com/willf/bitset v1.1.3
 # github.com/xanzy/go-gitlab => github.com/xanzy/go-gitlab v0.15.0
 # github.com/xanzy/ssh-agent => github.com/xanzy/ssh-agent v0.2.1
@@ -2631,6 +2724,10 @@ sigs.k8s.io/yaml
 # github.com/yvasiyarov/gorelic => github.com/yvasiyarov/gorelic v0.0.6
 # github.com/yvasiyarov/newrelic_platform_go => github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f
 # github.com/ziutek/mymysql => github.com/ziutek/mymysql v1.5.4
+# github.com/zmap/rc2 => github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521
+# github.com/zmap/zcertificate => github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4
+# github.com/zmap/zcrypto => github.com/zmap/zcrypto v0.0.0-20200911161511-43ff0ea04f21
+# github.com/zmap/zlint/v2 => github.com/zmap/zlint/v2 v2.2.1
 # gitlab.com/nyarla/go-crypt => gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b
 # go.elastic.co/apm => go.elastic.co/apm v1.5.0
 # go.elastic.co/apm/module/apmhttp => go.elastic.co/apm/module/apmhttp v1.5.0
@@ -2684,9 +2781,11 @@ sigs.k8s.io/yaml
 # gopkg.in/go-playground/assert.v1 => gopkg.in/go-playground/assert.v1 v1.2.1
 # gopkg.in/go-playground/validator.v9 => gopkg.in/go-playground/validator.v9 v9.27.0
 # gopkg.in/gorp.v1 => gopkg.in/gorp.v1 v1.7.2
+# gopkg.in/imdario/mergo.v0 => gopkg.in/imdario/mergo.v0 v0.3.7
 # gopkg.in/inf.v0 => gopkg.in/inf.v0 v0.9.1
 # gopkg.in/ini.v1 => gopkg.in/ini.v1 v1.57.0
 # gopkg.in/natefinch/lumberjack.v2 => gopkg.in/natefinch/lumberjack.v2 v2.0.0
+# gopkg.in/op/go-logging.v1 => gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
 # gopkg.in/square/go-jose.v1 => gopkg.in/square/go-jose.v1 v1.1.2
 # gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.4.0
 # gopkg.in/src-d/go-billy.v4 => gopkg.in/src-d/go-billy.v4 v4.3.0
@@ -2699,7 +2798,7 @@ sigs.k8s.io/yaml
 # gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
 # gotest.tools => gotest.tools v2.2.0+incompatible
 # gotest.tools/v3 => gotest.tools/v3 v3.0.3
-# helm.sh/helm/v3 => helm.sh/helm/v3 v3.5.0
+# helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.3
 # honnef.co/go/tools => honnef.co/go/tools v0.0.1-2020.1.3
 # howett.net/plist => howett.net/plist v0.0.0-20181124034731-591f970eefbb
 # istio.io/api => istio.io/api v0.0.0-20201113182140-d4b7e3fc2b44
@@ -2734,6 +2833,7 @@ sigs.k8s.io/yaml
 # sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.3
 # sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.6.2
 # sigs.k8s.io/kind => sigs.k8s.io/kind v0.8.1
+# sigs.k8s.io/kubebuilder/v3 => sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210716121009-fde793f20067
 # sigs.k8s.io/kubefed => sigs.k8s.io/kubefed v0.8.1
 # sigs.k8s.io/kustomize/api => sigs.k8s.io/kustomize/api v0.8.8
 # sigs.k8s.io/kustomize/cmd/config => sigs.k8s.io/kustomize/cmd/config v0.9.10