From 9293a7f0fc45873d74aac072cf6a22961e7d5707 Mon Sep 17 00:00:00 2001 From: Rohit-PX Date: Mon, 15 May 2023 23:25:31 +0000 Subject: [PATCH] commit to repro torpedo vendor issue Signed-off-by: Rohit-PX --- go.mod | 8 +- go.sum | 58 +- .../github.com/fsnotify/fsnotify/.gitignore | 10 +- vendor/github.com/fsnotify/fsnotify/AUTHORS | 62 -- .../github.com/fsnotify/fsnotify/CHANGELOG.md | 113 +++ .../fsnotify/fsnotify/CONTRIBUTING.md | 72 +- vendor/github.com/fsnotify/fsnotify/LICENSE | 47 +- vendor/github.com/fsnotify/fsnotify/README.md | 205 +++-- .../fsnotify/fsnotify/backend_fen.go | 162 ++++ .../fsnotify/fsnotify/backend_inotify.go | 459 +++++++++++ .../fsnotify/fsnotify/backend_kqueue.go | 707 +++++++++++++++++ .../fsnotify/fsnotify/backend_other.go | 66 ++ .../fsnotify/fsnotify/backend_windows.go | 746 ++++++++++++++++++ vendor/github.com/fsnotify/fsnotify/fen.go | 38 - .../github.com/fsnotify/fsnotify/fsnotify.go | 80 +- .../fsnotify/fsnotify/fsnotify_unsupported.go | 36 - .../github.com/fsnotify/fsnotify/inotify.go | 351 -------- .../fsnotify/fsnotify/inotify_poller.go | 187 ----- vendor/github.com/fsnotify/fsnotify/kqueue.go | 535 ------------- vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 208 +++++ .../{open_mode_bsd.go => system_bsd.go} | 4 - .../{open_mode_darwin.go => system_darwin.go} | 4 - .../github.com/fsnotify/fsnotify/windows.go | 586 -------------- .../drivers/storage/portworx/util/util.go | 33 +- .../operator/drivers/storage/storage.go | 2 +- .../operator/pkg/apis/addtoscheme_core_v1.go | 10 + .../pkg/apis/addtoscheme_portworx_v1.go | 10 + .../libopenstorage/operator/pkg/apis/apis.go | 13 + .../operator/pkg/apis/core/v1/storagenode.go | 6 +- .../pkg/apis/core/v1/zz_generated.deepcopy.go | 18 +- .../operator/pkg/apis/portworx/portworx.go | 9 + .../operator/pkg/apis/portworx/v1/doc.go | 4 + .../pkg/apis/portworx/v1/portworxdiag.go | 95 +++ .../operator/pkg/apis/portworx/v1/register.go | 25 + .../apis/portworx/v1/zz_generated.deepcopy.go | 195 +++++ .../clientset/versioned/scheme/register.go | 14 +- .../operator/pkg/mock/storagedriver.mock.go | 8 +- .../operator/pkg/util/test/util.go | 18 +- .../libopenstorage/operator/pkg/util/util.go | 45 +- vendor/github.com/onsi/gomega/.gitignore | 1 + vendor/github.com/onsi/gomega/CHANGELOG.md | 86 +- vendor/github.com/onsi/gomega/RELEASING.md | 2 +- .../github.com/onsi/gomega/format/format.go | 78 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 175 ++-- .../onsi/gomega/internal/assertion.go | 8 +- .../onsi/gomega/internal/async_assertion.go | 413 +++++++--- .../onsi/gomega/internal/duration_bundle.go | 16 +- .../github.com/onsi/gomega/internal/gomega.go | 68 +- .../gomega/internal/polling_signal_error.go | 106 +++ vendor/github.com/onsi/gomega/matchers.go | 3 +- vendor/github.com/onsi/gomega/types/types.go | 9 +- .../torpedo/drivers/node/node_registry.go | 22 + .../portworx/torpedo/drivers/node/ssh/ssh.go | 26 +- .../torpedo/drivers/scheduler/k8s/k8s.go | 437 +++++++++- .../torpedo/drivers/scheduler/scheduler.go | 7 + .../portworx/torpedo/drivers/volume/common.go | 15 +- .../drivers/volume/portworx/portworx.go | 324 ++++++-- .../portworx/torpedo/drivers/volume/volume.go | 9 +- .../torpedo/pkg/aetosutil/dashboardutil.go | 17 +- .../portworx/torpedo/pkg/restutil/restutil.go | 10 +- vendor/modules.txt | 13 +- 61 files changed, 4691 insertions(+), 2403 deletions(-) delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_fen.go create mode 100644 
vendor/github.com/fsnotify/fsnotify/backend_inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_other.go create mode 100644 vendor/github.com/fsnotify/fsnotify/backend_windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh rename vendor/github.com/fsnotify/fsnotify/{open_mode_bsd.go => system_bsd.go} (57%) rename vendor/github.com/fsnotify/fsnotify/{open_mode_darwin.go => system_darwin.go} (52%) delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_core_v1.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_portworx_v1.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/apis.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/portworx/portworx.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/doc.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/portworxdiag.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/register.go create mode 100644 vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/onsi/gomega/internal/polling_signal_error.go diff --git a/go.mod b/go.mod index 55bf431b75..2563464a1f 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 github.com/kubernetes-sigs/aws-ebs-csi-driver v0.9.0 github.com/libopenstorage/openstorage v9.4.47+incompatible - github.com/libopenstorage/operator v0.0.0-20230323034810-8853b151f594 + github.com/libopenstorage/operator v0.0.0-20230511042455-244266620049 github.com/libopenstorage/secrets v0.0.0-20220413195519-57d1c446c5e9 github.com/mitchellh/hashstructure v1.0.0 github.com/openshift/api v0.0.0-20210105115604-44119421ec6b @@ -116,7 +116,7 @@ require ( github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.13.0 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect @@ -212,7 +212,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/gomega v1.21.1 // indirect + github.com/onsi/gomega v1.24.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/opencontainers/runc v1.1.3 // indirect @@ -302,7 +302,7 @@ replace ( github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20230324214216-7f88436db3de github.com/portworx/kdmp => github.com/portworx/kdmp v0.4.1-0.20230316085313-95fc97e8493b 
github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20230330091134-421296e5f8d0 - github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 + github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230512123351-2de37aaa31a7 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 helm.sh/helm/v3 => helm.sh/helm/v3 v3.10.3 diff --git a/go.sum b/go.sum index 1b5e9c7ad2..59f7f34be1 100644 --- a/go.sum +++ b/go.sum @@ -568,6 +568,12 @@ github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v56.3.0+incompatible h1:DmhwMrUIvpeoTDiWRDtNHqelNUd3Og8JCkrLHQK795c= github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/Azure/azure-service-bus-go v0.10.1/go.mod h1:E/FOceuKAFUfpbIJDKWz/May6guE+eGibfGT6q+n1to= github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.9.0 h1:kORqvzXP8ORhKbW13FflGUaSE5CMyDWun9UwMxY8gPs= @@ -632,6 +638,7 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -1094,6 +1101,7 @@ github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269 h1:hb github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4= github.com/djherbis/atime v1.0.0/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= @@ -1198,8 +1206,9 @@ 
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUork github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsouza/fake-gcs-server v0.0.0-20180612165233-e85be23bdaa8/go.mod h1:1/HufuJ+eaDf4KTnYdS6HJMGvMRU8d4cYTuu/1QaBbI= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= @@ -1228,7 +1237,6 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= @@ -1324,6 +1332,7 @@ github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6 github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.3.0/go.mod h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE= github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= @@ -1381,6 +1390,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= @@ -1906,8 +1916,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -1943,7 +1953,6 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -1961,8 +1970,9 @@ github.com/libopenstorage/openstorage v1.0.1-0.20230324214216-7f88436db3de/go.mo github.com/libopenstorage/openstorage-sdk-clients v0.109.0/go.mod h1:vo0c/nLG2HIyQva4Avwx61U1kWcw4HGQh3sjzV2DEEs= github.com/libopenstorage/operator v0.0.0-20230112002659-7e73437e61d4/go.mod h1:/sGycOMcavuKLWm3Vd907luRXZA/mcqFs1AdEbbT5k4= github.com/libopenstorage/operator v0.0.0-20230202214252-140e2e1fd86a/go.mod h1:Q66wsNUAekPawgGg9yh0Bs7eXWPa4/+c2JTefdiDciM= -github.com/libopenstorage/operator v0.0.0-20230323034810-8853b151f594 h1:pVtnM9o5BlvWSrRphA7qJsV32bXW98cmxhHlA2xrA9E= github.com/libopenstorage/operator v0.0.0-20230323034810-8853b151f594/go.mod h1:0S4k1ouTScuVS3rxPukfEFvc5bMasmI1wpAAM0NejWQ= +github.com/libopenstorage/operator v0.0.0-20230511042455-244266620049 h1:7eMjGI9/ax49bcsHAx5iZlfGKu1NtW9Ccmx1IrgnRkY= +github.com/libopenstorage/operator v0.0.0-20230511042455-244266620049/go.mod h1:qsngZJ/v10+To05Xvlva3bi07OJz+3izpZCJlxxqV5U= github.com/libopenstorage/secrets v0.0.0-20200207034622-cdb443738c67/go.mod h1:4QOUUeRMHtTniKPj+biL3wlusahU/7OnMsGKc4FN/EY= github.com/libopenstorage/secrets v0.0.0-20210908194121-a1d19aa9713a/go.mod h1:gE8rSd6lwLNXNbiW3DrRZjFMs+y4fDHy/6uiOO9cdzY= github.com/libopenstorage/secrets v0.0.0-20220413195519-57d1c446c5e9 h1:pZQ5uaWky7KerHX2saBO971ti+IBDJRorHwcHwc+0aM= @@ -2051,8 +2061,8 @@ github.com/mattn/go-sqlite3 v0.0.0-20160514122348-38ee283dabf1/go.mod h1:FPy6Kqz github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions 
v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -2115,8 +2125,6 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/moby v20.10.23+incompatible h1:5+Q6jGL7oH89tx+ms0fGsTYEXrQ3P4vuL3i7DayMUuM= -github.com/moby/moby v20.10.23+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/moby v20.10.24+incompatible h1:hjfxUufgeyrgolyaOWASyR9SvehpNcq/QHp/tx4VgsM= github.com/moby/moby v20.10.24+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -2143,6 +2151,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= @@ -2150,6 +2159,7 @@ github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -2218,8 +2228,12 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= +github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q= +github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= github.com/onsi/gomega 
v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.2.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -2238,8 +2252,12 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1 h1:OB/euWYIExnPBohllTicTHmGTrMaqJ67nIu80j0/uEM= github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= +github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= +github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -2340,6 +2358,7 @@ github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -2360,7 +2379,7 @@ github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod h1:Q8YyrNDvPp github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= -github.com/portworx/pds-api-go-client v0.0.0-20220901142946-b6ecf97f5e71/go.mod h1:yn0abd9g4Q3fBpY720Eb+hwRV1OnTjROUaSb9mOlzBk= +github.com/portworx/pds-api-go-client v0.0.0-20230328163250-90d945a030b9/go.mod h1:yn0abd9g4Q3fBpY720Eb+hwRV1OnTjROUaSb9mOlzBk= github.com/portworx/px-backup-api v1.2.2-0.20230302013809-a43e52d727ba/go.mod h1:fwJb8kCDTvEwzQbId6jJL+z6GX6m6acfHgPBFncZJGE= github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 h1:VNBTmIPjJRZ2QP64zdsrif3ELDHiMzoyNNX74VNHgZ8= github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987/go.mod h1:g3pw2lI2AjqAixUCRhaBdKTY98znsCPR7NGRrlpimVU= @@ -2369,14 +2388,14 @@ github.com/portworx/sched-ops 
v1.20.4-rc1.0.20230330091134-421296e5f8d0 h1:58JTP github.com/portworx/sched-ops v1.20.4-rc1.0.20230330091134-421296e5f8d0/go.mod h1:4Tm81DwlMhF4kNT+6toXeLnxGrqacMHtgz6x2FfFUnQ= github.com/portworx/talisman v0.0.0-20210302012732-8af4564777f7/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= github.com/portworx/talisman v1.1.3/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= -github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 h1:dvMA2uZo8BKC+NC1IL6GIe58GCrmS6pim5QAMlh3a5g= -github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670/go.mod h1:s3fDUFWKAYiHKz/CEMRcyn3C/u2yYk5rTJpyGTlq+/0= +github.com/portworx/torpedo v0.0.0-20230512123351-2de37aaa31a7 h1:5EKO7hc9CafDAHuND8bFBxPxhc5AsNc4NBOeEubUUYU= +github.com/portworx/torpedo v0.0.0-20230512123351-2de37aaa31a7/go.mod h1:vVXpmGvonF4P40yOM2H9eXXWfnY52DEn+tl6fckhB2o= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= @@ -2766,7 +2785,6 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= github.com/zoido/yag-config v0.4.0 h1:KahwuvyTIWjgo5CBCpGcktCaBCOTQOEtwyXk6OqUW50= github.com/zoido/yag-config v0.4.0/go.mod h1:HcK2GbfzhDVmgwP4miBIfD2qKz6Y5LAJayebVhie/nE= @@ -2911,6 +2929,7 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -3040,6 +3059,7 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod 
h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -3079,6 +3099,8 @@ golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -3304,9 +3326,11 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= @@ -3318,6 +3342,7 @@ golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= @@ -3489,6 +3514,7 @@ golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clI golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod 
h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -4000,6 +4026,7 @@ k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230202215443-34013725500c/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY= k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= @@ -4064,6 +4091,7 @@ sigs.k8s.io/cluster-api v0.2.11/go.mod h1:BCw+Pqy1sc8mQ/3d2NZM/f5BApKFCMPsnGvKol sigs.k8s.io/container-object-storage-interface-spec v0.0.0-20220211001052-50e143052de8/go.mod h1:kafkL5l/lTUrZXhVi/9p1GzpEE/ts29BkWkL3Ao33WU= sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/controller-tools v0.11.3/go.mod h1:qcfX7jfcfYD/b7lAhvqAyTbt/px4GpvN88WKLFFv7p8= sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0 h1:mvSbjzrnOd+3AB/7jvz7UNdZs5fhYorhm2H0A2HcIVg= sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0/go.mod h1:aSyCjg9bNQQxY9hnnNo10vjhZsQTkLliruvRXp3N9B4= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf43..1d89d85ce4 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,6 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5ef5..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index cc01c08f56..77f9593bd5 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -7,6 +7,95 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +Nothing yet. + +## [1.6.0] - 2022-10-13 + +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. 
+ +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 + ## [1.5.4] - 2022-04-25 * Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) @@ -40,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#385](https://github.com/fsnotify/fsnotify/pull/385) * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. + +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + ## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 8a642563d7..ea379759d5 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,60 +1,26 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discus changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. 
+- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms. -## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). +[goon]: https://github.com/arp242/goon +[Vagrant]: https://www.vagrantup.com/ +[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE index e180c8fb05..fb03ade750 100644 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -1,28 +1,25 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. +Copyright © 2012 The Go Authors. 
All rights reserved. +Copyright © fsnotify Authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index 0731c5ef8a..d4e6080feb 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,120 +1,161 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, and BSD systems. 
-[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) +Go 1.16 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. +**It's best to read the documentation at pkg.go.dev, as it's pinned to the last +released version, whereas this README is for the last development version which +may include additions/changes.** -Cross platform: Windows, Linux, BSD and macOS. +--- -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +Platform support: -\* Android and iOS are untested. +| Adapter | OS | Status | +| --------------------- | ---------------| -------------------------------------------------------------| +| inotify | Linux 2.6.32+ | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | +| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | -Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +Linux and macOS should include Android and iOS, but these are currently untested. -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). 
- -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. + err = watcher.Add("/tmp") + if err != nil { + log.Fatal(err) + } + + // Block main goroutine forever. + <-make(chan struct{}) } ``` -## Contributing +Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be +run with: -Please refer to [CONTRIBUTING][] before opening an issue or pull request. + % go run ./cmd/fsnotify -## FAQ +FAQ +--- +### Will a file still be watched when it's moved to another directory? +No, not unless you are watching the location it was moved to. -**When a file is moved to another directory is it still being watched?** +### Are subdirectories watched too? +No, you must add watches for any directory you want to watch (a recursive +watcher is on the roadmap: [#18]). -No (it shouldn't be, unless you are watching where it was moved to). +[#18]: https://github.com/fsnotify/fsnotify/issues/18 -**When I watch a directory, are all subdirectories watched as well?** +### Do I have to watch the Error and Event channels in a goroutine? +As of now, yes (you can read both channels in the same goroutine using `select`, +you don't need a separate goroutine for both channels; see the example). -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). +### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? +fsnotify requires support from underlying OS to work. The current NFS and SMB +protocols does not provide network level support for file notifications, and +neither do the /proc and /sys virtual filesystems. -**Do I have to watch the Error and Event channels in a separate goroutine?** +This could be fixed with a polling watcher ([#9]), but it's not yet implemented. -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) +[#9]: https://github.com/fsnotify/fsnotify/issues/9 -**Why am I receiving multiple events for the same file on OS X?** +Platform-specific notes +----------------------- +### Linux +When a file is removed a REMOVE event won't be emitted until all file +descriptors are closed; it will emit a CHMOD instead: -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). 
+ fp := os.Open("file") + os.Remove("file") // CHMOD + fp.Close() // REMOVE -**How many files can be watched at once?** +This is the event that inotify sends, so not much can be changed about this. -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. +The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for +the number of watches per user, and `fs.inotify.max_user_instances` specifies +the maximum number of inotify instances per user. Every Watcher you create is an +"instance", and every path you add is a "watch". -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** +These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and +`/proc/sys/fs/inotify/max_user_instances` -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. +To increase them you can use `sysctl` or write the value to proc file: -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 + # The default values on Linux 5.18 + sysctl fs.inotify.max_user_watches=124983 + sysctl fs.inotify.max_user_instances=128 + +To make the changes persist on reboot edit `/etc/sysctl.conf` or +`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your +distro's documentation): + + fs.inotify.max_user_watches=124983 + fs.inotify.max_user_instances=128 -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md +Reaching the limit will result in a "no space left on device" or "too many open +files" error. -## Related Projects +### kqueue (macOS, all BSD systems) +kqueue requires opening a file descriptor for every file that's being watched; +so if you're watching a directory with five files then that's six file +descriptors. You will run in to your system's "max open files" limit faster on +these platforms. -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) +The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to +control the maximum number of open files. +### macOS +Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary +workaround is to add your folder(s) to the *Spotlight Privacy settings* until we +have a native FSEvents implementation (see [#11]). + +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#15]: https://github.com/fsnotify/fsnotify/issues/15 diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go new file mode 100644 index 0000000000..1a95ad8e7c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -0,0 +1,162 @@ +//go:build solaris +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). 
+// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). 
+ // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 0000000000..54c77fbb0e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,459 @@ +//go:build linux +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. 
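The remove-while-open behaviour described above is easy to verify. A minimal sketch, assuming Linux and the fsnotify v1.6.x API shown in this patch; the file path and the final sleep are illustrative only, and the exact events delivered may vary:

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	const name = "/tmp/fsnotify-demo-file" // illustrative path
	if err := os.WriteFile(name, []byte("x"), 0o644); err != nil {
		log.Fatal(err)
	}

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Log everything the watcher delivers for this file.
	go func() {
		for ev := range w.Events {
			log.Println(ev)
		}
	}()

	if err := w.Add(name); err != nil { // watch the file itself
		log.Fatal(err)
	}

	fp, err := os.Open(name)
	if err != nil {
		log.Fatal(err)
	}
	os.Remove(name)         // expect a Chmod event: the link count changed
	fp.Close()              // expect the Remove event only once the last fd is closed
	time.Sleep(time.Second) // crude wait so events can be delivered before exit
}
```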
+// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. 
+ Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). See: https://github.com/golang/go/issues/26439 + fd int + mu sync.Mutex // Map access + inotifyFile *os.File + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + // Need to set the FD to nonblocking mode in order for SetDeadline methods to work + // Otherwise, blocking i/o operations won't terminate on close + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &Watcher{ + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + return false + } +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed() { + w.mu.Unlock() + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + w.mu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. + err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. 
+// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case; + // The only two possible errors are: + // + // - EBADF, which happens when w.fd is not a valid file descriptor + // of any kind. + // - EINVAL, which is when fd is not an inotify descriptor or wd + // is not a valid watch descriptor. Watch descriptors are + // invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they + // are watching is deleted. + return errno + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). 
+func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + defer func() { + close(w.doneResp) + close(w.Errors) + close(w.Events) + }() + + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + errno error // Syscall errno + ) + for { + // See if we have been closed. + if w.isClosed() { + return + } + + n, err := w.inotifyFile.Read(buf[:]) + switch { + case errors.Unwrap(err) == os.ErrClosed: + return + case err != nil: + if !w.sendError(err) { + return + } + continue + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + if !w.sendError(err) { + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + var ( + // Point "raw" to the event in the buffer + raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + mask = uint32(raw.Mask) + nameLen = uint32(raw.Len) + ) + + if mask&unix.IN_Q_OVERFLOW != 0 { + if !w.sendError(ErrEventOverflow) { + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := w.newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if mask&unix.IN_IGNORED == 0 { + if !w.sendEvent(event) { + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 0000000000..29087469bf --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,707 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). 
+// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + done chan struct{} + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing. + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Watched file descriptors (key: path). + watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). + userWatches map[string]struct{} // Watches added with Watcher.Add() + dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. + paths map[int]pathInfo // File descriptors to path names for processing kqueue events. + fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + closepipe: closepipe, + watches: make(map[string]int), + watchesByDir: make(map[string]map[int]struct{}), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]struct{}), + userWatches: make(map[string]struct{}), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. 
+func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *Watcher) sendEvent(e Event) bool { + select { + case w.Events <- e: + return true + case <-w.done: + } + return false +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.done: + } + return false +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + pathsToRemove := make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() // Unlock before calling Remove, which also locks + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + close(w.done) + + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.userWatches[name] = struct{}{} + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. 
For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.userWatches, name) + + parentName := filepath.Dir(name) + delete(w.watchesByDir[parentName], watchfd) + + if len(w.watchesByDir[parentName]) == 0 { + delete(w.watchesByDir, parentName) + } + + delete(w.paths, watchfd) + delete(w.dirFlags, name) + delete(w.fileExists, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for fd := range w.watchesByDir[name] { + path := w.paths[fd] + if _, ok := w.userWatches[path.name]; !ok { + pathsToRemove = append(pathsToRemove, path.name) + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.userWatches)) + for pathname := range w.userWatches { + entries = append(entries, pathname) + } + + return entries +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow Symlinks + // + // Linux can add unresolvable symlinks to the watch list without issue, + // and Windows can't do symlinks period. To maintain consistency, we + // will act like everything is fine if the link can't be resolved. + // There will simply be no file events for broken symlinks. Hence the + // returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and go issues 11180 and 39237. + for { + watchfd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + isDir = fi.IsDir() + } + + err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + parentName := filepath.Dir(name) + w.watches[name] = watchfd + + watchesByDir, ok := w.watchesByDir[parentName] + if !ok { + watchesByDir = make(map[int]struct{}, 1) + w.watchesByDir[parentName] = watchesByDir + } + watchesByDir[watchfd] = struct{}{} + + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + defer func() { + err := unix.Close(w.kq) + if err != nil { + w.Errors <- err + } + unix.Close(w.closepipe[0]) + close(w.Events) + close(w.Errors) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for closed := false; !closed; { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + closed = true + } + continue + } + + // Flush the events we received to the Events channel + for _, kevent := range kevents { + var ( + watchfd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if watchfd == w.closepipe[0] { + closed = true + continue + } + + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + + event := w.newEvent(path.name, mask) + + if path.isDir && !event.Has(Remove) { + // Double check to make sure the directory exists. This can + // happen when we do a rm -fr on a recursively watched folders + // and we receive a modification event first but the folder has + // been deleted and later receive the delete event. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + event.Op |= Remove + } + } + + if event.Has(Rename) || event.Has(Remove) { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Has(Write) && !event.Has(Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + if !w.sendEvent(event) { + closed = true + continue + } + } + + if event.Has(Remove) { + // Look for a file that may have overwritten this. 
+ // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + } + } +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + path := filepath.Join(dirPath, fileInfo.Name()) + + cleanPath, err := w.internalWatch(path, fileInfo) + if err != nil { + // No permission to read the file; that's not a problem: just skip. + // But do add it to w.fileExists to prevent it from being picked up + // as a "new" file later (it still shows up in the directory + // listing). + switch { + case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): + cleanPath = filepath.Clean(path) + default: + return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err) + } + } + + w.mu.Lock() + w.fileExists[cleanPath] = struct{}{} + w.mu.Unlock() + } + + return nil +} + +// Search the directory for new files and send an event for them. +// +// This functionality is to have the BSD watcher match the inotify, which sends +// a create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dir string) { + // Get all files + files, err := ioutil.ReadDir(dir) + if err != nil { + if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) { + return + } + } + + // Search for new files + for _, fi := range files { + err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + if !w.sendEvent(Event{Name: filePath, Op: Create}) { + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = struct{}{} + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *Watcher) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 0000000000..a9bb1c3c4d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,66 @@ +//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows + +package fsnotify + +import ( + "fmt" + "runtime" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct{} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). 
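Because watches are non-recursive, a common pattern is to walk the tree and add each directory yourself until recursive watching lands ([#18]). A minimal sketch assuming the v1.6.x API above; the `addRecursive` helper is illustrative (not part of the library), and directories created after the walk still have to be added by hand:

```go
package main

import (
	"io/fs"
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// addRecursive adds one watch per directory under root.
func addRecursive(w *fsnotify.Watcher, root string) error {
	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return w.Add(path)
		}
		return nil
	})
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := addRecursive(w, "/tmp"); err != nil { // illustrative root
		log.Fatal(err)
	}
	for {
		select {
		case ev := <-w.Events:
			log.Println("event:", ev)
		case err := <-w.Errors:
			log.Println("error:", err)
		}
	}
}
```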
+// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 0000000000..ae392867c0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,746 @@ +//go:build windows +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. 
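Since every kqueue watch consumes a file descriptor, it can help to inspect the relevant limits before adding many watches. A hedged sketch using golang.org/x/sys/unix (already used by the kqueue backend above); the sysctl names may not exist on every BSD, so their errors are ignored:

```go
//go:build darwin || freebsd || openbsd || netbsd || dragonfly

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Per-process limit on open file descriptors; every kqueue watch costs one.
	var rl unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("RLIMIT_NOFILE: cur=%d max=%d\n", rl.Cur, rl.Max)

	// System-wide and per-process sysctl limits mentioned above (if present).
	if v, err := unix.SysctlUint32("kern.maxfilesperproc"); err == nil {
		fmt.Println("kern.maxfilesperproc:", v)
	}
	if v, err := unix.SysctlUint32("kern.maxfiles"); err == nil {
		fmt.Println("kern.maxfiles:", v)
	}
}
```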
+// +// # macOS notes +// +// Spotlight indexing on macOS can result in multiple events (see [#15]). A +// temporary workaround is to add your folder(s) to the "Spotlight Privacy +// Settings" until we have a native FSEvents implementation (see [#11]). +// +// [#11]: https://github.com/fsnotify/fsnotify/issues/11 +// [#15]: https://github.com/fsnotify/fsnotify/issues/15 +type Watcher struct { + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, so you + // probably want to wait until you've stopped receiving + // them (see the dedup example in cmd/fsnotify). + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // and on kqueue when a file is truncated. On Windows + // it's never sent. + Events chan Event + + // Errors sends any errors. + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, isClosed + watches watchMap // Map of watches (key: i-number) + isClosed bool // Set to true when Close() is first called +} + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *Watcher) sendError(err error) bool { + select { + case w.Errors <- err: + return true + case <-w.quit: + } + return false +} + +// Close removes all watches and closes the events channel. 
+func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; attempting to watch it more than once will +// return an error. Paths that do not yet exist on the filesystem cannot be +// added. A watch will be automatically removed if the path is deleted. +// +// A path will remain watched if it gets renamed to somewhere else on the same +// filesystem, but the monitor will get removed if the path gets deleted and +// re-created, or if it's moved to a different filesystem. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many tools update files atomically. Instead of "just" writing +// to the file a temporary file will be written to first, and if successful the +// temporary file is moved to to destination removing the original, or some +// variant thereof. The watcher on the original file is now lost, as it no +// longer exists. +// +// Instead, watch the parent directory and use Event.Name to filter out files +// you're not interested in. There is an example of this in [cmd/fsnotify/file.go]. +func (w *Watcher) Add(name string) error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return errors.New("watcher already closed") + } + w.mu.Unlock() + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// WatchList returns all paths added with [Add] (and are not yet removed). +func (w *Watcher) WatchList() []string { + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + entries = append(entries, watchEntry.path) + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". 
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *Watcher) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [65536]byte // 64K buffer +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *Watcher) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *Watcher) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *Watcher) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
+func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + + ino, err := w.getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) + if err != nil { + windows.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", err) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + windows.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + + err = w.startRead(watchEntry) + if err != nil { + return err + } + + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := w.getDir(pathname) + if err != nil { + return err + } + ino, err := w.getIno(dir) + if err != nil { + return err + } + + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + + err = windows.CloseHandle(ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + if watch == nil { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. +func (w *Watcher) startRead(watch *watch) error { + err := windows.CancelIo(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CancelIo", err)) + w.deleteWatch(watch) + } + mask := w.toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= w.toWindowsFlags(m) + } + if mask == 0 { + err := windows.CloseHandle(watch.ino.handle) + if err != nil { + w.sendError(os.NewSyscallError("CloseHandle", err)) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + + rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if rdErr != nil { + err := os.NewSyscallError("ReadDirectoryChanges", rdErr) + if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. 
+func (w *Watcher) readEvents() { + var ( + n uint32 + key uintptr + ov *windows.Overlapped + ) + runtime.LockOSThread() + + for { + qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) + // This error is handled after the watch == nil check below. NOTE: this + // seems odd, note sure if it's correct. + + watch := (*watch)(unsafe.Pointer(ov)) + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + + err := windows.CloseHandle(w.port) + if err != nil { + err = os.NewSyscallError("CloseHandle", err) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch qErr { + case windows.ERROR_MORE_DATA: + if watch == nil { + w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. + n = uint32(unsafe.Sizeof(watch.buf)) + } + case windows.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case windows.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.sendError(errors.New("short read in readEvents()")) + break + } + + // Point "raw" to the event in the buffer + raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + + // Create a buf that is the size of the path name + size := int(raw.FileNameLength / 2) + var buf []uint16 + // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 + sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) + sh.Len = size + sh.Cap = size + name := windows.UTF16ToString(buf) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case windows.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case windows.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case windows.FILE_ACTION_RENAMED_NEW_NAME: + // Update saved path of all sub-watches. 
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + w.sendEvent(fullname, watch.names[name]&mask) + } + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + w.sendError(errors.New( + "Windows system assumed buffer larger than it is, events have likely been missed.")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *Watcher) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f55..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0f4ee52e8a..30a5bf0f07 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,29 +1,37 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build !plan9 // +build !plan9 -// Package fsnotify provides a platform-independent interface for file system notifications. +// Package fsnotify provides a cross-platform interface for file system +// notifications. package fsnotify import ( - "bytes" "errors" "fmt" + "strings" ) -// Event represents a single file system notification. +// Event represents a file system notification. type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. + // Path to the file or directory. + // + // Paths are relative to the input; for example with Add("dir") the Name + // will be set to "dir/file" if you create that file, but if you use + // Add("/path/to/dir") it will be "/path/to/dir/file". + Name string + + // File operation that triggered the event. + // + // This is a bitmask and some systems may send multiple operations at once. + // Use the Event.Has() method instead of comparing with ==. + Op Op } // Op describes a set of file operations. type Op uint32 -// These are the generalized file operations that can trigger a notification. +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. const ( Create Op = 1 << iota Write @@ -32,38 +40,42 @@ const ( Chmod ) -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer +// Common errors that can be reported by a watcher +var ( + ErrNonExistentWatch = errors.New("can't remove non-existent watcher") + ErrEventOverflow = errors.New("fsnotify queue overflow") +) - if op&Create == Create { - buffer.WriteString("|CREATE") +func (op Op) String() string { + var b strings.Builder + if op.Has(Create) { + b.WriteString("|CREATE") } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") + if op.Has(Remove) { + b.WriteString("|REMOVE") } - if op&Write == Write { - buffer.WriteString("|WRITE") + if op.Has(Write) { + b.WriteString("|WRITE") } - if op&Rename == Rename { - buffer.WriteString("|RENAME") + if op.Has(Rename) { + b.WriteString("|RENAME") } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") + if op.Has(Chmod) { + b.WriteString("|CHMOD") } - if buffer.Len() == 0 { - return "" + if b.Len() == 0 { + return "[no events]" } - return buffer.String()[1:] // Strip leading pipe + return b.String()[1:] } -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h == h } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. 
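The fsnotify.go hunk above adds the Op/Event Has helpers and exported sentinel errors. The sketch below illustrates how calling code might use them, assuming the v1.6.0 API shown in this hunk; the helper names describe and removeQuietly are hypothetical and not part of fsnotify:

package main

import (
	"errors"
	"fmt"

	"github.com/fsnotify/fsnotify"
)

// describe shows the bitmask check: one event may carry several operations,
// so compare with Has rather than ==.
func describe(ev fsnotify.Event) {
	if ev.Has(fsnotify.Create) {
		fmt.Println("created:", ev.Name)
	}
	if ev.Has(fsnotify.Write) {
		fmt.Println("written:", ev.Name)
	}
	if ev.Has(fsnotify.Remove) || ev.Has(fsnotify.Rename) {
		fmt.Println("gone:", ev.Name)
	}
}

// removeQuietly ignores the sentinel error returned when the path was never watched.
func removeQuietly(w *fsnotify.Watcher, path string) error {
	if err := w.Remove(path); err != nil && !errors.Is(err, fsnotify.ErrNonExistentWatch) {
		return err
	}
	return nil
}

func main() {
	describe(fsnotify.Event{Name: "example.txt", Op: fsnotify.Create | fsnotify.Write})

	w, err := fsnotify.NewWatcher()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer w.Close()
	fmt.Println(removeQuietly(w, "/path/never/added")) // prints <nil>
}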
func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go deleted file mode 100644 index 5968855983..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index a6d0e0ec8c..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. 
-func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b572a37c3f..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 6fb8d8532e..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh new file mode 100644 index 0000000000..b09ef76834 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh @@ -0,0 +1,208 @@ +#!/usr/bin/env zsh +[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 +setopt err_exit no_unset pipefail extended_glob + +# Simple script to update the godoc comments on all watchers. 
Probably took me +# more time to write this than doing it manually, but ah well 🙃 + +watcher=$(</tmp/x + print -r -- $cmt >>/tmp/x + tail -n+$(( end + 1 )) $file >>/tmp/x + mv /tmp/x $file + done +} + +set-cmt '^type Watcher struct ' $watcher +set-cmt '^func NewWatcher(' $new +set-cmt '^func (w \*Watcher) Add(' $add +set-cmt '^func (w \*Watcher) Remove(' $remove +set-cmt '^func (w \*Watcher) Close(' $close +set-cmt '^func (w \*Watcher) WatchList(' $watchlist +set-cmt '^[[:space:]]*Events *chan Event$' $events +set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go similarity index 57% rename from vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go rename to vendor/github.com/fsnotify/fsnotify/system_bsd.go index 36cc3845b6..4322b0b885 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go similarity index 52% rename from vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go rename to vendor/github.com/fsnotify/fsnotify/system_darwin.go index 98cd8476ff..5da5ffa78f 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,7 +1,3 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - //go:build darwin // +build darwin diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 02ce7deb0b..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
-func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = 
filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - // TODO: Consider using unsafe.Slice that is available from go1.17 - // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang - // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := syscall.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/libopenstorage/operator/drivers/storage/portworx/util/util.go b/vendor/github.com/libopenstorage/operator/drivers/storage/portworx/util/util.go index 5e757ba155..650a676149 100644 --- a/vendor/github.com/libopenstorage/operator/drivers/storage/portworx/util/util.go +++ b/vendor/github.com/libopenstorage/operator/drivers/storage/portworx/util/util.go @@ -516,7 +516,7 @@ func GetPortworxVersion(cluster *corev1.StorageCluster) *version.Version { pxVersion *version.Version ) - pxImage := cluster.Spec.Image + pxImage := strings.TrimSpace(cluster.Spec.Image) var manifestURL string for _, env := range cluster.Spec.Env { if env.Name == EnvKeyPXImage { @@ -1112,37 +1112,6 @@ func IsCCMGoSupported(pxVersion *version.Version) bool { return pxVersion.GreaterThanOrEqual(MinimumPxVersionCCMGO) } -// ValidateTelemetry validates if telemetry is enabled correctly -func ValidateTelemetry(cluster *corev1.StorageCluster) error { - if !IsTelemetryEnabled(cluster.Spec) { - return nil - } - - pxVersion := GetPortworxVersion(cluster) - if pxVersion.LessThan(MinimumPxVersionCCM) { - // PX version is lower than 2.8 - return fmt.Errorf("telemetry is not supported on Portworx version: %s", pxVersion) - } else if IsCCMGoSupported(pxVersion) { - proxyType, proxy := GetPxProxyEnvVarValue(cluster) - if proxy == "" { - return nil - } else if proxyType == EnvKeyPortworxHTTPProxy { - // CCM Go is enabled with http proxy, but it cannot be split into host and port - if _, _, err := SplitPxProxyHostPort(proxy); err != nil { - return fmt.Errorf("telemetry is not supported with proxy in a format of: %s", proxy) - } - } else if proxyType == EnvKeyPortworxHTTPSProxy { - // CCM Go is enabled with https proxy - // TODO: remove when custom proxy is supported - return fmt.Errorf("telemetry is not supported with secure proxy: %s", proxy) - } - } - - // NOTE: don't check if air-gapped here when telemetry is specified enabled or disabled explicitly, - // as it will ping the registration endpoint periodically - return nil -} - // IsPxRepoEnabled returns true is pxRepo is enabled func IsPxRepoEnabled(spec corev1.StorageClusterSpec) bool { return spec.PxRepo != nil && diff --git 
a/vendor/github.com/libopenstorage/operator/drivers/storage/storage.go b/vendor/github.com/libopenstorage/operator/drivers/storage/storage.go index 9363e3f0a6..996b2f0724 100644 --- a/vendor/github.com/libopenstorage/operator/drivers/storage/storage.go +++ b/vendor/github.com/libopenstorage/operator/drivers/storage/storage.go @@ -27,7 +27,7 @@ type Driver interface { // Init initializes the storage driver Init(client.Client, *runtime.Scheme, record.EventRecorder) error // Validate validates if the driver is correctly configured - Validate() error + Validate(cluster *corev1.StorageCluster) error // String returns the string name of the driver String() string // UpdateDriver updates the driver with the current cluster and node conditions. diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_core_v1.go b/vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_core_v1.go new file mode 100644 index 0000000000..bedd92eaba --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_core_v1.go @@ -0,0 +1,10 @@ +package apis + +import ( + "github.com/libopenstorage/operator/pkg/apis/core/v1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_portworx_v1.go b/vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_portworx_v1.go new file mode 100644 index 0000000000..83c06e4e3a --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/addtoscheme_portworx_v1.go @@ -0,0 +1,10 @@ +package apis + +import ( + v1 "github.com/libopenstorage/operator/pkg/apis/portworx/v1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme) +} diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/apis.go b/vendor/github.com/libopenstorage/operator/pkg/apis/apis.go new file mode 100644 index 0000000000..07dc961644 --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/apis.go @@ -0,0 +1,13 @@ +package apis + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// AddToSchemes may be used to add all resources defined in the project to a Scheme +var AddToSchemes runtime.SchemeBuilder + +// AddToScheme adds all Resources to the Scheme +func AddToScheme(s *runtime.Scheme) error { + return AddToSchemes.AddToScheme(s) +} diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/storagenode.go b/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/storagenode.go index 87e77a9eb7..4e2cef1e3f 100644 --- a/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/storagenode.go +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/storagenode.go @@ -67,11 +67,11 @@ type NodeStatus struct { // Phase is the current status of the storage node Phase string `json:"phase,omitempty"` // Network details used by the storage driver - Network NetworkStatus `json:"network,omitempty"` + Network *NetworkStatus `json:"network,omitempty"` // Storage details used by the storage driver - Storage StorageStatus `json:"storage,omitempty"` + Storage *StorageStatus `json:"storage,omitempty"` // Geo topology information for a node - Geo Geography `json:"geography,omitempty"` + Geo *Geography `json:"geography,omitempty"` // Conditions is an array of current node conditions 
Conditions []NodeCondition `json:"conditions,omitempty"` // Checks are a list of pre or post flight checks that are performed by the Operator diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/zz_generated.deepcopy.go b/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/zz_generated.deepcopy.go index 19fa1e9afb..9555ca2b91 100644 --- a/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/core/v1/zz_generated.deepcopy.go @@ -681,9 +681,21 @@ func (in *NodeSpec) DeepCopy() *NodeSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { *out = *in - out.Network = in.Network - in.Storage.DeepCopyInto(&out.Storage) - out.Geo = in.Geo + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(NetworkStatus) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageStatus) + (*in).DeepCopyInto(*out) + } + if in.Geo != nil { + in, out := &in.Geo, &out.Geo + *out = new(Geography) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]NodeCondition, len(*in)) diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/portworx.go b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/portworx.go new file mode 100644 index 0000000000..0b5bf4489e --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/portworx.go @@ -0,0 +1,9 @@ +// Package v1 contains API Schema definitions for the portworx API group +// +k8s:deepcopy-gen=package,register +// +groupName=portworx.io +package portworx + +const ( + // GroupName is the group name of the CRD + GroupName = "portworx.io" +) diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/doc.go b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/doc.go new file mode 100644 index 0000000000..95e0bc4511 --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/doc.go @@ -0,0 +1,4 @@ +// Package v1 contains API Schema definitions for the core v1 API group +// +kubebuilder:object:generate=true +// +groupName=portworx.io +package v1 diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/portworxdiag.go b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/portworxdiag.go new file mode 100644 index 0000000000..880b81f90e --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/portworxdiag.go @@ -0,0 +1,95 @@ +package v1 + +import ( + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PortworxDiagSpec is the spec used to define a portworx diag. +type PortworxDiagSpec struct { + // Configuration for diags collection of the main Portworx component. + Portworx *PortworxComponent `json:"portworx,omitempty"` +} + +// Specifies the diags collection configuration for the Portworx component. +type PortworxComponent struct { + // Nodes for which the diags need to be collected. If a volume selector is + // also specified, then both the selectors will be honored and the selected + // nodes will be a union of both selectors. + NodeSelector NodeSelector `json:"nodes,omitempty"` + // Volumes for which the diags need to be collected. + VolumeSelector VolumeSelector `json:"volumes,omitempty"` + // Generates the core dump as well when collecting the diags. 
Could be useful + // to analyze the current state of the system. + GenerateCore bool `json:"generateCore,omitempty"` +} + +// Allows selecting all the nodes in the entire cluster or a specific set with +// node IDs or node label selectors. It will take a union of all the combinations +// and collect diags from those nodes. +type NodeSelector struct { + // Select all nodes in the Portworx cluster. If set to true, other selectors are ignored. + All bool `json:"all,omitempty"` + // Ids of the nodes to be selected. + IDs []string `json:"ids,omitempty"` + // Labels of the volumes to be selected. + Labels map[string]string `json:"labels,omitempty"` +} + +// Allows selecting one or more volumes for which diags need to be collected. +// It will take a union of volume IDs and volume labels and collect diags for +// all that match. Currently it will find the Portworx nodes that contain the +// replicas of the selected volumes and wherever the volume is attached or +// being consumed from. +type VolumeSelector struct { + // Ids of the volumes to be selected. + IDs []string `json:"ids,omitempty"` + // Labels of the volumes to be selected. + Labels map[string]string `json:"labels,omitempty"` +} + +// PortworxDiagStatus is the status of a portworx diag. +type PortworxDiagStatus struct { + // One word status of the entire diags collection job. + Phase string `json:"phase,omitempty"` + // UUID of the Portworx cluster. This is useful to find the uploaded diags. + ClusterUUID string `json:"clusterUuid,omitempty"` + // Status of the diags collection from all the selected nodes. + NodeStatuses []NodeStatus `json:"nodes,omitempty"` +} + +// Status of the diags collection from a single node. +type NodeStatus struct { + // ID of the node for which the diag status is reported. + NodeID string `json:"nodeId,omitempty"` + // One word status of the diags collection on the node. + Status string `json:"status,omitempty"` + // Optional message used to give the reason for any failure. + Message string `json:"message,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,shortName=pxdiag +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the Portworx diag collection." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of the diag resource." + +// PortworxDiag represents a portworx diag +type PortworxDiag struct { + meta.TypeMeta `json:",inline"` + meta.ObjectMeta `json:"metadata,omitempty"` + Spec PortworxDiagSpec `json:"spec,omitempty"` + Status PortworxDiagStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PortworxDiagList is a list of portworx diags. 
+type PortworxDiagList struct { + meta.TypeMeta `json:",inline"` + meta.ListMeta `json:"metadata,omitempty"` + Items []PortworxDiag `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PortworxDiag{}, &PortworxDiagList{}) +} diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/register.go b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/register.go new file mode 100644 index 0000000000..97d8a98f5f --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/register.go @@ -0,0 +1,25 @@ +// Package v1 contains API Schema definitions for the core v1 API group +// +kubebuilder:object:generate=true +// +groupName=portworx.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" + + "github.com/libopenstorage/operator/pkg/apis/portworx" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: portworx.GroupName, Version: "v1"} + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + // AddToScheme adds all the registered types to the scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/zz_generated.deepcopy.go b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..551ec620d9 --- /dev/null +++ b/vendor/github.com/libopenstorage/operator/pkg/apis/portworx/v1/zz_generated.deepcopy.go @@ -0,0 +1,195 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { + *out = *in + if in.IDs != nil { + in, out := &in.IDs, &out.IDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. +func (in *NodeSelector) DeepCopy() *NodeSelector { + if in == nil { + return nil + } + out := new(NodeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
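As a rough illustration only, the sketch below shows how a caller might construct the new PortworxDiag custom resource defined in the types above. The helper name, resource name, and namespace are invented for the example, and the object would still need to be created through a controller-runtime or clientset client registered with this package's SchemeBuilder.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	diagv1 "github.com/libopenstorage/operator/pkg/apis/portworx/v1"
)

// newClusterWideDiag builds a PortworxDiag that requests diag collection,
// including a core dump, from every node in the Portworx cluster.
func newClusterWideDiag(namespace string) *diagv1.PortworxDiag {
	return &diagv1.PortworxDiag{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "px-diag-all-nodes", // illustrative name
			Namespace: namespace,
		},
		Spec: diagv1.PortworxDiagSpec{
			Portworx: &diagv1.PortworxComponent{
				// All=true overrides any node IDs or label selectors.
				NodeSelector: diagv1.NodeSelector{All: true},
				GenerateCore: true,
			},
		},
	}
}
```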
+func (in *PortworxComponent) DeepCopyInto(out *PortworxComponent) { + *out = *in + in.NodeSelector.DeepCopyInto(&out.NodeSelector) + in.VolumeSelector.DeepCopyInto(&out.VolumeSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxComponent. +func (in *PortworxComponent) DeepCopy() *PortworxComponent { + if in == nil { + return nil + } + out := new(PortworxComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxDiag) DeepCopyInto(out *PortworxDiag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxDiag. +func (in *PortworxDiag) DeepCopy() *PortworxDiag { + if in == nil { + return nil + } + out := new(PortworxDiag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PortworxDiag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxDiagList) DeepCopyInto(out *PortworxDiagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PortworxDiag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxDiagList. +func (in *PortworxDiagList) DeepCopy() *PortworxDiagList { + if in == nil { + return nil + } + out := new(PortworxDiagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PortworxDiagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxDiagSpec) DeepCopyInto(out *PortworxDiagSpec) { + *out = *in + if in.Portworx != nil { + in, out := &in.Portworx, &out.Portworx + *out = new(PortworxComponent) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxDiagSpec. +func (in *PortworxDiagSpec) DeepCopy() *PortworxDiagSpec { + if in == nil { + return nil + } + out := new(PortworxDiagSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxDiagStatus) DeepCopyInto(out *PortworxDiagStatus) { + *out = *in + if in.NodeStatuses != nil { + in, out := &in.NodeStatuses, &out.NodeStatuses + *out = make([]NodeStatus, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxDiagStatus. 
+func (in *PortworxDiagStatus) DeepCopy() *PortworxDiagStatus { + if in == nil { + return nil + } + out := new(PortworxDiagStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSelector) DeepCopyInto(out *VolumeSelector) { + *out = *in + if in.IDs != nil { + in, out := &in.IDs, &out.IDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSelector. +func (in *VolumeSelector) DeepCopy() *VolumeSelector { + if in == nil { + return nil + } + out := new(VolumeSelector) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/libopenstorage/operator/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/libopenstorage/operator/pkg/client/clientset/versioned/scheme/register.go index 4a4ff01172..abe2e3a8ed 100644 --- a/vendor/github.com/libopenstorage/operator/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/github.com/libopenstorage/operator/pkg/client/clientset/versioned/scheme/register.go @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/libopenstorage/operator/pkg/mock/storagedriver.mock.go b/vendor/github.com/libopenstorage/operator/pkg/mock/storagedriver.mock.go index 3a6f45e514..c989397960 100644 --- a/vendor/github.com/libopenstorage/operator/pkg/mock/storagedriver.mock.go +++ b/vendor/github.com/libopenstorage/operator/pkg/mock/storagedriver.mock.go @@ -242,15 +242,15 @@ func (mr *MockDriverMockRecorder) UpdateStorageClusterStatus(arg0, arg1 interfac } // Validate mocks base method. -func (m *MockDriver) Validate() error { +func (m *MockDriver) Validate(arg0 *v1.StorageCluster) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Validate") + ret := m.ctrl.Call(m, "Validate", arg0) ret0, _ := ret[0].(error) return ret0 } // Validate indicates an expected call of Validate. 
-func (mr *MockDriverMockRecorder) Validate() *gomock.Call { +func (mr *MockDriverMockRecorder) Validate(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockDriver)(nil).Validate)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockDriver)(nil).Validate), arg0) } diff --git a/vendor/github.com/libopenstorage/operator/pkg/util/test/util.go b/vendor/github.com/libopenstorage/operator/pkg/util/test/util.go index d247c22351..8859d506c8 100644 --- a/vendor/github.com/libopenstorage/operator/pkg/util/test/util.go +++ b/vendor/github.com/libopenstorage/operator/pkg/util/test/util.go @@ -46,6 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/libopenstorage/openstorage/api" + "github.com/libopenstorage/operator/pkg/apis" corev1 "github.com/libopenstorage/operator/pkg/apis/core/v1" "github.com/libopenstorage/operator/pkg/mock" "github.com/libopenstorage/operator/pkg/util" @@ -129,8 +130,9 @@ var TestSpecPath = "testspec" var ( pxVer2_12, _ = version.NewVersion("2.12.0-") - opVer1_10, _ = version.NewVersion("1.10.0-") opVer1_9_1, _ = version.NewVersion("1.9.1-") + opVer1_10, _ = version.NewVersion("1.10.0-") + opVer23_5, _ = version.NewVersion("23.5.0-") minOpVersionForKubeSchedConfig, _ = version.NewVersion("1.10.2-") ) @@ -143,7 +145,7 @@ func MockDriver(mockCtrl *gomock.Controller) *mock.MockDriver { // adds the CRDs defined in this repository to the scheme func FakeK8sClient(initObjects ...runtime.Object) client.Client { s := scheme.Scheme - if err := corev1.AddToScheme(s); err != nil { + if err := apis.AddToScheme(s); err != nil { logrus.Error(err) } if err := monitoringv1.AddToScheme(s); err != nil { @@ -3178,10 +3180,6 @@ func ValidateAlertManagerEnabled(pxImageList map[string]string, cluster *corev1. 
return fmt.Errorf("failed to get service alertmanager-portworx") } - if _, err := coreops.Instance().GetService("alertmanager-operated", cluster.Namespace); err != nil { - return fmt.Errorf("failed to get service alertmanager-operated") - } - logrus.Infof("Alert manager is enabled and deployed") return nil } @@ -3200,10 +3198,6 @@ func ValidateAlertManagerDisabled(pxImageList map[string]string, cluster *corev1 } } - if _, err := coreops.Instance().GetService("alertmanager-operated", cluster.Namespace); !errors.IsNotFound(err) { - return "", true, fmt.Errorf("wait for service alertmanager-operated deletion, err %v", err) - } - return "", false, nil } @@ -4009,9 +4003,8 @@ func validateAllStorageNodesInState(namespace string, status corev1.NodeConditio func ValidateStorageClusterIsOnline(cluster *corev1.StorageCluster, timeout, interval time.Duration) (*corev1.StorageCluster, error) { state := string(corev1.ClusterConditionStatusOnline) var conditions []corev1.ClusterCondition - masterOpVersion, _ := version.NewVersion(PxOperatorMasterVersion) opVersion, _ := GetPxOperatorVersion() - if opVersion.GreaterThanOrEqual(masterOpVersion) { + if opVersion.GreaterThanOrEqual(opVer23_5) { state = string(corev1.ClusterStateRunning) conditions = append(conditions, corev1.ClusterCondition{ Source: "Portworx", @@ -4172,6 +4165,7 @@ func CreateClusterWithTLS(caCertFileName, serverCertFileName, serverKeyFileName Namespace: "kube-system", }, Spec: corev1.StorageClusterSpec{ + Monitoring: &corev1.MonitoringSpec{Telemetry: &corev1.TelemetrySpec{}}, Security: &corev1.SecuritySpec{ Enabled: true, Auth: &corev1.AuthSpec{ diff --git a/vendor/github.com/libopenstorage/operator/pkg/util/util.go b/vendor/github.com/libopenstorage/operator/pkg/util/util.go index 8ce9ce1577..6a41f64327 100644 --- a/vendor/github.com/libopenstorage/operator/pkg/util/util.go +++ b/vendor/github.com/libopenstorage/operator/pkg/util/util.go @@ -3,8 +3,10 @@ package util import ( "context" "fmt" + "net" "path" "reflect" + "regexp" "sort" "strconv" "strings" @@ -49,6 +51,14 @@ const ( UnevenStorageNodesReason = "UnevenStorageNodes" // AllStoragelessNodesReason is added to an event when all the nodes in the cluster is labelled as storageless AllStoragelessNodesReason = "AllStoragelessNodes" + // TelemetryEnabledReason is added to an event when the telemetry is enabled by operator + TelemetryEnabledReason = "TelemetryEnabled" + // TelemetryDisabledReason is added to an event when the telemetry is disabled by operator + TelemetryDisabledReason = "TelemetryDisabled" + // FailedPreFlight is added to denote pre-flight failure. + FailedPreFlight = "FailedPreFlight" + // PassedPreFlight is added to denote pre-flight Passed. + PassPreFlight = "PassedPreFlight" // MigrationDryRunCompletedReason is added to an event when dry run is completed MigrationDryRunCompletedReason = "MigrationDryRunCompleted" @@ -89,6 +99,9 @@ var ( "topology.kubernetes.io/region", "topology.kubernetes.io/zone", } + + // hostnameWithDomainRe regex matches hostname with a DNS domain + hostnameWithDomainRe = regexp.MustCompile(`^(?i)[a-z0-9-]+(\.[a-z0-9-]+)+\.?$`) ) // AddDefaultRegistryToImage adds default registry to image. 
@@ -97,12 +110,38 @@ func AddDefaultRegistryToImage(image string) string { return "" } - for k := range commonDockerRegistries { - if strings.HasPrefix(image, k) || strings.HasPrefix(image, "gcr.io") || strings.HasPrefix(image, "k8s.gcr.io") { + // check if image has version-tag included, add ":latest" if missing + if strings.Count(image, ":") < 1 { + image = image + ":latest" + } + + parts := strings.Split(image, "/") + + hn := parts[0] + + // if host:port and not IPv6, reset to host + if hn[len(hn)-1] != ']' && strings.Count(hn, ":") > 0 { + var err error + hn, _, err = net.SplitHostPort(parts[0]) + if err != nil || hn == "" { + logrus.WithError(err).Errorf("got error splitting %s in image-url %s", parts[0], image) return image } } + // check if 1st part is IP address + ip := net.ParseIP(strings.Trim(hn, "[]")) + if ip != nil { + logrus.Tracef("image-url starts w/ IP: %s", image) + return image + } + + // check if 1st part is a hostname+domain + if hostnameWithDomainRe.MatchString(hn) { + logrus.Tracef("image-url starts w/ host+domain: %s", image) + return image + } + return DefaultImageRegistry + "/" + image } @@ -143,7 +182,7 @@ func GetImageURN(cluster *corev1.StorageCluster, image string) string { registryAndRepo = strings.TrimRight(registryAndRepo, "/") if registryAndRepo == "" { - // no registry/repository specifed, return image + // no registry/repository specified, return image return AddDefaultRegistryToImage(image) } diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore index 720c13cba8..5f12ff053e 100644 --- a/vendor/github.com/onsi/gomega/.gitignore +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -3,3 +3,4 @@ . .idea gomega.iml +TODO.md \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index c5f35de694..35dfec0677 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,4 +1,88 @@ -## v1.21.1 +## 1.24.2 + +### Fixes +- Correctly handle assertion failure panics for eventually/consistnetly "g Gomega"s in a goroutine [78f1660] +- docs:Fix typo "you an" -> "you can" (#607) [3187c1f] +- fixes issue #600 (#606) [808d192] + +### Maintenance +- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf] +- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8] +- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9] + +## 1.24.1 + +### Fixes +- maintain backward compatibility for Eventually and Consisntetly's signatures [4c7df5e] +- fix small typo (#601) [ea0ebe6] + +### Maintenance +- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372] +- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb] +- fix label-filter in test.yml [d795db6] +- stop running flakey tests and rely on external network dependencies in CI [7133290] + +## 1.24.0 + +### Features + +Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers. + +This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable. 
+ +### Maintenance + +- Update BeComparableTo documentation [756eaa0] + +## 1.23.0 + +### Features +- Custom formatting on a per-type basis can be provided using `format.RegisterCustomFormatter()` -- see the docs [here](https://onsi.github.io/gomega/#adjusting-output) + +- Substantial improvement have been made to `StopTrying()`: + - Users can now use `StopTrying().Wrap(err)` to wrap errors and `StopTrying().Attach(description, object)` to attach arbitrary objects to the `StopTrying()` error + - `StopTrying()` is now always interpreted as a failure. If you are an early adopter of `StopTrying()` you may need to change your code as the prior version would match against the returned value even if `StopTrying()` was returned. Going forward the `StopTrying()` api should remain stable. + - `StopTrying()` and `StopTrying().Now()` can both be used in matchers - not just polled functions. + +- `TryAgainAfter(duration)` is used like `StopTrying()` but instructs `Eventually` and `Consistently` that the poll should be tried again after the specified duration. This allows you to dynamically adjust the polling duration. + +- `ctx` can now be passed-in as the first argument to `Eventually` and `Consistently`. + +## Maintenance + +- Bump github.com/onsi/ginkgo/v2 from 2.3.0 to 2.3.1 (#597) [afed901] +- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#599) [7c691b3] +- Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#587) [ff22665] + +## 1.22.1 + +## Fixes +- When passed a context and no explicit timeout, Eventually will only timeout when the context is cancelled [e5105cf] +- Allow StopTrying() to be wrapped [bf3cba9] + +## Maintenance +- bump to ginkgo v2.3.0 [c5d5c39] + +## 1.22.0 + +### Features + +Several improvements have been made to `Eventually` and `Consistently` in this and the most recent releases: + +- Eventually and Consistently can take a context.Context [65c01bc] + This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts. +- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9] +- Eventually/Consistently will forward an attached context to functions that ask for one [e2091c5] +- Eventually/Consistently supports passing arguments to functions via WithArguments() [a2dc7c3] +- Eventually and Consistently can now be stopped early with StopTrying(message) and StopTrying(message).Now() [52976bb] + +These improvements are all documented in [Gomega's docs](https://onsi.github.io/gomega/#making-asynchronous-assertions) + +## Fixes + +## Maintenance + +## 1.21.1 ### Features - Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9] diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md index 7153b9b94d..9973fff49e 100644 --- a/vendor/github.com/onsi/gomega/RELEASING.md +++ b/vendor/github.com/onsi/gomega/RELEASING.md @@ -5,7 +5,7 @@ A Gomega release is a tagged sha and a GitHub release. 
To cut a release: ```bash LAST_VERSION=$(git tag --sort=version:refname | tail -n1) CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) - echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n## Fixes\n\n## Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md ``` to update the changelog - Categorize the changes into diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 6e78c391df..1a2ed877a9 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -65,6 +65,52 @@ type GomegaStringer interface { GomegaString() string } +/* +CustomFormatters can be registered with Gomega via RegisterCustomFormatter() +Any value to be rendered by Gomega is passed to each registered CustomFormatters. +The CustomFormatter signals that it will handle formatting the value by returning (formatted-string, true) +If the CustomFormatter does not want to handle the object it should return ("", false) + +Strings returned by CustomFormatters are not truncated +*/ +type CustomFormatter func(value interface{}) (string, bool) +type CustomFormatterKey uint + +var customFormatterKey CustomFormatterKey = 1 + +type customFormatterKeyPair struct { + CustomFormatter + CustomFormatterKey +} + +/* +RegisterCustomFormatter registers a CustomFormatter and returns a CustomFormatterKey + +You can call UnregisterCustomFormatter with the returned key to unregister the associated CustomFormatter +*/ +func RegisterCustomFormatter(customFormatter CustomFormatter) CustomFormatterKey { + key := customFormatterKey + customFormatterKey += 1 + customFormatters = append(customFormatters, customFormatterKeyPair{customFormatter, key}) + return key +} + +/* +UnregisterCustomFormatter unregisters a previously registered CustomFormatter. You should pass in the key returned by RegisterCustomFormatter +*/ +func UnregisterCustomFormatter(key CustomFormatterKey) { + formatters := []customFormatterKeyPair{} + for _, f := range customFormatters { + if f.CustomFormatterKey == key { + continue + } + formatters = append(formatters, f) + } + customFormatters = formatters +} + +var customFormatters = []customFormatterKeyPair{} + /* Generates a formatted matcher success/failure message of the form: @@ -219,17 +265,24 @@ func Object(object interface{}, indentation uint) string { IndentString takes a string and indents each line by the specified amount. 
*/ func IndentString(s string, indentation uint) string { + return indentString(s, indentation, true) +} + +func indentString(s string, indentation uint, indentFirstLine bool) string { + result := &strings.Builder{} components := strings.Split(s, "\n") - result := "" indent := strings.Repeat(Indent, int(indentation)) for i, component := range components { - result += indent + component + if i > 0 || indentFirstLine { + result.WriteString(indent) + } + result.WriteString(component) if i < len(components)-1 { - result += "\n" + result.WriteString("\n") } } - return result + return result.String() } func formatType(v reflect.Value) string { @@ -261,18 +314,27 @@ func formatValue(value reflect.Value, indentation uint) string { if value.CanInterface() { obj := value.Interface() + // if a CustomFormatter handles this values, we'll go with that + for _, customFormatter := range customFormatters { + formatted, handled := customFormatter.CustomFormatter(obj) + // do not truncate a user-provided CustomFormatter() + if handled { + return indentString(formatted, indentation+1, false) + } + } + // GomegaStringer will take precedence to other representations and disregards UseStringerRepresentation if x, ok := obj.(GomegaStringer); ok { - // do not truncate a user-defined GoMegaString() value - return x.GomegaString() + // do not truncate a user-defined GomegaString() value + return indentString(x.GomegaString(), indentation+1, false) } if UseStringerRepresentation { switch x := obj.(type) { case fmt.GoStringer: - return truncateLongStrings(x.GoString()) + return indentString(truncateLongStrings(x.GoString()), indentation+1, false) case fmt.Stringer: - return truncateLongStrings(x.String()) + return indentString(truncateLongStrings(x.String()), indentation+1, false) } } } diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 274116fd4f..b65c8be9b0 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.21.1" +const GOMEGA_VERSION = "1.24.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -86,12 +86,12 @@ func internalGomega(g Gomega) *internal.Gomega { // NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with // Gomega's rich ecosystem of matchers in standard `testing` test suits. // -// func TestFarmHasCow(t *testing.T) { -// g := gomega.NewWithT(t) +// func TestFarmHasCow(t *testing.T) { +// g := gomega.NewWithT(t) // -// f := farm.New([]string{"Cow", "Horse"}) -// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") -// } +// f := farm.New([]string{"Cow", "Horse"}) +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } func NewWithT(t types.GomegaTestingT) *WithT { return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t) } @@ -171,7 +171,8 @@ func ensureDefaultGomegaIsConfigured() { } // Ω wraps an actual value allowing assertions to be made on it: -// Ω("foo").Should(Equal("foo")) +// +// Ω("foo").Should(Equal("foo")) // // If Ω is passed more than one argument it will pass the *first* argument to the matcher. // All subsequent arguments will be required to be nil/zero. 
@@ -180,10 +181,13 @@ func ensureDefaultGomegaIsConfigured() { // a value and an error - a common patter in Go. // // For example, given a function with signature: -// func MyAmazingThing() (int, error) +// +// func MyAmazingThing() (int, error) // // Then: -// Ω(MyAmazingThing()).Should(Equal(3)) +// +// Ω(MyAmazingThing()).Should(Equal(3)) +// // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Ω and Expect are identical @@ -193,7 +197,8 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { } // Expect wraps an actual value allowing assertions to be made on it: -// Expect("foo").To(Equal("foo")) +// +// Expect("foo").To(Equal("foo")) // // If Expect is passed more than one argument it will pass the *first* argument to the matcher. // All subsequent arguments will be required to be nil/zero. @@ -202,10 +207,13 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { // a value and an error - a common patter in Go. // // For example, given a function with signature: -// func MyAmazingThing() (int, error) +// +// func MyAmazingThing() (int, error) // // Then: -// Expect(MyAmazingThing()).Should(Equal(3)) +// +// Expect(MyAmazingThing()).Should(Equal(3)) +// // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Expect and Ω are identical @@ -215,7 +223,8 @@ func Expect(actual interface{}, extra ...interface{}) Assertion { } // ExpectWithOffset wraps an actual value allowing assertions to be made on it: -// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +// ExpectWithOffset(1, "foo").To(Equal("foo")) // // Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument // that is used to modify the call-stack offset when computing line numbers. It is @@ -241,15 +250,15 @@ Eventually works with any Gomega compatible matcher and supports making assertio There are several examples of values that can change over time. These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example: - c := make(chan bool) - go DoStuff(c) - Eventually(c, "50ms").Should(BeClosed()) + c := make(chan bool) + go DoStuff(c) + Eventually(c, "50ms").Should(BeClosed()) will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first. Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfully via: - Eventually(session).Should(gexec.Exit(0)) + Eventually(session).Should(gexec.Exit(0)) And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen: @@ -266,35 +275,51 @@ this will trigger Go's race detector as the goroutine polling via Eventually wil **Category 2: Make Eventually assertions on functions** -Eventually can be passed functions that **take no arguments** and **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher. +Eventually can be passed functions that **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher. 
For example: - Eventually(func() int { - return client.FetchCount() - }).Should(BeNumerically(">=", 17)) + Eventually(func() int { + return client.FetchCount() + }).Should(BeNumerically(">=", 17)) - will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17))) + will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17))) If multiple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. For example, consider a method that returns a value and an error: - func FetchFromDB() (string, error) + + func FetchFromDB() (string, error) Then - Eventually(FetchFromDB).Should(Equal("got it")) + + Eventually(FetchFromDB).Should(Equal("got it")) will pass only if and when the returned error is nil *and* the returned string satisfies the matcher. +Eventually can also accept functions that take arguments, however you must provide those arguments using .WithArguments(). For example, consider a function that takes a user-id and makes a network request to fetch a full name: + + func FetchFullName(userId int) (string, error) + +You can poll this function like so: + + Eventually(FetchFullName).WithArguments(1138).Should(Equal("Wookie")) + It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. A common practice here is to use a context. Here's an example that combines Ginkgo's spec timeout support with Eventually: It("fetches the correct count", func(ctx SpecContext) { - Eventually(func() int { - return client.FetchCount(ctx) - }, ctx).Should(BeNumerically(">=", 17)) + Eventually(ctx, func() int { + return client.FetchCount(ctx, "/users") + }).Should(BeNumerically(">=", 17)) + }, SpecTimeout(time.Second)) + +you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with paseed-in arguments as long as the context appears first. You can rewrite the above example as: + + It("fetches the correct count", func(ctx SpecContext) { + Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. +Either way the context passd to Eventually is also passed to the underlying funciton. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. **Category 3: Making assertions _in_ the function passed into Eventually** @@ -314,27 +339,38 @@ will pass only if all the assertions in the polled function pass and the return Eventually also supports a special case polling function that takes a single Gomega argument and returns no values. Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed. 
For example: - Eventually(func(g Gomega) { - model, err := client.Find(1138) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(model.Reticulate()).To(Succeed()) - g.Expect(model.IsReticulated()).To(BeTrue()) - g.Expect(model.Save()).To(Succeed()) - }).Should(Succeed()) + Eventually(func(g Gomega) { + model, err := client.Find(1138) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(model.Reticulate()).To(Succeed()) + g.Expect(model.IsReticulated()).To(BeTrue()) + g.Expect(model.Save()).To(Succeed()) + }).Should(Succeed()) will rerun the function until all assertions pass. +You can also pass additional arugments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that is the second argument. For example: + + Eventually(func(g Gomega, ctx context.Context, path string, expected ...string){ + tok, err := client.GetToken(ctx) + g.Expect(err).NotTo(HaveOccurred()) + + elements, err := client.Fetch(ctx, tok, path) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(elements).To(ConsistOf(expected)) + }).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed()) + Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: Eventually(..., "1s", "2s", ctx).Should(...) is equivalent to - Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) + Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ -func Eventually(actual interface{}, args ...interface{}) AsyncAssertion { +func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Eventually(actual, args...) + return Default.Eventually(actualOrCtx, args...) } // EventuallyWithOffset operates like Eventually but takes an additional @@ -346,9 +382,9 @@ func Eventually(actual interface{}, args ...interface{}) AsyncAssertion { // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are // the same as `Eventually(...).WithOffset(...).WithTimeout` or // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`. -func EventuallyWithOffset(offset int, actual interface{}, args ...interface{}) AsyncAssertion { +func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.EventuallyWithOffset(offset, actual, args...) + return Default.EventuallyWithOffset(offset, actualOrCtx, args...) } /* @@ -362,13 +398,13 @@ Consistently accepts the same three categories of actual as Eventually, check th Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write: - Consistently(channel, "200ms").ShouldNot(Receive()) + Consistently(channel, "200ms").ShouldNot(Receive()) This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received. */ -func Consistently(actual interface{}, args ...interface{}) AsyncAssertion { +func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Consistently(actual, args...) + return Default.Consistently(actualOrCtx, args...) 
} // ConsistentlyWithOffset operates like Consistently but takes an additional @@ -377,11 +413,54 @@ func Consistently(actual interface{}, args ...interface{}) AsyncAssertion { // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. -func ConsistentlyWithOffset(offset int, actual interface{}, args ...interface{}) AsyncAssertion { +func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.ConsistentlyWithOffset(offset, actual, args...) + return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...) } +/* +StopTrying can be used to signal to Eventually and Consistentlythat they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal. + +You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution. + +You can also wrap StopTrying around an error with `StopTrying("message").Wrap(err)` and can attach additional objects via `StopTrying("message").Attach("description", object). When rendered, the signal will include the wrapped error and any attached objects rendered using Gomega's default formatting. + +Here are a couple of examples. This is how you might use StopTrying() as an error to signal that Eventually should stop: + + playerIndex, numPlayers := 0, 11 + Eventually(func() (string, error) { + if playerIndex == numPlayers { + return "", StopTrying("no more players left") + } + name := client.FetchPlayer(playerIndex) + playerIndex += 1 + return name, nil + }).Should(Equal("Patrick Mahomes")) + +And here's an example where `StopTrying().Now()` is called to halt execution immediately: + + Eventually(func() []string { + names, err := client.FetchAllPlayers() + if err == client.IRRECOVERABLE_ERROR { + StopTrying("Irrecoverable error occurred").Wrap(err).Now() + } + return names + }).Should(ContainElement("Patrick Mahomes")) +*/ +var StopTrying = internal.StopTrying + +/* +TryAgainAfter() allows you to adjust the polling interval for the _next_ iteration of `Eventually` or `Consistently`. Like `StopTrying` you can either return `TryAgainAfter` as an error or trigger it immedieately with `.Now()` + +When `TryAgainAfter(` is triggered `Eventually` and `Consistently` will wait for that duration. If a timeout occurs before the next poll is triggered both `Eventually` and `Consistently` will always fail with the content of the TryAgainAfter message. As with StopTrying you can `.Wrap()` and error and `.Attach()` additional objects to `TryAgainAfter`. +*/ +var TryAgainAfter = internal.TryAgainAfter + +/* +PollingSignalError is the error returned by StopTrying() and TryAgainAfter() +*/ +type PollingSignalError = internal.PollingSignalError + // SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. func SetDefaultEventuallyTimeout(t time.Duration) { Default.SetDefaultEventuallyTimeout(t) @@ -415,8 +494,8 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { // // Example: // -// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") -// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." 
}) +// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." }) type AsyncAssertion = types.AsyncAssertion // GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter. @@ -438,7 +517,7 @@ type GomegaAsyncAssertion = types.AsyncAssertion // // Example: // -// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) type Assertion = types.Assertion // GomegaAssertion is deprecated in favor of Assertion, which does not stutter. diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go index 7b7bdd149a..08356a610b 100644 --- a/vendor/github.com/onsi/gomega/internal/assertion.go +++ b/vendor/github.com/onsi/gomega/internal/assertion.go @@ -4,6 +4,7 @@ import ( "fmt" "reflect" + "github.com/onsi/gomega/format" "github.com/onsi/gomega/types" ) @@ -146,7 +147,12 @@ func vetActuals(actuals []interface{}, skipIndex int) (bool, string) { if actual != nil { zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() if !reflect.DeepEqual(zeroValue, actual) { - message := fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual) + var message string + if err, ok := actual.(error); ok { + message = fmt.Sprintf("Unexpected error: %s\n%s", err, format.Object(err, 1)) + } else { + message = fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual) + } return false, message } } diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index defa80512b..cc8615a117 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -2,16 +2,35 @@ package internal import ( "context" - "errors" "fmt" "reflect" "runtime" "sync" "time" + "github.com/onsi/gomega/format" "github.com/onsi/gomega/types" ) +var errInterface = reflect.TypeOf((*error)(nil)).Elem() +var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem() +var contextType = reflect.TypeOf(new(context.Context)).Elem() + +type contextWithAttachProgressReporter interface { + AttachProgressReporter(func() string) func() +} + +type asyncGomegaHaltExecutionError struct{} + +func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {} +func (a asyncGomegaHaltExecutionError) Error() string { + return `An assertion has failed in a goroutine. You should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. 
This will allow Ginkgo and Gomega to correctly capture and manage this panic.` +} + type AsyncAssertionType uint const ( @@ -19,12 +38,22 @@ const ( AsyncAssertionTypeConsistently ) +func (at AsyncAssertionType) String() string { + switch at { + case AsyncAssertionTypeEventually: + return "Eventually" + case AsyncAssertionTypeConsistently: + return "Consistently" + } + return "INVALID ASYNC ASSERTION TYPE" +} + type AsyncAssertion struct { asyncType AsyncAssertionType - actualIsFunc bool - actualValue interface{} - actualFunc func() ([]reflect.Value, error) + actualIsFunc bool + actual interface{} + argsToForward []interface{} timeoutInterval time.Duration pollingInterval time.Duration @@ -43,49 +72,9 @@ func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g g: g, } - switch actualType := reflect.TypeOf(actualInput); { - case actualInput == nil || actualType.Kind() != reflect.Func: - out.actualValue = actualInput - case actualType.NumIn() == 0 && actualType.NumOut() > 0: - out.actualIsFunc = true - out.actualFunc = func() ([]reflect.Value, error) { - return reflect.ValueOf(actualInput).Call([]reflect.Value{}), nil - } - case actualType.NumIn() == 1 && actualType.In(0).Implements(reflect.TypeOf((*types.Gomega)(nil)).Elem()): + out.actual = actualInput + if actualInput != nil && reflect.TypeOf(actualInput).Kind() == reflect.Func { out.actualIsFunc = true - out.actualFunc = func() (values []reflect.Value, err error) { - var assertionFailure error - assertionCapturingGomega := NewGomega(g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - _, file, line, _ := runtime.Caller(skip + 1) - assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) - panic("stop execution") - }) - - defer func() { - if actualType.NumOut() == 0 { - if assertionFailure == nil { - values = []reflect.Value{reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())} - } else { - values = []reflect.Value{reflect.ValueOf(assertionFailure)} - } - } else { - err = assertionFailure - } - if e := recover(); e != nil && assertionFailure == nil { - panic(e) - } - }() - - values = reflect.ValueOf(actualInput).Call([]reflect.Value{reflect.ValueOf(assertionCapturingGomega)}) - return - } - default: - msg := fmt.Sprintf("The function passed to Gomega's async assertions should either take no arguments and return values, or take a single Gomega interface that it can use to make assertions within the body of the function. When taking a Gomega interface the function can optionally return values or return nothing. The function you passed takes %d arguments and returns %d values.", actualType.NumIn(), actualType.NumOut()) - g.Fail(msg, offset+4) } return out @@ -121,6 +110,11 @@ func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAss return assertion } +func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion { + assertion.argsToForward = argsToForward + return assertion +} + func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) @@ -145,63 +139,238 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
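WithArguments is new in this vendored version and forwards extra arguments to the polled function. A minimal usage sketch, assuming a hypothetical isNodeReady(name string) bool helper:

	// Forwarded arguments are appended after any Gomega or context parameters
	// the function declares, and their count is checked against its signature.
	Eventually(isNodeReady).
		WithArguments("node-1").
		WithTimeout(2 * time.Minute).
		Should(BeTrue())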
+ "\n" } -func (assertion *AsyncAssertion) pollActual() (interface{}, error) { +func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) { + if len(values) == 0 { + return nil, fmt.Errorf("No values were returned by the function passed to Gomega") + } + + actual := values[0].Interface() + if _, ok := AsPollingSignalError(actual); ok { + return actual, actual.(error) + } + + var err error + for i, extraValue := range values[1:] { + extra := extraValue.Interface() + if extra == nil { + continue + } + if _, ok := AsPollingSignalError(extra); ok { + return actual, extra.(error) + } + extraType := reflect.TypeOf(extra) + zero := reflect.Zero(extraType).Interface() + if reflect.DeepEqual(extra, zero) { + continue + } + if i == len(values)-2 && extraType.Implements(errInterface) { + err = fmt.Errorf("function returned error: %w", extra.(error)) + } + if err == nil { + err = fmt.Errorf("Unexpected non-nil/non-zero return value at index %d:\n\t<%T>: %#v", i+1, extra, extra) + } + } + + return actual, err +} + +func (assertion *AsyncAssertion) invalidFunctionError(t reflect.Type) error { + return fmt.Errorf(`The function passed to %s had an invalid signature of %s. Functions passed to %s must either: + + (a) have return values or + (b) take a Gomega interface as their first argument and use that Gomega instance to make assertions. + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, t, assertion.asyncType) +} + +func (assertion *AsyncAssertion) noConfiguredContextForFunctionError() error { + return fmt.Errorf(`The function passed to %s requested a context.Context, but no context has been provided. Please pass one in using %s().WithContext(). + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, assertion.asyncType) +} + +func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvided int) error { + have := "have" + if numProvided == 1 { + have = "has" + } + return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments. 
+ +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) +} + +func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { if !assertion.actualIsFunc { - return assertion.actualValue, nil + return func() (interface{}, error) { return assertion.actual, nil }, nil } + actualValue := reflect.ValueOf(assertion.actual) + actualType := reflect.TypeOf(assertion.actual) + numIn, numOut, isVariadic := actualType.NumIn(), actualType.NumOut(), actualType.IsVariadic() - values, err := assertion.actualFunc() - if err != nil { - return nil, err + if numIn == 0 && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) + } + takesGomega, takesContext := false, false + if numIn > 0 { + takesGomega, takesContext = actualType.In(0).Implements(gomegaType), actualType.In(0).Implements(contextType) + } + if takesGomega && numIn > 1 && actualType.In(1).Implements(contextType) { + takesContext = true + } + if takesContext && len(assertion.argsToForward) > 0 && reflect.TypeOf(assertion.argsToForward[0]).Implements(contextType) { + takesContext = false + } + if !takesGomega && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) + } + if takesContext && assertion.ctx == nil { + return nil, assertion.noConfiguredContextForFunctionError() + } + + var assertionFailure error + inValues := []reflect.Value{} + if takesGomega { + inValues = append(inValues, reflect.ValueOf(NewGomega(assertion.g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + _, file, line, _ := runtime.Caller(skip + 1) + assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) + // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine + panic(asyncGomegaHaltExecutionError{}) + }))) + } + if takesContext { + inValues = append(inValues, reflect.ValueOf(assertion.ctx)) + } + for _, arg := range assertion.argsToForward { + inValues = append(inValues, reflect.ValueOf(arg)) } - extras := []interface{}{nil} - for _, value := range values[1:] { - extras = append(extras, value.Interface()) + + if !isVariadic && numIn != len(inValues) { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) + } else if isVariadic && len(inValues) < numIn-1 { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) } - success, message := vetActuals(extras, 0) - if !success { - return nil, errors.New(message) + + return func() (actual interface{}, err error) { + var values []reflect.Value + assertionFailure = nil + defer func() { + if numOut == 0 && takesGomega { + actual = assertionFailure + } else { + actual, err = assertion.processReturnValues(values) + _, isAsyncError := AsPollingSignalError(err) + if assertionFailure != nil && !isAsyncError { + err = assertionFailure + } + } + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else if assertionFailure == nil { + panic(e) + } + } + }() + values = actualValue.Call(inValues) + return + }, nil +} + +func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { + if assertion.timeoutInterval >= 0 { + return time.After(assertion.timeoutInterval) } - return values[0].Interface(), nil + if assertion.asyncType == AsyncAssertionTypeConsistently { + return 
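buildActualPoller above also accepts functions that take a Gomega instance and return nothing; per-iteration assertion failures are captured and surfaced if the overall assertion fails. A minimal sketch, with client.Ping() as an illustrative call:

	Eventually(func(g Gomega) {
		resp, err := client.Ping() // hypothetical call
		g.Expect(err).NotTo(HaveOccurred())
		g.Expect(resp.Healthy).To(BeTrue())
	}).Should(Succeed())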
time.After(assertion.g.DurationBundle.ConsistentlyDuration) + } else { + if assertion.ctx == nil { + return time.After(assertion.g.DurationBundle.EventuallyTimeout) + } else { + return nil + } + } } -func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { - if assertion.actualIsFunc { - return true +func (assertion *AsyncAssertion) afterPolling() <-chan time.Time { + if assertion.pollingInterval >= 0 { + return time.After(assertion.pollingInterval) + } + if assertion.asyncType == AsyncAssertionTypeConsistently { + return time.After(assertion.g.DurationBundle.ConsistentlyPollingInterval) + } else { + return time.After(assertion.g.DurationBundle.EventuallyPollingInterval) } - return types.MatchMayChangeInTheFuture(matcher, value) } -type contextWithAttachProgressReporter interface { - AttachProgressReporter(func() string) func() +func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) { + return false + } + return true +} + +func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) { + defer func() { + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else { + panic(e) + } + } + }() + + matches, err = matcher.Match(value) + + return } func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { timer := time.Now() - timeout := time.After(assertion.timeoutInterval) + timeout := assertion.afterTimeout() lock := sync.Mutex{} var matches bool var err error - mayChange := true + var oracleMatcherSaysStop bool - value, err := assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) + assertion.g.THelper() + + pollActual, err := assertion.buildActualPoller() + if err != nil { + assertion.g.Fail(err.Error(), 2+assertion.offset) + return false } - assertion.g.THelper() + value, err := pollActual() + if err == nil { + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) + matches, err = assertion.pollMatcher(matcher, value) + } messageGenerator := func() string { // can be called out of band by Ginkgo if the user requests a progress report lock.Lock() defer lock.Unlock() - errMsg := "" message := "" if err != nil { - errMsg = "Error: " + err.Error() + if pollingSignalErr, ok := AsPollingSignalError(err); ok && pollingSignalErr.IsStopTrying() { + message = err.Error() + for _, attachment := range pollingSignalErr.Attachments { + message += fmt.Sprintf("\n%s:\n", attachment.Description) + message += format.Object(attachment.Object, 1) + } + } else { + message = "Error: " + err.Error() + "\n" + format.Object(err, 1) + } } else { if desiredMatch { message = matcher.FailureMessage(value) @@ -210,7 +379,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } description := assertion.buildDescription(optionalDescription...) 
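Per afterTimeout above, when a context is attached and no explicit timeout is set, Eventually has no timer of its own and polls until the context is done. A sketch of that context-aware form; ListPlayers is an illustrative call:

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// The ctx is passed to the polled function, and its cancellation, rather
	// than the default Eventually timeout, ends the polling loop.
	Eventually(func(ctx context.Context) ([]string, error) {
		return client.ListPlayers(ctx) // hypothetical call
	}).WithContext(ctx).Should(ContainElement("Patrick Mahomes"))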
- return fmt.Sprintf("%s%s%s", description, message, errMsg) + return fmt.Sprintf("%s%s", description, message) } fail := func(preamble string) { @@ -227,70 +396,72 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } - if assertion.asyncType == AsyncAssertionTypeEventually { - for { - if err == nil && matches == desiredMatch { - return true - } + for { + var nextPoll <-chan time.Time = nil + var isTryAgainAfterError = false - if !mayChange { - fail("No future change is possible. Bailing out early") + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + if pollingSignalErr.IsStopTrying() { + fail("Told to stop trying") return false } - - select { - case <-time.After(assertion.pollingInterval): - v, e := assertion.pollActual() - lock.Lock() - value, err = v, e - lock.Unlock() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, e = matcher.Match(value) - lock.Lock() - err = e - lock.Unlock() - } - case <-contextDone: - fail("Context was cancelled") - return false - case <-timeout: - fail("Timed out") - return false + if pollingSignalErr.IsTryAgainAfter() { + nextPoll = time.After(pollingSignalErr.TryAgainDuration()) + isTryAgainAfterError = true } } - } else if assertion.asyncType == AsyncAssertionTypeConsistently { - for { - if !(err == nil && matches == desiredMatch) { + + if err == nil && matches == desiredMatch { + if assertion.asyncType == AsyncAssertionTypeEventually { + return true + } + } else if !isTryAgainAfterError { + if assertion.asyncType == AsyncAssertionTypeConsistently { fail("Failed") return false } + } - if !mayChange { + if oracleMatcherSaysStop { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("No future change is possible. Bailing out early") + return false + } else { return true } + } - select { - case <-time.After(assertion.pollingInterval): - v, e := assertion.pollActual() + if nextPoll == nil { + nextPoll = assertion.afterPolling() + } + + select { + case <-nextPoll: + v, e := pollActual() + lock.Lock() + value, err = v, e + lock.Unlock() + if err == nil { + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) + m, e := assertion.pollMatcher(matcher, value) lock.Lock() - value, err = v, e + matches, err = m, e lock.Unlock() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, e = matcher.Match(value) - lock.Lock() - err = e - lock.Unlock() - } - case <-contextDone: - fail("Context was cancelled") + } + case <-contextDone: + fail("Context was cancelled") + return false + case <-timeout: + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Timed out") return false - case <-timeout: + } else { + if isTryAgainAfterError { + fail("Timed out while waiting on TryAgainAfter") + return false + } return true } } } - - return false } diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index af8d989fa9..6e0d90d3a1 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -44,28 +44,28 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration { return duration } -func toDuration(input interface{}) time.Duration { +func toDuration(input interface{}) (time.Duration, error) { duration, ok := input.(time.Duration) if ok { - return duration + return duration, nil } value := reflect.ValueOf(input) kind := reflect.TypeOf(input).Kind() if reflect.Int 
<= kind && kind <= reflect.Int64 { - return time.Duration(value.Int()) * time.Second + return time.Duration(value.Int()) * time.Second, nil } else if reflect.Uint <= kind && kind <= reflect.Uint64 { - return time.Duration(value.Uint()) * time.Second + return time.Duration(value.Uint()) * time.Second, nil } else if reflect.Float32 <= kind && kind <= reflect.Float64 { - return time.Duration(value.Float() * float64(time.Second)) + return time.Duration(value.Float() * float64(time.Second)), nil } else if reflect.String == kind { duration, err := time.ParseDuration(value.String()) if err != nil { - panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + return 0, fmt.Errorf("%#v is not a valid parsable duration string: %w", input, err) } - return duration + return duration, nil } - panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) + return 0, fmt.Errorf("%#v is not a valid interval. Must be a time.Duration, a parsable duration string, or a number.", input) } diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index 52a6b243c4..2d92877f3d 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -52,43 +52,42 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(actual interface{}, intervals ...interface{}) types.AsyncAssertion { - return g.EventuallyWithOffset(0, actual, intervals...) +func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...) } -func (g *Gomega) EventuallyWithOffset(offset int, actual interface{}, args ...interface{}) types.AsyncAssertion { - timeoutInterval := g.DurationBundle.EventuallyTimeout - pollingInterval := g.DurationBundle.EventuallyPollingInterval - intervals := []interface{}{} - var ctx context.Context - for _, arg := range args { - switch v := arg.(type) { - case context.Context: - ctx = v - default: - intervals = append(intervals, arg) - } - } - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } +func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...) +} - return NewAsyncAssertion(AsyncAssertionTypeEventually, actual, g, timeoutInterval, pollingInterval, ctx, offset) +func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...) } -func (g *Gomega) Consistently(actual interface{}, intervals ...interface{}) types.AsyncAssertion { - return g.ConsistentlyWithOffset(0, actual, intervals...) +func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...) 
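toDuration above accepts a time.Duration, a bare number (interpreted as seconds), or a string parseable by time.ParseDuration, and now reports bad values as assertion failures rather than panicking. The equivalent interval spellings, assuming a polled fetchState function:

	// Each line configures a 30-second timeout with a 2-second polling interval.
	Eventually(fetchState, 30*time.Second, 2*time.Second).Should(Equal("ready"))
	Eventually(fetchState, "30s", "2s").Should(Equal("ready"))
	Eventually(fetchState, 30, 2).Should(Equal("ready"))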
} -func (g *Gomega) ConsistentlyWithOffset(offset int, actual interface{}, args ...interface{}) types.AsyncAssertion { - timeoutInterval := g.DurationBundle.ConsistentlyDuration - pollingInterval := g.DurationBundle.ConsistentlyPollingInterval +func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + baseOffset := 3 + timeoutInterval := -time.Duration(1) + pollingInterval := -time.Duration(1) intervals := []interface{}{} var ctx context.Context - for _, arg := range args { + + actual := actualOrCtx + startingIndex := 0 + if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 { + // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration + // this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual + if _, err := toDuration(args[0]); err != nil { + ctx = actualOrCtx.(context.Context) + actual = args[0] + startingIndex = 1 + } + } + + for _, arg := range args[startingIndex:] { switch v := arg.(type) { case context.Context: ctx = v @@ -96,14 +95,21 @@ func (g *Gomega) ConsistentlyWithOffset(offset int, actual interface{}, args ... intervals = append(intervals, arg) } } + var err error if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) + timeoutInterval, err = toDuration(intervals[0]) + if err != nil { + g.Fail(err.Error(), offset+baseOffset) + } } if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) + pollingInterval, err = toDuration(intervals[1]) + if err != nil { + g.Fail(err.Error(), offset+baseOffset) + } } - return NewAsyncAssertion(AsyncAssertionTypeConsistently, actual, g, timeoutInterval, pollingInterval, ctx, offset) + return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, ctx, offset) } func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) { diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go new file mode 100644 index 0000000000..83b04b1a4c --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -0,0 +1,106 @@ +package internal + +import ( + "errors" + "fmt" + "time" +) + +type PollingSignalErrorType int + +const ( + PollingSignalErrorTypeStopTrying PollingSignalErrorType = iota + PollingSignalErrorTypeTryAgainAfter +) + +type PollingSignalError interface { + error + Wrap(err error) PollingSignalError + Attach(description string, obj any) PollingSignalError + Now() +} + +var StopTrying = func(message string) PollingSignalError { + return &PollingSignalErrorImpl{ + message: message, + pollingSignalErrorType: PollingSignalErrorTypeStopTrying, + } +} + +var TryAgainAfter = func(duration time.Duration) PollingSignalError { + return &PollingSignalErrorImpl{ + message: fmt.Sprintf("told to try again after %s", duration), + duration: duration, + pollingSignalErrorType: PollingSignalErrorTypeTryAgainAfter, + } +} + +type PollingSignalErrorAttachment struct { + Description string + Object any +} + +type PollingSignalErrorImpl struct { + message string + wrappedErr error + pollingSignalErrorType PollingSignalErrorType + duration time.Duration + Attachments []PollingSignalErrorAttachment +} + +func (s *PollingSignalErrorImpl) Wrap(err error) PollingSignalError { + s.wrappedErr = err + return s +} + +func (s 
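makeAsyncAssertion above also recognizes a context passed as the first argument, provided the following argument is not itself a parseable duration (the ambiguity noted in its comment). A sketch of that spelling; fetchReplicas is an illustrative helper:

	// Equivalent to Eventually(...).WithContext(ctx): the leading context is
	// detected and forwarded to the polled function.
	Eventually(ctx, func(ctx context.Context) (int, error) {
		return fetchReplicas(ctx) // hypothetical call
	}).Should(Equal(3))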
*PollingSignalErrorImpl) Attach(description string, obj any) PollingSignalError { + s.Attachments = append(s.Attachments, PollingSignalErrorAttachment{description, obj}) + return s +} + +func (s *PollingSignalErrorImpl) Error() string { + if s.wrappedErr == nil { + return s.message + } else { + return s.message + ": " + s.wrappedErr.Error() + } +} + +func (s *PollingSignalErrorImpl) Unwrap() error { + if s == nil { + return nil + } + return s.wrappedErr +} + +func (s *PollingSignalErrorImpl) Now() { + panic(s) +} + +func (s *PollingSignalErrorImpl) IsStopTrying() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying +} + +func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter +} + +func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration { + return s.duration +} + +func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) { + if actual == nil { + return nil, false + } + if actualErr, ok := actual.(error); ok { + var target *PollingSignalErrorImpl + if errors.As(actualErr, &target) { + return target, true + } else { + return nil, false + } + } + + return nil, false +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index f9d9f2aad2..857586a910 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -27,7 +27,8 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher { } } -// BeComparableTo uses gocmp.Equal to compare. You can pass cmp.Option as options. +// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison. +// You can pass cmp.Option as options. // It is an error for actual and expected to be nil. Use BeNil() instead. 
func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher { return &matchers.BeComparableToMatcher{ diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 8f2998a5e2..125de64971 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -19,11 +19,11 @@ type Gomega interface { Expect(actual interface{}, extra ...interface{}) Assertion ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion - Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion + EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion - Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion + ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -74,6 +74,7 @@ type AsyncAssertion interface { Within(timeout time.Duration) AsyncAssertion ProbeEvery(interval time.Duration) AsyncAssertion WithContext(ctx context.Context) AsyncAssertion + WithArguments(argsToForward ...interface{}) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers diff --git a/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go b/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go index 626d59fe92..19fd92b399 100644 --- a/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go +++ b/vendor/github.com/portworx/torpedo/drivers/node/node_registry.go @@ -77,6 +77,16 @@ func GetMasterNodes() []Node { return nodeList } +// IsMasterNode returns true if node is a Masternode +func IsMasterNode(n Node) bool { + for _, each := range GetMasterNodes() { + if each.uuid == n.uuid { + return true + } + } + return false +} + // GetStorageDriverNodes returns only the worker node where storage // driver is installed func GetStorageDriverNodes() []Node { @@ -217,3 +227,15 @@ func GetNodeDetailsByNodeName(nodeName string) (Node, error) { } return Node{}, fmt.Errorf("failed to get Node Details by Node Name [%s] ", nodeName) } + +// GetNodeDetailsByNodeID get node details for a given node name +func GetNodeDetailsByNodeID(nodeID string) (Node, error) { + storageNodes := GetStorageNodes() + + for _, each := range storageNodes { + if each.Id == nodeID { + return each, nil + } + } + return Node{}, fmt.Errorf("failed to get Node Details by Node ID [%s] ", nodeID) +} diff --git a/vendor/github.com/portworx/torpedo/drivers/node/ssh/ssh.go b/vendor/github.com/portworx/torpedo/drivers/node/ssh/ssh.go index 145e4735b5..18a02cd766 100644 --- a/vendor/github.com/portworx/torpedo/drivers/node/ssh/ssh.go +++ b/vendor/github.com/portworx/torpedo/drivers/node/ssh/ssh.go @@ -664,12 +664,12 @@ func (s *SSH) doCmdSSH(n node.Node, options node.ConnectionOpts, cmd string, ign stderr, err := session.StderrPipe() if err != nil { - return "", fmt.Errorf("fail to setup stderr") + return "", fmt.Errorf("fail to setup stderr, err: %v", err) } stdout, err := session.StdoutPipe() if err != nil { - return "", 
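A minimal sketch of how the new node-registry helpers might be used from a torpedo test; nodeID and the surrounding error handling are illustrative:

	// Resolve a storage node by its driver ID and skip it if it is a master.
	n, err := node.GetNodeDetailsByNodeID(nodeID)
	if err != nil {
		return err
	}
	if node.IsMasterNode(n) {
		log.Infof("skipping master node %s", n.Name)
		return nil
	}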
fmt.Errorf("fail to setup stdout") + return "", fmt.Errorf("fail to setup stdout, err: %v", err) } if options.Sudo { cmd = fmt.Sprintf("sudo su -c '%s' -", cmd) // Hyphen necessary to preserve PATH for commands like "which pxctl" @@ -688,9 +688,10 @@ func (s *SSH) doCmdSSH(n node.Node, options node.ConnectionOpts, cmd string, ign } if ignoreErr == false && err != nil { + log.Infof("SSH ERR: %v", err) return out, &node.ErrFailedToRunCommand{ Addr: n.UsableAddr, - Cause: fmt.Sprintf("failed to run command due to: %v", sterr), + Cause: fmt.Sprintf("failed to run command. sterr: %v, err: %v", sterr, err), } } return out, nil @@ -741,6 +742,25 @@ func (s *SSH) SystemCheck(n node.Node, options node.ConnectionOpts) (string, err return file, nil } +// UpdateNodeCreds update username and ssh key path +func (s *SSH) UpdateNodeCreds(username string, keyPath string) error { + s.username = username + s.keyPath = keyPath + pubkey, err := getKeyFile(s.keyPath) + if err != nil { + return fmt.Errorf("error getting public keyPath from keyfile") + } + s.sshConfig = &ssh_pkg.ClientConfig{ + User: username, + Auth: []ssh_pkg.AuthMethod{ + ssh_pkg.PublicKeys(pubkey), + }, + HostKeyCallback: ssh_pkg.InsecureIgnoreHostKey(), + Timeout: time.Second * 5, + } + return nil +} + // GetBlockDrives returns the block drives on the node func (s *SSH) GetBlockDrives(n node.Node, options node.SystemctlOpts) (map[string]*node.BlockDrive, error) { drives := make(map[string]*node.BlockDrive) diff --git a/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go b/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go index 5d28ae4ed0..74c7f53853 100644 --- a/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go +++ b/vendor/github.com/portworx/torpedo/drivers/scheduler/k8s/k8s.go @@ -35,6 +35,7 @@ import ( "github.com/libopenstorage/openstorage/pkg/units" storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" admissionregistration "github.com/portworx/sched-ops/k8s/admissionregistration" + "github.com/portworx/sched-ops/k8s/apiextensions" "github.com/portworx/sched-ops/k8s/apps" "github.com/portworx/sched-ops/k8s/autopilot" "github.com/portworx/sched-ops/k8s/batch" @@ -133,11 +134,11 @@ const ( k8sDestroyTimeout = 10 * time.Minute // FindFilesOnWorkerTimeout timeout for find files on worker FindFilesOnWorkerTimeout = 1 * time.Minute - deleteTasksWaitTimeout = 3 * time.Minute + deleteTasksWaitTimeout = 5 * time.Minute // DefaultRetryInterval Default retry interval DefaultRetryInterval = 10 * time.Second // DefaultTimeout default timeout - DefaultTimeout = 3 * time.Minute + DefaultTimeout = 5 * time.Minute // SnapshotReadyTimeout timeout for snapshot to be ready SnapshotReadyTimeout = 5 * time.Minute numOfRestoredPVCForCloneManyTest = 500 @@ -204,6 +205,7 @@ var ( k8sMonitoring = prometheus.Instance() k8sPolicy = policy.Instance() k8sAdmissionRegistration = admissionregistration.Instance() + k8sApiExtensions = apiextensions.Instance() // k8sExternalsnap is a instance of csisnapshot instance k8sExternalsnap = csisnapshot.Instance() @@ -211,6 +213,14 @@ var ( SnapshotAPIGroup = "snapshot.storage.k8s.io" ) +// CustomResourceObjectYAML Used as spec object for all CRs +type CustomResourceObjectYAML struct { + Path string + // Namespace will only be assigned DURING creation + Namespace string + Name string +} + // K8s The kubernetes structure type K8s struct { SpecFactory *spec.Factory @@ -349,7 +359,11 @@ func (k *K8s) SetConfig(kubeconfigPath string) error { k8sRbac.SetConfig(config) 
k8sMonitoring.SetConfig(config) k8sPolicy.SetConfig(config) + k8sBatch.SetConfig(config) + k8sMonitoring.SetConfig(config) k8sAdmissionRegistration.SetConfig(config) + k8sExternalsnap.SetConfig(config) + k8sApiExtensions.SetConfig(config) return nil } @@ -411,7 +425,16 @@ func (k *K8s) ParseSpecs(specDir, storageProvisioner string) ([]interface{}, err if err != nil { return nil, err } - if !isHelmChart { + + splitPath := strings.Split(fileName, "/") + if strings.HasPrefix(splitPath[len(splitPath)-1], "cr-") { + // TODO: process with templates + specObj := &CustomResourceObjectYAML{ + Path: fileName, + } + specs = append(specs, specObj) + log.Warnf("custom res: %v", specObj) //TODO: remove + } else if !isHelmChart { file, err := ioutil.ReadFile(fileName) if err != nil { return nil, err @@ -845,6 +868,50 @@ func (k *K8s) Schedule(instanceID string, options scheduler.ScheduleOptions) ([] return contexts, nil } +// ScheduleWithCustomAppSpecs Schedules the application with custom app specs +func (k *K8s) ScheduleWithCustomAppSpecs(apps []*spec.AppSpec, instanceID string, options scheduler.ScheduleOptions) ([]*scheduler.Context, error) { + var contexts []*scheduler.Context + oldOptionsNamespace := options.Namespace + for _, app := range apps { + + appNamespace := app.GetID(instanceID) + if options.Namespace != "" { + appNamespace = options.Namespace + } else { + options.Namespace = appNamespace + } + if len(options.TopologyLabels) > 1 { + rotateTopologyArray(&options) + } + + specObjects, err := k.CreateSpecObjects(app, appNamespace, options) + if err != nil { + return nil, err + } + + helmSpecObjects, err := k.HelmSchedule(app, appNamespace, options) + if err != nil { + return nil, err + } + + specObjects = append(specObjects, helmSpecObjects...) + ctx := &scheduler.Context{ + UID: instanceID, + App: &spec.AppSpec{ + Key: app.Key, + SpecList: specObjects, + Enabled: app.Enabled, + }, + ScheduleOptions: options, + } + + contexts = append(contexts, ctx) + options.Namespace = oldOptionsNamespace + } + + return contexts, nil +} + // CreateSpecObjects Create application func (k *K8s) CreateSpecObjects(app *spec.AppSpec, namespace string, options scheduler.ScheduleOptions) ([]interface{}, error) { var specObjects []interface{} @@ -870,6 +937,23 @@ func (k *K8s) CreateSpecObjects(app *spec.AppSpec, namespace string, options sch } } + for _, appSpec := range app.SpecList { + t := func() (interface{}, bool, error) { + obj, err := k.createCRDObjects(appSpec, ns, app) + if err != nil { + return nil, true, err + } + return obj, false, nil + } + obj, err := task.DoRetryWithTimeout(t, k8sObjectCreateTimeout, DefaultRetryInterval) + if err != nil { + return nil, err + } + if obj != nil { + specObjects = append(specObjects, obj) + } + } + for _, appSpec := range app.SpecList { t := func() (interface{}, bool, error) { obj, err := k.createVolumeSnapshotRestore(appSpec, ns, app) @@ -1055,6 +1139,25 @@ func (k *K8s) CreateSpecObjects(app *spec.AppSpec, namespace string, options sch } } + // creation of CustomResourceObjects must most likely be done *last*, + // as it may have resources that depend on other resources, which should be create *before* this + for _, appSpec := range app.SpecList { + t := func() (interface{}, bool, error) { + obj, err := k.createCustomResourceObjects(appSpec, ns, app) + if err != nil { + return nil, true, err + } + return obj, false, nil + } + obj, err := task.DoRetryWithTimeout(t, k8sObjectCreateTimeout, DefaultRetryInterval) + if err != nil { + return nil, err + } + if obj != nil { + 
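A sketch of driving the new ScheduleWithCustomAppSpecs entry point; customApps ([]*spec.AppSpec), the instance ID, and the namespace are illustrative:

	// Schedules a hand-built list of app specs; a non-empty Namespace pins all
	// apps to that namespace instead of the per-app generated one.
	contexts, err := schedDriver.ScheduleWithCustomAppSpecs(customApps, "test-instance", scheduler.ScheduleOptions{
		Namespace: "custom-ns",
	})
	if err != nil {
		return err
	}
	log.Infof("scheduled %d contexts", len(contexts))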
specObjects = append(specObjects, obj) + } + } + return specObjects, nil } @@ -1261,6 +1364,25 @@ func (k *K8s) UpdateTasksID(ctx *scheduler.Context, id string) error { return nil } +// GetUpdatedSpec takes a spec and returns the latest version of it by `GET`-ing it +func (k *K8s) GetUpdatedSpec(spec interface{}) (interface{}, error) { + if obj, ok := spec.(*corev1.PersistentVolumeClaim); ok { + pvc, err := k8sCore.GetPersistentVolumeClaim(obj.Name, obj.Namespace) + + if err != nil { + return nil, err + } + + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + pvc.Kind = "PersistentVolumeClaim" + + return pvc, nil + } + + return nil, fmt.Errorf("unsupported object: %v", reflect.TypeOf(spec)) +} + func (k *K8s) createNamespace(app *spec.AppSpec, namespace string, options scheduler.ScheduleOptions) (*corev1.Namespace, error) { k8sOps := k8sCore @@ -1375,6 +1497,11 @@ func (k *K8s) createStorageObject(spec interface{}, ns *corev1.Namespace, app *s if k8serrors.IsAlreadyExists(err) { if sc, err = k8sStorage.GetStorageClass(obj.Name); err == nil { log.Infof("[%v] Found existing storage class: %v", app.Key, sc.Name) + + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + sc.Kind = "StorageClass" + return sc, nil } } @@ -1385,6 +1512,10 @@ func (k *K8s) createStorageObject(spec interface{}, ns *corev1.Namespace, app *s } } + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + sc.Kind = "StorageClass" + log.Infof("[%v] Created storage class: %v", app.Key, sc.Name) return sc, nil @@ -1432,6 +1563,11 @@ func (k *K8s) createStorageObject(spec interface{}, ns *corev1.Namespace, app *s if k8serrors.IsAlreadyExists(err) { if pvc, err = k8sCore.GetPersistentVolumeClaim(newPvcObj.Name, newPvcObj.Namespace); err == nil { log.Infof("[%v] Found existing PVC: %v", app.Key, pvc.Name) + + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + pvc.Kind = "PersistentVolumeClaim" + return pvc, nil } } @@ -1442,6 +1578,10 @@ func (k *K8s) createStorageObject(spec interface{}, ns *corev1.Namespace, app *s } } + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + pvc.Kind = "PersistentVolumeClaim" + log.Infof("[%v] Created PVC: %v", app.Key, pvc.Name) autopilotEnabled := false @@ -1470,6 +1610,11 @@ func (k *K8s) createStorageObject(spec interface{}, ns *corev1.Namespace, app *s if k8serrors.IsAlreadyExists(err) { if snap, err = k8sExternalStorage.GetSnapshot(obj.Metadata.Name, obj.Metadata.Namespace); err == nil { log.Infof("[%v] Found existing snapshot: %v", app.Key, snap.Metadata.Name) + + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + snap.Kind = "VolumeSnapshot" + return snap, nil } } @@ -1480,6 +1625,10 @@ func (k *K8s) createStorageObject(spec interface{}, ns *corev1.Namespace, app *s } } + // This is a hack because the `Kind` field is empty due to K8s bug. 
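GetUpdatedSpec currently supports only PersistentVolumeClaims; a minimal sketch of refreshing a scheduled PVC spec, assuming k is the *K8s driver and pvcSpec is a *corev1.PersistentVolumeClaim taken from a context's SpecList:

	// Re-reads the PVC from the cluster and restores the Kind field that the
	// API server leaves empty (the workaround referenced above).
	updated, err := k.GetUpdatedSpec(pvcSpec)
	if err != nil {
		return err
	}
	pvc := updated.(*corev1.PersistentVolumeClaim)
	log.Infof("PVC %s is in phase %s", pvc.Name, pvc.Status.Phase)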
+ // Refer https://github.com/portworx/torpedo/pull/1345 + snap.Kind = "VolumeSnapshot" + log.Infof("[%v] Created Snapshot: %v", app.Key, snap.Metadata.Name) return snap, nil } else if obj, ok := spec.(*storkapi.GroupVolumeSnapshot); ok { @@ -1762,6 +1911,9 @@ func (k *K8s) createCoreObject(spec interface{}, ns *corev1.Namespace, app *spec if k8serrors.IsAlreadyExists(err) { if dep, err = k8sApps.GetDeployment(obj.Name, obj.Namespace); err == nil { log.Infof("[%v] Found existing deployment: %v", app.Key, dep.Name) + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + dep.Kind = "Deployment" return dep, nil } } @@ -1772,6 +1924,10 @@ func (k *K8s) createCoreObject(spec interface{}, ns *corev1.Namespace, app *spec } } + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + dep.Kind = "Deployment" + log.Infof("[%v] Created deployment: %v", app.Key, dep.Name) return dep, nil @@ -1852,6 +2008,11 @@ func (k *K8s) createCoreObject(spec interface{}, ns *corev1.Namespace, app *spec if k8serrors.IsAlreadyExists(err) { if svc, err = k8sCore.GetService(obj.Name, obj.Namespace); err == nil { log.Infof("[%v] Found existing Service: %v", app.Key, svc.Name) + + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + svc.Kind = "Service" + return svc, nil } } @@ -1862,6 +2023,10 @@ func (k *K8s) createCoreObject(spec interface{}, ns *corev1.Namespace, app *spec } } + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + svc.Kind = "Service" + log.Infof("[%v] Created Service: %v", app.Key, svc.Name) return svc, nil @@ -1876,6 +2041,11 @@ func (k *K8s) createCoreObject(spec interface{}, ns *corev1.Namespace, app *spec if k8serrors.IsAlreadyExists(err) { if secret, err = k8sCore.GetSecret(obj.Name, obj.Namespace); err == nil { log.Infof("[%v] Found existing Secret: %v", app.Key, secret.Name) + + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + secret.Kind = "Secret" + return secret, nil } } @@ -1886,6 +2056,10 @@ func (k *K8s) createCoreObject(spec interface{}, ns *corev1.Namespace, app *spec } } + // This is a hack because the `Kind` field is empty due to K8s bug. + // Refer https://github.com/portworx/torpedo/pull/1345 + secret.Kind = "Secret" + log.Infof("[%v] Created Secret: %v", app.Key, secret.Name) return secret, nil } else if obj, ok := spec.(*storkapi.Rule); ok { @@ -2151,6 +2325,36 @@ func (k *K8s) destroyAdmissionRegistrationObjects(spec interface{}, app *spec.Ap return nil } +// destroyCRDObjects is used to destroy Resources in the group `apiextensions` (like CRDs) +func (k *K8s) destroyCRDObjects(spec interface{}, app *spec.AppSpec) error { + + if obj, ok := spec.(*apiextensionsv1.CustomResourceDefinition); ok { + err := k8sApiExtensions.DeleteCRD(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy CRD: %v. 
Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed CRD: %v", app.Key, obj.Name) + return nil + } + } else if obj, ok := spec.(*apiextensionsv1beta1.CustomResourceDefinition); ok { + err := k8sApiExtensions.DeleteCRDV1beta1(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy CRDV1beta1: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed CRDV1beta1: %v", app.Key, obj.Name) + return nil + } + } + + return nil +} + func (k *K8s) substituteNamespaceInContainers(containers []corev1.Container, ns string) []corev1.Container { var updatedContainers []corev1.Container for _, container := range containers { @@ -2470,6 +2674,22 @@ func (k *K8s) WaitForRunning(ctx *scheduler.Context, timeout, retryInterval time func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { var podList []corev1.Pod + // destruction of CustomResourceObjects must most likely be done *first*, + // as it may have resources that depend on other resources, which should be deleted *after* this + for _, appSpec := range ctx.App.SpecList { + t := func() (interface{}, bool, error) { + err := k.destroyCustomResourceObjects(appSpec, ctx.App) + if err != nil { + return nil, true, err + } else { + return nil, false, nil + } + } + if _, err := task.DoRetryWithTimeout(t, k8sDestroyTimeout, DefaultRetryInterval); err != nil { + return err + } + } + var removeSpecs []interface{} for _, appSpec := range ctx.App.SpecList { if repoInfo, ok := appSpec.(*scheduler.HelmRepo); ok { @@ -2495,6 +2715,7 @@ func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { } } } + for _, appSpec := range ctx.App.SpecList { t := func() (interface{}, bool, error) { err := k.destroyAdmissionRegistrationObjects(appSpec, ctx.App) @@ -2509,6 +2730,20 @@ func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { } } + for _, appSpec := range ctx.App.SpecList { + t := func() (interface{}, bool, error) { + err := k.destroyRbacObjects(appSpec, ctx.App) + if err != nil { + return nil, true, err + } else { + return nil, false, nil + } + } + if _, err := task.DoRetryWithTimeout(t, k8sDestroyTimeout, DefaultRetryInterval); err != nil { + return err + } + } + for _, appSpec := range ctx.App.SpecList { t := func() (interface{}, bool, error) { currPods, err := k.destroyCoreObject(appSpec, opts, ctx.App) @@ -2601,6 +2836,20 @@ func (k *K8s) Destroy(ctx *scheduler.Context, opts map[string]bool) error { } } + for _, appSpec := range ctx.App.SpecList { + t := func() (interface{}, bool, error) { + err := k.destroyCRDObjects(appSpec, ctx.App) + if err != nil { + return nil, true, err + } else { + return nil, false, nil + } + } + if _, err := task.DoRetryWithTimeout(t, k8sDestroyTimeout, DefaultRetryInterval); err != nil { + return err + } + } + if value, ok := opts[scheduler.OptionsWaitForResourceLeakCleanup]; ok && value { if err := k.WaitForDestroy(ctx, k8sDestroyTimeout); err != nil { return err @@ -4222,6 +4471,132 @@ func (k *K8s) createAdmissionRegistrationObjects( return nil, nil } +// createCustomResourceObjects is used to create objects whose resource `kind` is defined by a CRD. 
NOTE: this is done using the `kubectl apply -f` command instead of the conventional method of using an api library +func (k *K8s) createCustomResourceObjects( + spec interface{}, + ns *corev1.Namespace, + app *spec.AppSpec, +) (interface{}, error) { + + if obj, ok := spec.(*CustomResourceObjectYAML); ok { + log.Warn("applying custom resources") + cryaml := obj.Path + if _, err := os.Stat(cryaml); baseErrors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("Cannot find yaml in path %s", cryaml) + } + cmdArgs := []string{"apply", "-f", cryaml, "-n", ns.Name} + err := osutils.Kubectl(cmdArgs) + if err != nil { + return nil, fmt.Errorf("Error applying spec [%s], Error: %s", cryaml, err) + } + obj.Namespace = ns.Name + obj.Name = "placeholder" //TODO1 + return obj, nil + } + + return nil, nil +} + +// destroyCustomResourceObjects is used to delete objects whose resource `kind` is defined by a CRD. NOTE: this is done using the `kubectl delete -f` command instead of the conventional method of using an api library +func (k *K8s) destroyCustomResourceObjects(spec interface{}, app *spec.AppSpec) error { + + if obj, ok := spec.(*CustomResourceObjectYAML); ok { + cryaml := obj.Path + if _, err := os.Stat(cryaml); baseErrors.Is(err, os.ErrNotExist) { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy Custom Resource Object: %v. Err: Cannot find yaml in path: %v", obj.Name, cryaml), + } + } + + cmdArgs := []string{"delete", "-f", cryaml, "-n", obj.Namespace} + err := osutils.Kubectl(cmdArgs) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy Custom Resource Object: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed CustomResourceObject: %v", app.Key, obj.Name) + return nil + } + } + + return nil +} + +// createCRDObjects is used to create Resources in the group `apiextensions` group (like CRDs) +func (k *K8s) createCRDObjects( + specObj interface{}, + ns *corev1.Namespace, + app *spec.AppSpec, +) (interface{}, error) { + + if obj, ok := specObj.(*apiextensionsv1.CustomResourceDefinition); ok { + obj.Namespace = ns.Name + err := k8sApiExtensions.RegisterCRD(obj) + + if k8serrors.IsAlreadyExists(err) { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRD(obj.Name, options); err == nil { + log.Infof("[%v] Found existing CRD: %v", app.Key, crd.Name) + return crd, nil + } + } + + if err != nil { + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Register CRD: %v. Err: %v", obj.Name, err), + } + } else { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRD(obj.Name, options); err == nil { + log.Infof("[%v] Registered CRD: %v", app.Key, crd.Name) + return crd, nil + } else { + // if it fails, then you need to `validate` before `get` + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Get CRD after Registration: %v. 
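Custom resources are applied by shelling out to kubectl rather than through a typed client. A sketch of the spec object a test would feed this path; the path and namespace are placeholders, and the Name is set to a placeholder during creation per the TODO above:

	// ParseSpecs turns any spec file named cr-*.yaml into one of these objects.
	crSpec := &CustomResourceObjectYAML{
		Path: "/specs/myapp/cr-example.yaml", // placeholder path
	}
	// Creation runs roughly:    kubectl apply -f <Path> -n <namespace>
	// Destruction runs roughly: kubectl delete -f <Path> -n <Namespace>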
Err: %v", obj.Name, err), + } + } + } + } else if obj, ok := specObj.(*apiextensionsv1beta1.CustomResourceDefinition); ok { + obj.Namespace = ns.Name + err := k8sApiExtensions.RegisterCRDV1beta1(obj) + + if k8serrors.IsAlreadyExists(err) { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRDV1beta1(obj.Name, options); err == nil { + log.Infof("[%v] Found existing CRDV1beta1: %v", app.Key, crd.Name) + return crd, nil + } + } + + if err != nil { + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Register CRDV1beta1: %v. Err: %v", obj.Name, err), + } + } else { + options := metav1.GetOptions{} + if crd, err := k8sApiExtensions.GetCRDV1beta1(obj.Name, options); err == nil { + log.Infof("[%v] Registered CRDV1beta1: %v", app.Key, crd.Name) + return crd, nil + } else { + // if it fails, then you need to `validate` before `get` + return nil, &scheduler.ErrFailedToScheduleApp{ + App: app, + Cause: fmt.Sprintf("Failed to Get CRDV1beta1 after Registration: %v. Err: %v", obj.Name, err), + } + } + } + } + + return nil, nil +} + func (k *K8s) createMigrationObjects( specObj interface{}, ns *corev1.Namespace, @@ -4644,6 +5019,12 @@ func (k *K8s) createRbacObjects( return clusterrole, nil } else if obj, ok := spec.(*rbacv1.ClusterRoleBinding); ok { obj.Namespace = ns.Name + for i := range obj.Subjects { + // since everything in a spec is in the same namespace in cluster, we can set here ONLY for namespaced object: + if obj.Subjects[i].Kind == "ServiceAccount" { + obj.Subjects[i].Namespace = ns.Name + } + } clusterrolebinding, err := k8sRbac.CreateClusterRoleBinding(obj) if k8serrors.IsAlreadyExists(err) { if clusterrolebinding, err = k8sRbac.GetClusterRoleBinding(obj.Name); err == nil { @@ -4683,6 +5064,47 @@ func (k *K8s) createRbacObjects( return nil, nil } +// destroyRbacObjects destroys objects in the `Rbac.authorization` group (like ClusterRole, ClusterRoleBinding, ServiceAccount) +func (k *K8s) destroyRbacObjects(spec interface{}, app *spec.AppSpec) error { + + if obj, ok := spec.(*rbacv1.ClusterRole); ok { + err := k8sRbac.DeleteClusterRole(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy ClusterRole: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed ClusterRole: %v", app.Key, obj.Name) + return nil + } + } else if obj, ok := spec.(*rbacv1.ClusterRoleBinding); ok { + err := k8sRbac.DeleteClusterRoleBinding(obj.Name) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy ClusterRoleBinding: %v. Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed ClusterRoleBinding: %v", app.Key, obj.Name) + return nil + } + } else if obj, ok := spec.(*corev1.ServiceAccount); ok { + err := k8sCore.DeleteServiceAccount(obj.Name, obj.Namespace) + if err != nil { + return &scheduler.ErrFailedToDestroyApp{ + App: app, + Cause: fmt.Sprintf("Failed to destroy ServiceAccount: %v. 
Err: %v", obj.Name, err), + } + } else { + log.Infof("[%v] Destroyed ServiceAccount: %v", app.Key, obj.Name) + return nil + } + } + + return nil +} + func (k *K8s) createNetworkingObjects( spec interface{}, ns *corev1.Namespace, @@ -6553,3 +6975,12 @@ func init() { k := &K8s{} scheduler.Register(SchedName, k) } + +// ClusterVersion returns the cluster version of the kubernetes cluster as a string (like "1.23.0") +func ClusterVersion() (string, error) { + ver, err := k8sCore.GetVersion() + if err != nil { + return "", err + } + return strings.TrimLeft(ver.String(), "v"), nil +} diff --git a/vendor/github.com/portworx/torpedo/drivers/scheduler/scheduler.go b/vendor/github.com/portworx/torpedo/drivers/scheduler/scheduler.go index 69484c3e0d..4826be4376 100644 --- a/vendor/github.com/portworx/torpedo/drivers/scheduler/scheduler.go +++ b/vendor/github.com/portworx/torpedo/drivers/scheduler/scheduler.go @@ -113,6 +113,10 @@ type InitOptions struct { RunCSISnapshotAndRestoreManyTest bool //SecureApps identifies apps to be deployed with secure annotation in storage class SecureApps []string + // AnthosAdminWorkStationNodeIP needed for anthos scheduler + AnthosAdminWorkStationNodeIP string + // AnthosInstancePath needed for anthos scheduler + AnthosInstancePath string } // ScheduleOptions are options that callers to pass to influence the apps that get schduled @@ -162,6 +166,9 @@ type Driver interface { // Schedule starts applications and returns a context for each one of them Schedule(instanceID string, opts ScheduleOptions) ([]*Context, error) + // ScheduleWithCustomAppSpecs starts applications with custom app specs and returns a context for each one of them + ScheduleWithCustomAppSpecs(apps []*spec.AppSpec, instanceID string, options ScheduleOptions) ([]*Context, error) + // WaitForRunning waits for application to start running. 
WaitForRunning(cc *Context, timeout, retryInterval time.Duration) error diff --git a/vendor/github.com/portworx/torpedo/drivers/volume/common.go b/vendor/github.com/portworx/torpedo/drivers/volume/common.go index 73b61c0c23..43b6380f8a 100644 --- a/vendor/github.com/portworx/torpedo/drivers/volume/common.go +++ b/vendor/github.com/portworx/torpedo/drivers/volume/common.go @@ -73,6 +73,7 @@ type MetadataNode struct { DbSize int `json:"DbSize"` IsHealthy bool `json:"IsHealthy"` ID string `json:"ID"` + Name string `json:"Name"` } // DefaultDriver implements defaults for Driver interface @@ -643,7 +644,7 @@ func (d *DefaultDriver) ValidateStoragePools() error { } // ExpandPool resizes a pool of a given ID -func (d *DefaultDriver) ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *DefaultDriver) ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { return &errors.ErrNotSupported{ Type: "Function", Operation: "ExpandPool()", @@ -651,7 +652,7 @@ func (d *DefaultDriver) ExpandPool(poolUID string, operation api.SdkStoragePool_ } // ExpandPoolUsingPxctlCmd resizes pool of a given ID using CLI Command -func (d *DefaultDriver) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *DefaultDriver) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { return &errors.ErrNotSupported{ Type: "Function", Operation: "ExpandPoolUsingPxctlCmd()", @@ -813,7 +814,7 @@ func (d *DefaultDriver) UpdateIOPriority(volumeName string, priorityType string) } } -// ValidateMountOptions for pure volumes +// ValidatePureFaFbMountOptions Validates MountOptions for pure volumes func (d *DefaultDriver) ValidatePureFaFbMountOptions(volumeName string, mountoption []string, volumeNode *node.Node) error { return &errors.ErrNotSupported{ Type: "Function", @@ -821,6 +822,14 @@ func (d *DefaultDriver) ValidatePureFaFbMountOptions(volumeName string, mountopt } } +// ValidatePureFaCreateOptions validates createoptions for pure volumes +func (d *DefaultDriver) ValidatePureFaCreateOptions(volumeName string, FSType string, volumeNode *node.Node) error { + return &errors.ErrNotSupported{ + Type: "Function", + Operation: "ValidateCreateOptions", + } +} + // Contains return func (d *DefaultDriver) Contains(nodes []*api.StorageNode, n *api.StorageNode) bool { return false diff --git a/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go b/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go index e6cf07e742..7870776aab 100644 --- a/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go +++ b/vendor/github.com/portworx/torpedo/drivers/volume/portworx/portworx.go @@ -36,6 +36,7 @@ import ( optest "github.com/libopenstorage/operator/pkg/util/test" "github.com/pborman/uuid" "github.com/portworx/sched-ops/k8s/apiextensions" + "github.com/portworx/sched-ops/k8s/apps" "github.com/portworx/sched-ops/k8s/core" "github.com/portworx/sched-ops/k8s/operator" "github.com/portworx/sched-ops/task" @@ -54,6 +55,7 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -94,12 +96,14 @@ 
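ExpandPool and ExpandPoolUsingPxctlCmd now carry a skipWaitForCleanVolumes flag (surfaced below as pxctl's -f). A minimal call sketch; volumeDriver, poolUUID, and the size value are placeholders:

	// Expand the pool by adding a disk, without waiting for volumes to be clean.
	if err := volumeDriver.ExpandPool(poolUUID, api.SdkStoragePool_RESIZE_TYPE_ADD_DISK, 100, true); err != nil {
		return err
	}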
const ( refreshEndpointParam = "refresh-endpoint" defaultPXAPITimeout = 5 * time.Minute envSkipPXServiceEndpoint = "SKIP_PX_SERVICE_ENDPOINT" + envSkipPxOperatorUpgrade = "SKIP_PX_OPERATOR_UPGRADE" pureKey = "backend" pureBlockValue = "pure_block" clusterIDFile = "/etc/pwx/cluster_uuid" pxReleaseManifestURLEnvVarName = "PX_RELEASE_MANIFEST_URL" pxServiceLocalEndpoint = "portworx-service.kube-system.svc.cluster.local" mountGrepVolume = "mount | grep %s" + mountGrepFirstColumn = "mount | grep %s | awk '{print $1}'" ) const ( @@ -108,8 +112,8 @@ const ( podUpRetryInterval = 30 * time.Second maintenanceOpTimeout = 1 * time.Minute maintenanceWaitTimeout = 2 * time.Minute - inspectVolumeTimeout = 1 * time.Minute - inspectVolumeRetryInterval = 2 * time.Second + inspectVolumeTimeout = 2 * time.Minute + inspectVolumeRetryInterval = 3 * time.Second validateDeleteVolumeTimeout = 6 * time.Minute validateReplicationUpdateTimeout = 60 * time.Minute validateClusterStartTimeout = 2 * time.Minute @@ -120,6 +124,8 @@ const ( validateStoragePoolSizeInterval = 30 * time.Second validateRebalanceJobsTimeout = 30 * time.Minute validateRebalanceJobsInterval = 30 * time.Second + validateDeploymentTimeout = 3 * time.Minute + validateDeploymentInterval = 5 * time.Second getNodeTimeout = 3 * time.Minute getNodeRetryInterval = 5 * time.Second stopDriverTimeout = 5 * time.Minute @@ -147,8 +153,9 @@ const ( ) const ( - driveAddSuccessStatus = "Drive add done" - driveExitsStatus = "Device already exists" + driveAddSuccessStatus = "Drive add done" + driveExitsStatus = "Device already exists" + metadataAddSuccessStatus = "Successfully added metadata device" ) // Provisioners types of supported provisioners @@ -204,6 +211,7 @@ type portworx struct { refreshEndpoint bool token string skipPXSvcEndpoint bool + skipPxOperatorUpgrade bool DiagsFile string } @@ -214,7 +222,7 @@ type statusJSON struct { } // ExpandPool resizes a pool of a given ID -func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { log.Infof("Initiating pool %v resize by %v with operationtype %v", poolUUID, size, operation.String()) // start a task to check if pool resize is done @@ -224,13 +232,14 @@ func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_Resi ResizeFactor: &api.SdkStoragePoolResizeRequest_Size{ Size: size, }, - OperationType: operation, + OperationType: operation, + SkipWaitForCleanVolumes: skipWaitForCleanVolumes, }) if err != nil { return nil, true, err } if jobListResp.String() != "" { - log.Debugf("Resize respone: %v", jobListResp.String()) + log.Debugf("Resize response: %v", jobListResp.String()) } return nil, false, nil } @@ -242,7 +251,7 @@ func (d *portworx) ExpandPool(poolUUID string, operation api.SdkStoragePool_Resi } // ExpandPoolUsingPxctlCmd resizes a pool of a given UUID using CLI command -func (d *portworx) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error { +func (d *portworx) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error { var operationString string @@ -257,6 +266,9 @@ func (d *portworx) ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operati log.InfoD("Initiate Pool %v resize by %v with operationtype %v using 
CLI", poolUUID, size, operation.String()) cmd := fmt.Sprintf("pxctl sv pool expand --uid %v --size %v --operation %v", poolUUID, size, operationString) + if skipWaitForCleanVolumes { + cmd = fmt.Sprintf("%s -f", cmd) + } out, err := d.nodeDriver.RunCommandWithNoRetry( n, cmd, @@ -354,6 +366,11 @@ func (d *portworx) init(sched, nodeDriver, token, storageProvisioner, csiGeneric d.skipPXSvcEndpoint, _ = strconv.ParseBool(skipStr) } + // If true, will skip upgrade of PX Operator along with PX during upgrade hops + if skipStr := os.Getenv(envSkipPxOperatorUpgrade); skipStr != "" { + d.skipPxOperatorUpgrade, _ = strconv.ParseBool(skipStr) + } + d.token = token if d.nodeDriver, err = node.Get(nodeDriver); err != nil { @@ -1010,6 +1027,7 @@ func (d *portworx) UpdatePoolIOPriority(n node.Node, poolUUID string, IOPriority func (d *portworx) EnterMaintenance(n node.Node) error { t := func() (interface{}, bool, error) { if err := d.maintenanceOp(n, enterMaintenancePath); err != nil { + return nil, true, err } return nil, false, nil @@ -1018,6 +1036,8 @@ func (d *portworx) EnterMaintenance(n node.Node) error { if _, err := task.DoRetryWithTimeout(t, maintenanceOpTimeout, defaultRetryInterval); err != nil { return err } + log.Infof("waiting for 3 mins allowing node to completely transition to maintenance mode") + time.Sleep(3 * time.Minute) t = func() (interface{}, bool, error) { apiNode, err := d.GetDriverNode(&n) if err != nil { @@ -1035,6 +1055,7 @@ func (d *portworx) EnterMaintenance(n node.Node) error { Cause: err.Error(), } } + return nil } @@ -1080,6 +1101,8 @@ func (d *portworx) EnterPoolMaintenance(n node.Node) error { return fmt.Errorf("error when entering pool maintenance on node [%s], Err: %v", n.Name, err) } log.Infof("Enter pool maintenance %s", out) + log.Infof("waiting for 3 mins allowing pool to completely transition to maintenance mode") + time.Sleep(3 * time.Minute) return nil } @@ -1424,7 +1447,7 @@ func (d *portworx) ValidateCreateSnapshotUsingPxctl(volumeName string) error { } func (d *portworx) UpdateIOPriority(volumeName string, priorityType string) error { - nodes := node.GetWorkerNodes() + nodes := node.GetStorageDriverNodes() cmd := fmt.Sprintf("%s --io_priority %s %s", pxctlVolumeUpdate, priorityType, volumeName) _, err := d.nodeDriver.RunCommandWithNoRetry( nodes[0], @@ -1462,6 +1485,87 @@ func (d *portworx) ValidatePureFaFbMountOptions(volumeName string, mountoption [ } +// ValidatePureFaCreateOptions validates FStype and createoptions with block size 2048 on those FStypes +func (d *portworx) ValidatePureFaCreateOptions(volumeName string, FStype string, volumeNode *node.Node) error { + // Checking if file systems are properly set + FScmd := fmt.Sprintf(mountGrepVolume, volumeName) + FSout, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + FScmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("Failed to get mount response for volume %s, Err: %v", volumeName, err) + } + if strings.Contains(FSout, FStype) { + log.Infof("%s file system is available in the volume %s", FStype, volumeName) + } else { + return fmt.Errorf("Failed to get %s File system, Err: %v", FStype, err) + } + + // Getting mapper volumename where createoptions are applied + mapperCmd := fmt.Sprintf(mountGrepFirstColumn, volumeName) + mapperOut, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + mapperCmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) 
+ if err != nil { + return fmt.Errorf("Failed to get attached volume for create option for pvc %s, Err: %v", volumeName, err) + } + + // Validating implementation of create options + if FStype == "xfs" { + xfsInfoCmd := fmt.Sprintf("xfs_info %s ", strings.ReplaceAll(mapperOut, "\n", "")) + xfsInfoOut, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + xfsInfoCmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("Failed to get bsize for create option for pvc %s, Err: %v", volumeName, err) + } + if strings.Contains(xfsInfoOut, "bsize=2048") { + log.Infof("Blocksize 2048 is correctly configured by the create option of volume %s", volumeName) + } else { + log.Warnf("The %s filesystem on volume %s does not have block size 2048 set", FStype, volumeName) + return fmt.Errorf("block size 2048 is not set on the %s file system of volume %s, xfs_info output: %s", FStype, volumeName, xfsInfoOut) + } + } else if FStype == "ext4" { + ext4InfoCmd := fmt.Sprintf("tune2fs -l %s ", strings.ReplaceAll(mapperOut, "\n", "")) + ext4InfoOut, err := d.nodeDriver.RunCommandWithNoRetry( + *volumeNode, + ext4InfoCmd, + node.ConnectionOpts{ + Timeout: crashDriverTimeout, + TimeBeforeRetry: defaultRetryInterval, + }) + if err != nil { + return fmt.Errorf("Failed to get bsize for create option for pvc %s, Err: %v", volumeName, err) + } + blockSize := false + for _, b := range strings.Split(ext4InfoOut, "\n") { + if strings.Contains(b, "Block size") && strings.Contains(b, "2048") { + blockSize = true + break + } + } + if blockSize { + log.Infof("Blocksize 2048 is correctly configured by the create options of volume %s", volumeName) + } else { + log.Warnf("The %s filesystem on volume %s does not have block size 2048 set", FStype, volumeName) + return fmt.Errorf("block size 2048 is not set on the %s file system of volume %s, tune2fs output: %s", FStype, volumeName, ext4InfoOut) + } + } + return nil +} + + func (d *portworx) UpdateSharedv4FailoverStrategyUsingPxctl(volumeName string, strategy api.Sharedv4FailoverStrategy_Value) error { nodes := node.GetStorageDriverNodes() var strategyStr string @@ -2107,7 +2211,7 @@ func (d *portworx) ValidateRebalanceJobs() error { } func (d *portworx) ResizeStoragePoolByPercentage(poolUUID string, e api.SdkStoragePool_ResizeOperationType, percentage uint64) error { - log.Infof("Initiating pool %v resize by %v with operationtype %v", poolUUID, percentage, e.String()) + log.InfoD("Initiating pool %v resize by %v with operationtype %v", poolUUID, percentage, e.String()) // Start a task to check if pool resize is done t := func() (interface{}, bool, error) { @@ -2116,13 +2220,14 @@ func (d *portworx) ResizeStoragePoolByPercentage(poolUUID string, e api.SdkStora ResizeFactor: &api.SdkStoragePoolResizeRequest_Percentage{ Percentage: percentage, }, - OperationType: e, + OperationType: e, + SkipWaitForCleanVolumes: true, }) if err != nil { return nil, true, err } if jobListResp.String() != "" { - log.Debugf("Resize respone: %v", jobListResp.String()) + log.Debugf("Resize response: %v", jobListResp.String()) } return nil, false, nil } @@ -2171,10 +2276,9 @@ func (d *portworx) getExpectedPoolSizes(listApRules *apapi.AutopilotRuleList) (m func (d *portworx) GetAutoFsTrimStatus(endpoint string) (map[string]api.FilesystemTrim_FilesystemTrimStatus, error) { sdkport, _ := d.getSDKPort() pxEndpoint := net.JoinHostPort(endpoint, strconv.Itoa(int(sdkport))) - newConn, err := grpc.Dial(pxEndpoint,
grpc.WithInsecure()) + newConn, err := grpc.Dial(pxEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, fmt.Errorf("failed to set the connection endpoint [%s], Err: %v", endpoint, err) - } d.autoFsTrimManager = api.NewOpenStorageFilesystemTrimClient(newConn) @@ -2715,41 +2819,17 @@ func (d *portworx) UpgradeDriver(endpointVersion string) error { // If PX Operator based install, perform PX StorageCluster upgrade isOperatorBasedInstall, _ := d.IsOperatorBasedInstall() if isOperatorBasedInstall { - log.InfoD("Upgrading Portworx StorageCluster") - var k8sCore = core.Instance() - k8sVersion, err := k8sCore.GetVersion() - if err != nil { - return err - } - - imageList, err := optest.GetImagesFromVersionURL(specGenUrl, k8sVersion.String()) - if err != nil { - return fmt.Errorf("failed to get image list from version URL [%s], Err: %v", specGenUrl, err) - } - - // Get PX StorageCluster - cluster, err := d.GetDriver() - if err != nil { - return err - } - - // Update PX Image and Env vars if needed - updateParamFunc := func(cluster *v1.StorageCluster) *v1.StorageCluster { - // Set oci-mon version - cluster.Spec.Image = imageList["version"] - var envVars []corev1.EnvVar - // Add Release Manifest URL incase of edge spec - envVars, err := configurePxReleaseManifestEnvVars(cluster.Spec.Env, specGenUrl) - if err != nil { - return nil + log.Debugf("Env var SKIP_PX_OPERATOR_UPGRADE is set to [%v]", d.skipPxOperatorUpgrade) + if !d.skipPxOperatorUpgrade { + log.InfoD("Will upgrade PX Operator, if new version is available for given PX endpoint [%s]", specGenUrl) + if err := d.upgradePortworxOperator(specGenUrl); err != nil { + return fmt.Errorf("failed to upgrade PX Operator, Err: %v", err) } - cluster.Spec.Env = envVars - return cluster } - // Update and validate PX StorageCluster - if _, err := d.updateAndValidateStorageCluster(cluster, updateParamFunc, specGenUrl, false); err != nil { - return err + log.InfoD("Will upgrade Portworx StorageCluster") + if err := d.upgradePortworxStorageCluster(specGenUrl); err != nil { + return fmt.Errorf("failed to upgrade PX StorageCluster, Err: %v", err) } } else { log.InfoD("Upgrading Portworx DaemonSet") @@ -2766,29 +2846,29 @@ func (d *portworx) UpgradeDriver(endpointVersion string) error { return nil } -// configurePxReleaseManifestEnvVars configure PX Release Manifest URL for edge end production PX versions +// configurePxReleaseManifestEnvVars configures PX Release Manifest URL for edge and production PX versions, if needed func configurePxReleaseManifestEnvVars(origEnvVarList []corev1.EnvVar, specGenURL string) ([]corev1.EnvVar, error) { var newEnvVarList []corev1.EnvVar - // Set release manifest URL in case of edge-install.portworx.com + // Remove release manifest URLs from Env Vars, if any exist + for _, env := range origEnvVarList { + if env.Name == pxReleaseManifestURLEnvVarName { + continue + } + newEnvVarList = append(newEnvVarList, env) + } + + // Set release manifest URL in case of edge if strings.Contains(specGenURL, "edge") { releaseManifestURL, err := optest.ConstructPxReleaseManifestURL(specGenURL) if err != nil { return nil, err } - // Add release manifest URL to Env Vars incase of edge URL - newEnvVarList = origEnvVarList + // Add release manifest URL to Env Vars newEnvVarList = append(newEnvVarList, corev1.EnvVar{Name: pxReleaseManifestURLEnvVarName, Value: releaseManifestURL}) - } else { - for _, env := range origEnvVarList { - // Skip adding or remove release manifest URL to Env Vars incase of production URL
- if env.Name == pxReleaseManifestURLEnvVarName { - continue - } - newEnvVarList = append(newEnvVarList, env) - } } + return newEnvVarList, nil } @@ -2859,6 +2939,107 @@ func (d *portworx) upgradePortworxDaemonset(specGenUrl string) error { return nil } +// upgradePortworxStorageCluster upgrades PX StorageCluster based on the given Spec Generator URL +func (d *portworx) upgradePortworxStorageCluster(specGenUrl string) error { + log.InfoD("Upgrading Portworx StorageCluster") + + // Get k8s version + k8sVersion, err := k8sCore.GetVersion() + if err != nil { + return err + } + + // Get images from version URL + imageList, err := optest.GetImagesFromVersionURL(specGenUrl, k8sVersion.String()) + if err != nil { + return fmt.Errorf("failed to get image list from version URL [%s], Err: %v", specGenUrl, err) + } + + // Get PX StorageCluster + cluster, err := d.GetDriver() + if err != nil { + return err + } + + // Update PX Image and Env vars if needed + updateParamFunc := func(cluster *v1.StorageCluster) *v1.StorageCluster { + // Set oci-mon version + cluster.Spec.Image = imageList["version"] + var envVars []corev1.EnvVar + // Add Release Manifest URL in case of edge spec + envVars, err := configurePxReleaseManifestEnvVars(cluster.Spec.Env, specGenUrl) + if err != nil { + return nil + } + cluster.Spec.Env = envVars + return cluster + } + + // Update and validate PX StorageCluster + if _, err := d.updateAndValidateStorageCluster(cluster, updateParamFunc, specGenUrl, false); err != nil { + return err + } + + log.Infof("Successfully upgraded Portworx StorageCluster [%s]", cluster.Name) + return nil +} + +// upgradePortworxOperator upgrades PX Operator based on the given Spec Generator URL +func (d *portworx) upgradePortworxOperator(specGenUrl string) error { + log.InfoD("Upgrading Portworx Operator") + + pxOperatorSpecFileName := "/px-operator.yaml" + + // Get k8s version + kubeVersion, err := d.schedOps.GetKubernetesVersion() + if err != nil { + return err + } + + // Getting PX Operator spec + u, err := url.Parse(specGenUrl) + if err != nil { + return fmt.Errorf("failed to parse URL [%s], Err: %v", specGenUrl, err) + } + q := u.Query() + q.Set("kbver", kubeVersion.String()) + q.Set("comp", "pxoperator") + q.Set("ns", d.namespace) + u.RawQuery = q.Encode() + pxOperatorSpecGenUrl := u.String() + log.Debugf("Getting PX Operator spec from URL [%s]", pxOperatorSpecGenUrl) + if err := osutils.Wget(pxOperatorSpecGenUrl, pxOperatorSpecFileName, true); err != nil { + return err + } + + // Getting content of the file + if _, err := osutils.Cat(pxOperatorSpecFileName); err != nil { + return err + } + + // Apply PX Operator spec + log.Info("Applying PX Operator spec") + cmdArgs := []string{"apply", "-f", pxOperatorSpecFileName} + if err := osutils.Kubectl(cmdArgs); err != nil { + return err + } + + // TODO: Temporary workaround, need to rewrite Operator validation functions to be able to use it here + // Validate PX Operator deployment + pxOperatorDeployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "portworx-operator", + Namespace: d.namespace, + }, + } + if err := apps.Instance().ValidateDeployment(pxOperatorDeployment, validateDeploymentTimeout, validateDeploymentInterval); err != nil { + return err + } + + log.Info("Successfully upgraded Portworx Operator") + return nil +} + // UpgradeStork upgrades Stork based on the given Spec Generator URL func (d *portworx) UpgradeStork(specGenUrl string) error { storkSpecFileName := "/stork.yaml" @@ -2892,7 +3073,7 @@ func (d *portworx)
UpgradeStork(specGenUrl string) error { // Getting stork spec u, err := url.Parse(specGenUrl) if err != nil { - fmt.Printf("failed to parse URL [%s], Err: %v", specGenUrl, err) + return fmt.Errorf("failed to parse URL [%s], Err: %v", specGenUrl, err) } q := u.Query() q.Set("kbver", kubeVersion.String()) @@ -2994,7 +3175,7 @@ func (d *portworx) DecommissionNode(n *node.Node) error { } } - log.Infof("Waiting for a minute for node [%s] to transistion to maintenece mode", n.Name) + log.Infof("Waiting for a minute for node [%s] to transition to maintenance mode", n.Name) time.Sleep(1 * time.Minute) nodeResp, err := d.getNodeManager().Inspect(d.getContext(), &api.SdkNodeInspectRequest{NodeId: n.VolDriverNodeID}) @@ -3474,7 +3655,7 @@ func (d *portworx) GetKvdbMembers(n node.Node) (map[string]*torpedovolume.Metada url = netutil.MakeURL("http://", endpoint, int(pxdRestPort)) } // TODO replace by sdk call whenever it is available - log.Infof("Url to call %v", url) + log.Debugf("Url to call %v", url) c, err := client.NewClient(url, "", "") if err != nil { return nil, err @@ -4792,7 +4973,7 @@ func addDrive(n node.Node, drivePath string, poolID int32, d *portworx) error { return fmt.Errorf("failed to add drive [%s] on node [%s]", drivePath, n.Name) } - if !strings.Contains(addDriveStatus.Status, driveAddSuccessStatus) { + if !strings.Contains(addDriveStatus.Status, driveAddSuccessStatus) && !strings.Contains(addDriveStatus.Status, metadataAddSuccessStatus) { return fmt.Errorf("failed to add drive [%s] on node [%s], AddDrive Status: %+v", drivePath, n.Name, addDriveStatus) } @@ -4846,16 +5027,25 @@ func waitForAddDriveToComplete(n node.Node, drivePath string, d *portworx) error // GetPoolsUsedSize returns map of pool id and current used size func (d *portworx) GetPoolsUsedSize(n *node.Node) (map[string]string, error) { cmd := fmt.Sprintf("%s sv pool show -j | grep -e uuid -e '\"Used\"'", d.getPxctlPath(*n)) + log.Infof("Running command [%s] on node [%s]", cmd, n.Name) - out, err := d.nodeDriver.RunCommandWithNoRetry(*n, cmd, node.ConnectionOpts{ - Timeout: 2 * time.Minute, - TimeBeforeRetry: 10 * time.Second, - }) + t := func() (interface{}, bool, error) { + out, err := d.nodeDriver.RunCommandWithNoRetry(*n, cmd, node.ConnectionOpts{ + Timeout: 2 * time.Minute, + TimeBeforeRetry: 10 * time.Second, + }) + if err != nil { + return "", true, err + } + return out, false, nil + } + + out, err := task.DoRetryWithTimeout(t, 5*time.Minute, 10*time.Second) if err != nil { return nil, err } - outLines := strings.Split(out, "\n") + outLines := strings.Split(out.(string), "\n") poolsData := make(map[string]string) var poolId string diff --git a/vendor/github.com/portworx/torpedo/drivers/volume/volume.go b/vendor/github.com/portworx/torpedo/drivers/volume/volume.go index a49a8d0d55..778ace4469 100644 --- a/vendor/github.com/portworx/torpedo/drivers/volume/volume.go +++ b/vendor/github.com/portworx/torpedo/drivers/volume/volume.go @@ -336,9 +336,12 @@ type Driver interface { //UpdateIOPriority IO priority using pxctl command UpdateIOPriority(volumeName string, priorityType string) error - //validate mount options by executing mount command + //ValidatePureFaFbMountOptions validates mount options by executing mount command ValidatePureFaFbMountOptions(volumeName string, mountoption []string, volumeNode *node.Node) error + //ValidatePureFaCreateOptions validates create options using xfs_info and tune2fs commands + ValidatePureFaCreateOptions(volumeName string, FSType string, volumeNode *node.Node) error + // 
UpdateSharedv4FailoverStrategyUsingPxctl updates the sharedv4 failover strategy using pxctl UpdateSharedv4FailoverStrategyUsingPxctl(volumeName string, strategy api.Sharedv4FailoverStrategy_Value) error @@ -355,10 +358,10 @@ type Driver interface { ValidateDriver(endpointVersion string, autoUpdateComponents bool) error // ExpandPool resizes a pool of a given ID - ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error + ExpandPool(poolUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error // ExpandPoolUsingPxctlCmd resizes pool of a given ID using CLI Command - ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64) error + ExpandPoolUsingPxctlCmd(n node.Node, poolUUID string, operation api.SdkStoragePool_ResizeOperationType, size uint64, skipWaitForCleanVolumes bool) error // ListStoragePools lists all existing storage pools ListStoragePools(labelSelector metav1.LabelSelector) (map[string]*api.StoragePool, error) diff --git a/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go b/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go index 6d7fb080a4..487aa357da 100644 --- a/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go +++ b/vendor/github.com/portworx/torpedo/pkg/aetosutil/dashboardutil.go @@ -3,6 +3,8 @@ package aetosutil import ( "encoding/json" "fmt" + "github.com/sirupsen/logrus" + "net/http" "os" "reflect" @@ -15,7 +17,6 @@ import ( "github.com/onsi/gomega" rest "github.com/portworx/torpedo/pkg/restutil" - "github.com/sirupsen/logrus" ) var ( @@ -31,8 +32,8 @@ var ( const ( //DashBoardBaseURL for posting logs - DashBoardBaseURL = "http://aetos.pwx.purestorage.com/dashboard" //"http://aetos-dm.pwx.purestorage.com:3939/dashboard" - AetosBaseURL = "http://aetos.pwx.purestorage.com" + DashBoardBaseURL = "https://aetos.pwx.purestorage.com/dashboard" //"http://aetos-dm.pwx.purestorage.com:3939/dashboard" + AetosBaseURL = "https://aetos.pwx.purestorage.com" ) const ( @@ -163,11 +164,13 @@ func (d *Dashboard) TestSetBegin(testSet *TestSet) { if err != nil { logrus.Errorf("TestSetId creation failed. 
Cause : %v", err) } - dashURL = fmt.Sprintf("Dashboard URL : %s/resultSet/testSetID/%d", AetosBaseURL, d.TestSetID) - os.Setenv("DASH_UID", fmt.Sprint(d.TestSetID)) } } - logrus.Info(dashURL) + if d.TestSetID != 0 { + dashURL = fmt.Sprintf("Dashboard URL : %s/resultSet/testSetID/%d", AetosBaseURL, d.TestSetID) + os.Setenv("DASH_UID", fmt.Sprint(d.TestSetID)) + } + logrus.Infof(dashURL) } // TestSetEnd end testset and update to dashboard DB @@ -260,7 +263,7 @@ func (d *Dashboard) TestSetUpdate(testSet *TestSet) { resp, respStatusCode, err := rest.PUT(updateTestSetURL, testSet, nil, nil) if err != nil { - logrus.Errorf("Error in updating TestSet, Caose: %v", err) + logrus.Errorf("Error in updating TestSet, Cause: %v", err) } else if respStatusCode != http.StatusOK { logrus.Errorf("Failed to update TestSet, Resp : %s", string(resp)) } diff --git a/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go b/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go index 7905dbb274..f47978acd5 100644 --- a/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go +++ b/vendor/github.com/portworx/torpedo/pkg/restutil/restutil.go @@ -2,6 +2,7 @@ package restutil import ( "bytes" + "crypto/tls" "encoding/json" "github.com/sirupsen/logrus" "io" @@ -22,8 +23,8 @@ const ( defaultRestTimeOut = 10 * time.Second ) -// Get rest get call -func Get(url string, auth *Auth, headers map[string]string) ([]byte, int, error) { +// GET rest get call +func GET(url string, auth *Auth, headers map[string]string) ([]byte, int, error) { respBody, respStatusCode, err := getResponse(http.MethodGet, url, nil, auth, headers) if err != nil { @@ -97,6 +98,11 @@ func getResponse(httpMethod, url string, payload interface{}, auth *Auth, header setBasicAuthAndHeaders(req, auth, headers) client := &http.Client{ Timeout: defaultRestTimeOut, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, } var resp *http.Response resp, err = client.Do(req) diff --git a/vendor/modules.txt b/vendor/modules.txt index 8bf3d8c74e..fac6fd3717 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -357,7 +357,7 @@ github.com/exponent-io/jsonpath # github.com/fatih/color v1.13.0 ## explicit; go 1.13 github.com/fatih/color -# github.com/fsnotify/fsnotify v1.5.4 +# github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify # github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 @@ -714,11 +714,14 @@ github.com/libopenstorage/openstorage/volume/drivers/fake github.com/libopenstorage/openstorage/volume/drivers/nfs github.com/libopenstorage/openstorage/volume/drivers/pwx github.com/libopenstorage/openstorage/volume/drivers/vfs -# github.com/libopenstorage/operator v0.0.0-20230323034810-8853b151f594 +# github.com/libopenstorage/operator v0.0.0-20230511042455-244266620049 ## explicit; go 1.19 github.com/libopenstorage/operator/drivers/storage github.com/libopenstorage/operator/drivers/storage/portworx/util +github.com/libopenstorage/operator/pkg/apis github.com/libopenstorage/operator/pkg/apis/core/v1 +github.com/libopenstorage/operator/pkg/apis/portworx +github.com/libopenstorage/operator/pkg/apis/portworx/v1 github.com/libopenstorage/operator/pkg/client/clientset/versioned github.com/libopenstorage/operator/pkg/client/clientset/versioned/scheme github.com/libopenstorage/operator/pkg/client/clientset/versioned/typed/core/v1 @@ -850,7 +853,7 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable 
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.21.1 +# github.com/onsi/gomega v1.24.2 ## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format @@ -985,7 +988,7 @@ github.com/portworx/sched-ops/k8s/rbac github.com/portworx/sched-ops/k8s/storage github.com/portworx/sched-ops/k8s/stork github.com/portworx/sched-ops/task -# github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 => github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 +# github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 => github.com/portworx/torpedo v0.0.0-20230512123351-2de37aaa31a7 ## explicit; go 1.19 github.com/portworx/torpedo/drivers/api github.com/portworx/torpedo/drivers/node @@ -2274,7 +2277,7 @@ sigs.k8s.io/yaml # github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20230324214216-7f88436db3de # github.com/portworx/kdmp => github.com/portworx/kdmp v0.4.1-0.20230316085313-95fc97e8493b # github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20230330091134-421296e5f8d0 -# github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230410110229-ad7b0df86670 +# github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20230512123351-2de37aaa31a7 # gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 # helm.sh/helm/v3 => helm.sh/helm/v3 v3.10.3 # k8s.io/api => k8s.io/api v0.25.0
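
The new upgradePortworxOperator helper in this patch fetches the operator manifest by adding kbver, comp=pxoperator, and ns query parameters to the spec generator URL before downloading and applying it. The following is a minimal, self-contained sketch of that URL construction only; the base URL, Kubernetes version, and namespace values are placeholders chosen for illustration, not values taken from this patch.

package main

import (
	"fmt"
	"net/url"
)

// buildComponentSpecURL appends the kbver, comp, and ns query parameters to a
// spec generator base URL, mirroring the pattern the new upgradePortworxOperator
// helper follows before fetching the component spec.
func buildComponentSpecURL(specGenURL, kubeVersion, component, namespace string) (string, error) {
	u, err := url.Parse(specGenURL)
	if err != nil {
		return "", fmt.Errorf("failed to parse URL [%s], Err: %v", specGenURL, err)
	}
	q := u.Query()
	q.Set("kbver", kubeVersion)
	q.Set("comp", component)
	q.Set("ns", namespace)
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	// Hypothetical values for illustration only.
	specURL, err := buildComponentSpecURL("https://install.portworx.com/2.13", "1.25.0", "pxoperator", "kube-system")
	if err != nil {
		panic(err)
	}
	fmt.Println(specURL) // e.g. https://install.portworx.com/2.13?comp=pxoperator&kbver=1.25.0&ns=kube-system
}

Going through url.Values rather than string concatenation keeps the query encoding correct even when a parameter value contains reserved characters.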
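
Similarly, GetPoolsUsedSize now wraps its pxctl invocation in a task function retried by task.DoRetryWithTimeout instead of issuing a single RunCommandWithNoRetry call. Below is a minimal sketch of that retry pattern, assuming the sched-ops task package vendored by this repository; runPxctl is a hypothetical stand-in for the node-driver command, not a function from this patch.

package main

import (
	"fmt"
	"time"

	"github.com/portworx/sched-ops/task"
)

// runPxctl is a placeholder for a command that may fail transiently,
// such as nodeDriver.RunCommandWithNoRetry in the real driver.
func runPxctl() (string, error) {
	return "pool usage output", nil
}

func main() {
	t := func() (interface{}, bool, error) {
		out, err := runPxctl()
		if err != nil {
			// Returning true asks DoRetryWithTimeout to retry after the interval.
			return "", true, err
		}
		return out, false, nil
	}

	// Retry up to 5 minutes total, waiting 10 seconds between attempts,
	// matching the timeouts used in the GetPoolsUsedSize change.
	out, err := task.DoRetryWithTimeout(t, 5*time.Minute, 10*time.Second)
	if err != nil {
		fmt.Println("command did not succeed within timeout:", err)
		return
	}
	fmt.Println(out.(string))
}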