diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..b1c7968f2
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+# ignore .cache folder
+.cache
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..854788b63
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+*.pyc
+.idea
+.coverage
+.project
+.pydevproject
+/.metadata/
+/bin/
+node/mocks
diff --git a/Dockerfile-cleanup-volumes b/Dockerfile-cleanup-volumes
new file mode 100644
index 000000000..39d63a517
--- /dev/null
+++ b/Dockerfile-cleanup-volumes
@@ -0,0 +1,32 @@
+# Copyright IBM Corporation 2019.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM centos:7
+RUN yum --enablerepo=extras -y install epel-release && yum -y install python36-pip
+
+COPY scripts/ci/cleanup_volumes.py /scripts/cleanup_volumes.py
+RUN pip3 install pyxcli
+RUN pip3 install munch
+
+RUN groupadd -g 9999 appuser && \
+    useradd -r -u 9999 -g appuser appuser
+RUN chown -R appuser:appuser /scripts
+USER appuser
+
+WORKDIR /scripts
+ENV PYTHONPATH=/scripts
+
+ENTRYPOINT ["/scripts/cleanup_volumes.py"]
+
+
diff --git a/Dockerfile-csi-controller b/Dockerfile-csi-controller
new file mode 100644
index 000000000..1067d4573
--- /dev/null
+++ b/Dockerfile-csi-controller
@@ -0,0 +1,45 @@
+# Copyright IBM Corporation 2019.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM registry.access.redhat.com/ubi8/python-36:1
+MAINTAINER IBM Storage
+
+###Required Labels
+LABEL name="IBM block storage CSI driver controller" \
+    vendor="IBM" \
+    version="0.9.0" \
+    release="b62" \
+    summary="The controller component of the IBM block storage CSI driver" \
+    description="The IBM block storage CSI driver enables container orchestrators, such as Kubernetes and OpenShift, to manage the life-cycle of persistent storage." \
+    io.k8s.display-name="IBM block storage CSI driver controller" \
+    io.k8s.description="The IBM block storage CSI driver enables container orchestrators, such as Kubernetes and OpenShift, to manage the life-cycle of persistent storage." \
+    io.openshift.tags=ibm,csi,ibm-block-csi-driver,ibm-block-csi-node
+
+COPY controller/requirements.txt /driver/controller/
+RUN pip3 install --upgrade pip==19.1.1
+RUN pip3 install -r /driver/controller/requirements.txt
+
+COPY ./common /driver/common
+COPY ./controller /driver/controller
+COPY ./LICENSE /licenses/
+COPY ./NOTICES /licenses/
+
+WORKDIR /driver
+ENV PYTHONPATH=/driver
+
+# Note: UBI runs with app-user by default.
+
+ENTRYPOINT ["/driver/controller/scripts/entrypoint.sh"]
+
+
diff --git a/Dockerfile-csi-controller.test b/Dockerfile-csi-controller.test
new file mode 100644
index 000000000..30e08a674
--- /dev/null
+++ b/Dockerfile-csi-controller.test
@@ -0,0 +1,40 @@
+# Copyright IBM Corporation 2019.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Description:
+# This Dockerfile.test runs the CSI controller unit tests inside a container.
+# It is similar to the Dockerfile, but adds requirements-tests.txt and an entrypoint that runs nosetests.
+
+FROM centos:7
+#TODO : move to UBI like FROM registry.access.redhat.com/ubi7/python-27:2.7
+RUN yum --enablerepo=extras -y install epel-release && yum -y install python36-pip
+
+COPY controller/requirements.txt /driver/controller/
+RUN pip3 install --upgrade pip==19.1.1
+RUN pip3 install -r /driver/controller/requirements.txt
+
+# Required to run the unit tests
+COPY controller/requirements-tests.txt /driver/controller/
+RUN pip3 install -r /driver/controller/requirements-tests.txt
+
+COPY ./common /driver/common
+COPY ./controller /driver/controller
+RUN groupadd -g 9999 appuser && \
+    useradd -r -u 9999 -g appuser appuser
+RUN chown -R appuser:appuser /driver
+USER appuser
+WORKDIR /driver
+
+ENTRYPOINT ["/driver/controller/scripts/entrypoint-test.sh"]
+
diff --git a/Dockerfile-csi-node b/Dockerfile-csi-node
new file mode 100644
index 000000000..71d59b9d8
--- /dev/null
+++ b/Dockerfile-csi-node
@@ -0,0 +1,69 @@
+# Copyright IBM Corporation 2019.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build stage
+FROM golang:1.12.6 as builder
+
+WORKDIR /go/src/github.com/ibm/ibm-block-csi-driver
+ENV GO111MODULE=on
+
+# Populate the module cache based on the go.{mod,sum} files.
+COPY go.mod .
+COPY go.sum .
+RUN go mod download
+
+COPY . .
+RUN make ibm-block-csi-driver
+
+# Final stage
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.0
+MAINTAINER IBM Storage
+
+LABEL name="IBM block storage CSI driver node" \
+    vendor="IBM" \
+    version="0.9.0" \
+    release="b62" \
+    summary="The node component of the IBM block storage CSI driver" \
+    description="The IBM block storage CSI driver enables container orchestrators, such as Kubernetes and OpenShift, to manage the life-cycle of persistent storage." \
+    io.k8s.display-name="IBM block storage CSI driver node" \
+    io.k8s.description="The IBM block storage CSI driver enables container orchestrators, such as Kubernetes and OpenShift, to manage the life-cycle of persistent storage." \
+    io.openshift.tags=ibm,csi,ibm-block-csi-driver,ibm-block-csi-node
+
+WORKDIR /root
+COPY --from=builder /go/src/github.com/ibm/ibm-block-csi-driver/common/config.yaml .
+COPY --from=builder /go/src/github.com/ibm/ibm-block-csi-driver/bin/ibm-block-csi-node-driver .
+COPY ./LICENSE /licenses/
+COPY ./NOTICES /licenses/
+
+RUN mkdir /chroot
+ADD chroot-host-wrapper.sh /chroot
+RUN chmod 777 /chroot/chroot-host-wrapper.sh
+RUN ln -s /chroot/chroot-host-wrapper.sh /chroot/blkid \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/blockdev \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/iscsiadm \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/lsblk \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/lsscsi \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/mkfs.ext3 \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/mkfs.ext4 \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/mkfs.xfs \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/fsck \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/mount \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/multipath \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/multipathd \
+    && ln -s /chroot/chroot-host-wrapper.sh /chroot/umount
+
+ENV PATH="/chroot:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+
+ENTRYPOINT ["/root/ibm-block-csi-node-driver"]
diff --git a/Dockerfile-csi-node.test b/Dockerfile-csi-node.test
new file mode 100644
index 000000000..4b58f716a
--- /dev/null
+++ b/Dockerfile-csi-node.test
@@ -0,0 +1,30 @@
+# Copyright IBM Corporation 2019.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build stage
+FROM golang:1.12.6 as builder
+
+WORKDIR /go/src/github.com/ibm/ibm-block-csi-driver
+ENV GO111MODULE=on
+
+# Populate the module cache based on the go.{mod,sum} files.
+COPY go.mod .
+COPY go.sum .
+RUN go mod download
+RUN go get github.com/tebeka/go2xunit # when GO111MODULE=on the module does not become an executable, so fetch it here so it can be run as a binary.
+RUN go get github.com/golang/mock/gomock
+RUN go install github.com/golang/mock/mockgen
+
+COPY . .
+ENTRYPOINT ["make", "test-xunit"]
diff --git a/Dockerfile-csi-test b/Dockerfile-csi-test
new file mode 100644
index 000000000..43321141d
--- /dev/null
+++ b/Dockerfile-csi-test
@@ -0,0 +1,36 @@
+# Copyright IBM Corporation 2019.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM golang:1.11
+
+RUN git clone https://github.com/kubernetes-csi/csi-test /usr/local/go/src/github.com/kubernetes-csi/csi-test
+RUN make -C /usr/local/go/src/github.com/kubernetes-csi/csi-test
+
+COPY ./scripts/csi_test /usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver
+
+RUN groupadd -g 9999 appuser && \
+    useradd -r -u 9999 -g appuser appuser
+RUN chown -R appuser:appuser /usr/local/go/src/github.com/kubernetes-csi/csi-test
+
+USER appuser
+WORKDIR /usr/local/go/src/github.com/kubernetes-csi/csi-test
+
+ENV JUNIT_OUTPUT="/tmp/test_results/out_client.xml"
+ENV SECRET_FILE="/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/csi_secrets"
+ENV PARAM_FILE="/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/csi_params"
+ENV ENDPOINT="/tmp/k8s_dir/nodecsi"
+ENV ENDPOINT_CONTROLLER="/tmp/k8s_dir/f"
+ENV TESTS_TO_RUN_FILE="/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/csi_tests_to_run"
+
+ENTRYPOINT ["/usr/local/go/src/github.com/kubernetes-csi/csi-test/ibm-driver/entrypoint-csi-tests.sh"]
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..9994ab4f3
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,66 @@
+#
+# Copyright 2019 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Note: this Makefile currently covers compiling and testing the CSI node driver and building its image. The csi-controller should be added to the Makefile later.
+
+PKG=github.com/ibm/ibm-block-csi-driver
+IMAGE=ibmcom/ibm-block-csi-driver
+GIT_COMMIT?=$(shell git rev-parse HEAD)
+BUILD_DATE?=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
+LDFLAGS?="-X ${PKG}/node/pkg/driver.gitCommit=${GIT_COMMIT} -X ${PKG}/node/pkg/driver.buildDate=${BUILD_DATE} -s -w"
+GO111MODULE=on
+DRIVER_CONFIG_YML=$(shell pwd)/common/config.yaml
+
+.EXPORT_ALL_VARIABLES:
+
+.PHONY: ibm-block-csi-driver
+ibm-block-csi-driver:
+	mkdir -p bin
+	CGO_ENABLED=0 GOOS=linux go build -ldflags ${LDFLAGS} -o bin/ibm-block-csi-node-driver ./node/cmd
+
+.PHONY: test
+test:
+	if [ -d ./node/mocks ]; then rm -rf ./node/mocks; fi
+	go generate ./...
+	go test -v -race ./node/...
+
+.PHONY: test-xunit
+test-xunit:
+	mkdir -p ./build/reports
+	if [ -d ./node/mocks ]; then rm -rf ./node/mocks; fi
+	go generate ./...
+	go test -v -race ./node/... | go2xunit -output build/reports/csi-node-unitests.xml
+	go test -v -race ./node/... # run again so that make fails if any test fails
+
+.PHONY: test-xunit-in-container
+test-xunit-in-container:
+	# Run make test-xunit inside the CSI node container, to avoid needing Go and other test utilities on your laptop.
+	docker build -f Dockerfile-csi-node.test -t csi-node-unitests .
+ docker run --rm -t -v $(CURDIR)/build/reports/:/go/src/github.com/ibm/ibm-block-csi-driver/build/reports/ csi-node-unitests + + +.PHONY: gofmt +gofmt: + gofmt -w ./node + +.PHONY: csi-build-images-and-push-artifactory +csi-build-images-and-push-artifactory: + ./scripts/ci/build_push_images.sh + +.PHONY: list +list: + @$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' + diff --git a/NOTICES b/NOTICES new file mode 100644 index 000000000..a11c9b6aa --- /dev/null +++ b/NOTICES @@ -0,0 +1,1732 @@ + +Additional Third Party Software License Agreements and Notices + +This file details additional Third Party Software License Agreements +and third party notices and information that are required +to be reproduced for the following programs: + +IBM Block Storage CSI Driver version 0.9 + + + +=========================================================================== +Section 1 - TERMS AND CONDITIONS FOR SEPARATELY LICENSED CODE +=========================================================================== + +The "Separately Licensed Code" identified in Section 1 of this +document is provided to Licensee under terms and conditions that +are different from the license agreement for the Program. + +Licensee's use of such components or portions thereof is subject to the +terms of the associated license agreement provided or referenced in this +section and not the terms of the license agreement for the Program. + +The following are Separately Licensed Code: + +Paramiko version 2.4.2 +Red Hat Universal Base Image Version 8 + + + + +@@@@@@@@@@@@ +=========================================================================== +GNU LESSER GENERAL PUBLIC LICENSE VERSION 2.1: THE FOLLOWING TERMS AND +CONDITIONS APPLY to the listed components below which are licensed under +the GNU LESSER GENERAL PUBLIC LICENSE VERSION 2.1: + +paramiko version 2.4.2 + +--------------------------------------------------------------------------- +Start of GNU LGPL Version 2.1 License +--------------------------------------------------------------------------- + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. 
Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. 
To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. 
+ + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. 
Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. 
Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. 
In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written + by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + + +--------------------------------------------------------------------------- +END of GNU LGPL Version 2.1 License +--------------------------------------------------------------------------- + + + + +@@@@@@@@@@@@ +=========================================================================== +THE FOLLOWING TERMS AND CONDITIONS APPLY to Red Hat Universal +Base Image Version 8 software: +--------------------------------------------------------------------------- + +END USER LICENSE AGREEMENT + +RED HAT UNIVERSAL BASE IMAGE + +PLEASE READ THIS END USER LICENSE AGREEMENT CAREFULLY BEFORE USING +SOFTWARE FROM RED HAT. BY USING RED HAT SOFTWARE, YOU SIGNIFY YOUR +ASSENT TO AND ACCEPTANCE OF THIS END USER LICENSE AGREEMENT AND +ACKNOWLEDGE YOU HAVE READ AND UNDERSTAND THE TERMS. AN INDIVIDUAL ACTING +ON BEHALF OF AN ENTITY REPRESENTS THAT HE OR SHE HAS THE AUTHORITY TO +ENTER INTO THIS END USER LICENSE AGREEMENT ON BEHALF OF THAT ENTITY. IF +YOU DO NOT ACCEPT THE TERMS OF THIS AGREEMENT, THEN YOU MUST NOT USE THE +RED HAT SOFTWARE. THIS END USER LICENSE AGREEMENT DOES NOT PROVIDE ANY +RIGHTS TO RED HAT SERVICES SUCH AS SOFTWARE MAINTENANCE, UPGRADES OR +SUPPORT. PLEASE REVIEW YOUR SERVICE OR SUBSCRIPTION AGREEMENT(S) THAT +YOU MAY HAVE WITH RED HAT OR OTHER AUTHORIZED RED HAT SERVICE PROVIDERS +REGARDING SERVICES AND ASSOCIATED PAYMENTS. + +This end user license agreement (“EULA”) governs the use of Red Hat +Universal Base Image and associated software supporting such +container(s) and any related updates, source code, including the +appearance, structure and organization (the “Programs”), regardless +of the delivery mechanism. If a Red Hat Universal Base Image is included +in another Red Hat product, the EULA terms of such other Red Hat product +will apply and supersede this EULA. If a Red Hat Universal Base Image is +included in a third party work, the terms of this EULA will continue to +govern the Red Hat Universal Base Image. + +1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. +(“Red Hat”) grants to you a perpetual, worldwide license to the +Programs (each of which may include multiple software components). 
With +the exception of the Red Hat trademark identified in Section 2 below, +each software component is governed by a license that permits you to +run, copy, modify, and redistribute (subject to certain obligations in +some cases) the software components. This EULA pertains solely to the +Programs and does not limit your rights under, or grant you rights that +supersede, the license terms applicable to any particular component. The +license terms applicable to each software component are provided in the +source code of that component. + +2. Intellectual Property Rights. The Programs and each of their +components are owned by Red Hat and other licensors and are protected +under copyright law and other laws as applicable. Title to the Programs +and any component shall remain with Red Hat and other licensors, subject +to the applicable license, excluding any independently developed and +licensed work. The “Red Hat” trademark is a registered trademark of +Red Hat and its affiliates in the U.S. and other countries. Subject to +Red Hat’s trademark usage guidelines (set forth at +http://www.redhat.com/about/corporate/trademark/), this EULA permits you +to distribute the Programs that include the Red Hat trademark, provided +you do not make any statements on behalf of Red Hat, including but not +limited to, stating or in any way suggesting (in any public, private +and/or confidential statement (whether written or verbal)) that Red Hat +supports or endorses software built and delivered with a Red Hat +Universal Base Image(s) (such derivative works referred to as a “Red +Hat Based Container Images”); provided if a Red Hat Based Container +Image is Red Hat Certified and deployed on a Red Hat supported +configuration as set forth at https://access.redhat.com/articles/2726611 +then you may state that the Red Hat Universal Base Image is supported by +Red Hat. You agree to include this unmodified EULA in all distributions +of container images sourced, built or otherwise derived from the +Programs. If you modify the Red Hat Universal Base Image(s), you must +remove any Red Hat trademark(s) prior to any subsequent distribution. +Any breach of this Section 2 is a material breach of the EULA and you +may no longer use and/or distribute the Red Hat trademark(s). +Modifications to the software may corrupt the Programs. + +3. Limited Warranty. Except as specifically stated in this Section 3, a +separate agreement with Red Hat, or a license for a particular +component, to the maximum extent permitted under applicable law, the +Programs and the components are provided and licensed “as is” +without warranty of any kind, expressed or implied, including the +implied warranties of merchantability, non-infringement or fitness for a +particular purpose. Neither Red Hat nor its affiliates warrant that the +functions contained in the Programs will meet your requirements or that +the operation of the Programs will be entirely error free, appear or +perform precisely as described in the accompanying documentation, or +comply with regulatory requirements. Red Hat warrants that the media on +which the Programs and the components are provided will be free from +defects in materials and manufacture under normal use for a period of 30 +days from the date of delivery to you. This warranty extends only to the +party that purchases subscription services for the supported +configurations from Red Hat and/or its affiliates or a Red Hat +authorized distributor. + +4. Limitation of Remedies and Liability. 
To the maximum extent permitted +by applicable law, your exclusive remedy under this EULA is to return +any defective media within 30 days of delivery along with a copy of your +payment receipt and Red Hat, at its option, will replace it or refund +the money you paid for the media. To the maximum extent permitted under +applicable law, under no circumstances will Red Hat, its affiliates, any +Red Hat authorized distributor, or the licensor of any component +provided to you under this EULA be liable to you for any incidental or +consequential damages, including lost profits or lost savings arising +out of the use or inability to use the Programs or any component, even +if Red Hat, its affiliates, an authorized distributor, and/or licensor +has been advised of the possibility of such damages. In no event shall +Red Hat's or its affiliates’ liability, an authorized distributor’s +liability or the liability of the licensor of a component provided to +you under this EULA exceed the amount that you paid to Red Hat for the +media under this EULA. + +5. Export Control. As required by the laws of the United States and +other countries, you represent and warrant that you: (a) understand that +the Programs and their components may be subject to export controls +under the U.S. Commerce Department’s Export Administration Regulations +(“EAR”); (b) are not located in a prohibited destination country +under the EAR or U.S. sanctions regulations (currently Cuba, Iran, North +Korea, Sudan, Syria, and the Crimea Region of Ukraine, subject to change +as posted by the United States government); (c) will not export, +re-export, or transfer the Programs to any prohibited destination, +persons or entities on the U.S. Bureau of Industry and Security Denied +Parties List or Entity List, or the U.S. 
Office of Foreign Assets +Control list of Specially Designated Nationals and Blocked Persons, or +any similar lists maintained by other countries, without the necessary +export license(s) or authorizations(s); (d) will not use or transfer the +Programs for use in connection with any nuclear, chemical or biological +weapons, missile technology, or military end-uses where prohibited by an +applicable arms embargo, unless authorized by the relevant government +agency by regulation or specific license; (e) understand and agree that +if you are in the United States and export or transfer the Programs to +eligible end users, you will, to the extent required by EAR Section +740.17(e), submit semi-annual reports to + +Red Hat Universal Base Image +End User License Agreement + + +=========================================================================== +END OF TERMS AND CONDITIONS FOR Red Hat Universal Base Image +Version 8 +=========================================================================== + + + +=========================================================================== +END OF TERMS AND CONDITIONS FOR SEPARATELY LICENSED CODE for IBM Block +Storage CSI Driver version +=========================================================================== + + + +@@@@@@@@@@@@ +=========================================================================== +======================== SOURCE CODE OFFERS =============================== + +GNU GPL and / or LGPL Source Code for: + +IBM Block Storage CSI Driver Version 0.9 Third Party Licenses and Notices + +=========================================================================== + + + + + +@@@@@@@@@@@@ +=========================================================================== +Lesser General Public License version 2.1: The product includes the +following licensed code to the licensee as Separately Licensed Code under +the GNU Lesser General Public License 2.1. +=========================================================================== + + paramiko version 2.4.2 + +Source code to any of the above-listed packages distributed with IBM Block +Storage CSI Driver Version 0.9 Third Party Licenses and Notices is +available at the website below, when a URL is provided, or by sending a +request to the following address or email: + + IBM Corporation + Attn: Dept 4XNA / 9032-2, Storage Open Source Management + 9000 S. Rita Road + Tucson, AZ 85744 + +Please identify the name of the IBM product and the GPL or LGPL licensed +program(s) required in the request for source code. + +=========================================================================== +END of GNU LGPL Version 2.1 Notices and Information +=========================================================================== + +========================================================================= +Section 2 - NOTICES and INFORMATION +========================================================================= + +Notwithstanding the terms and conditions of any other agreement Licensee +may have with IBM or any of its related or affiliated entities +(collectively "IBM"), the third party code identified below is subject +to the terms and conditions of the license agreement for the +Program and not the license terms that may be contained in the notices +below. The notices are provided for informational purposes. + +IMPORTANT: IBM does not represent or warrant that the information in this +NOTICES file is accurate. 
Third party websites are independent of IBM and +IBM does not represent or warrant that the information on any third party +website referenced in this NOTICES file is accurate. IBM disclaims any +and all liability for errors and omissions or for any damages accruing +from the use of this NOTICES file or its contents, including without +limitation URLs or references to any third party websites. + +--------------------------------------------------------------------------- +The following are NOTICES and INFORMATION: +--------------------------------------------------------------------------- + + + +@@@@@@@@@@@@ +=========================================================================== +APACHE 2.0 LICENSED CODE: The Program includes all or portions of +the following software which IBM obtained under the terms and conditions +of the Apache License Version 2.0: + +grpc version 1.20.1 +grpc version 1.22.0 +k8s.io/apimachinery commit f97a4e5 +kubernetes-mount version 1.13.1 +mock version 1.3.1 +yaml version 2.2.2 + +--------------------------------------------------------------------------- + +--------------------------------------------------------------------------- +Start of Apache Software License Version 2.0 +--------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +--------------------------------------------------------------------------- +End of Apache Software License Version 2.0 +--------------------------------------------------------------------------- + + +--------------------------------------------------------------------------- + + + +=========================================================================== +NOTICE file corresponding to section 4(d) of the Apache License, +Version 2.0, in this case for the grpc version 1.20.1 distribution +=========================================================================== +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +=========================================================================== +NOTICE file corresponding to section 4(d) of the Apache License, +Version 2.0, in this case for the grpc version 1.22.0 distribution +=========================================================================== +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + + +=========================================================================== +NOTICE file corresponding to section 4(d) of the Apache License, +Version 2.0, in this case for the kubernetes-mount version +1.13.1 distribution +=========================================================================== +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +=========================================================================== +NOTICE file corresponding to section 4(d) of the Apache License, +Version 2.0, in this case for the yaml version 2.2.2 distribution +=========================================================================== +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +=========================================================================== +END OF APACHE 2.0 NOTICES AND INFORMATION +=========================================================================== + + + +@@@@@@@@@@@@ +=========================================================================== +errors version 0.8.1: The Program includes errors version 0.8.1 +software. 
IBM obtained the errors version 0.8.1 software under the +terms and conditions of the following license(s): +--------------------------------------------------------------------------- + +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +=========================================================================== +END OF errors version 0.8.1 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +grpcio version 1.20.1: The Program includes grpcio version 1.20.1 +software. IBM obtained portions of the grpcio version 1.20.1 software +under the terms and conditions of the following license(s): +--------------------------------------------------------------------------- + +Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the project nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +--------------------------------------------------------------------------- + +Copyright (c) 2011 Petteri Aimonen + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not +claim that you wrote the original software. If you use this software in +a product, an acknowledgment in the product documentation would be +appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not +be misrepresented as being the original software. + +3. This notice may not be removed or altered from any source +distribution. + +--------------------------------------------------------------------------- + +Copyright (c) 2011 Petteri Aimonen + +This software is provided 'as-is', without any express or +implied warranty. In no event will the authors be held liable +for any damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. + +3. This notice may not be removed or altered from any source + distribution. + +--------------------------------------------------------------------------- + +Copyright 2015, Google Inc. +Copyright 2016, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--------------------------------------------------------------------------- + +Copyright (c) 2015, Google Inc. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION +OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +--------------------------------------------------------------------------- + +Protocol Buffers - Google's data interchange format +Copyright 2008 Google Inc. All rights reserved. +https://developers.google.com/protocol-buffers/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--------------------------------------------------------------------------- + +Copyright 2009 Kitware, Inc. +Copyright 2009-2011 Philip Lowman +Copyright 2008 Esben Mose Hansen, Ange Optimization ApS + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +* Neither the names of Kitware, Inc., the Insight Software Consortium, + nor the names of their contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +=========================================================================== +END OF grpcio version 1.20.1 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +logrus version 1.4.2: The Program includes logrus version 1.4.2 +software. IBM obtained the logrus version 1.4.2 software under the +terms and conditions of the following license(s): +--------------------------------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +=========================================================================== +END OF logrus version 1.4.2 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +munch version 2.3.2: The Program includes munch version 2.3.2 +software. 
IBM obtained the munch version 2.3.2 software under the +terms and conditions of the following license(s): +--------------------------------------------------------------------------- + +Copyright (c) 2010 David Schoonover + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +=========================================================================== +END OF munch version 2.3.2 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +net commit 3b0461e: The Program includes net commit 3b0461e software. +IBM obtained the net commit 3b0461e software under the terms and +conditions of the following license(s): +--------------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2010 The Go Authors. All rights reserved. +Copyright (c) 2011 The Go Authors. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2013 The Go Authors. All rights reserved. +Copyright (c) 2014 The Go Authors. All rights reserved. +Copyright (c) 2015 The Go Authors. All rights reserved. +Copyright (c) 2016 The Go Authors. All rights reserved. +Copyright (c) 2017 The Go Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--------------------------------------------------------------------------- + +Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources. +WebKit is licensed under a BSD style license. +http://webkit.org/coding/bsd-license.html says: + +Copyright (C) 2009 Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +=========================================================================== +END OF net commit 3b0461e NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +protobuf version 3.7.1: The Program includes protobuf version 3.7.1 +software. IBM obtained the protobuf version 3.7.1 software under the +terms and conditions of the following license(s): +--------------------------------------------------------------------------- + +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + + +=========================================================================== +END OF protobuf version 3.7.1 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +pyyaml version 5.1: The Program includes pyyaml version 5.1 software. +IBM obtained the pyyaml version 5.1 software under the terms and +conditions of the following license(s): +--------------------------------------------------------------------------- + +Copyright (c) 2017-2019 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +=========================================================================== +END OF pyyaml version 5.1 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +setuptools (python) version 41.0.1: The Program includes setuptools +(python) version 41.0.1 software. 
IBM obtained the setuptools +(python) version 41.0.1 software under the terms and conditions of +the following license(s): +--------------------------------------------------------------------------- + +Copyright (C) 2016 Jason R Coombs + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------------------------------------------------------------- + +Copyright (c) 2003-2018 Paul T. McGuire + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +--------------------------------------------------------------------------- + +Copyright (c) 2010-2015 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +=========================================================================== +END OF setuptools (python) version 41.0.1 NOTICES AND +INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +sync commit 1122301: The Program includes sync commit 1122301 +software. IBM obtained the sync commit 1122301 software under the +terms and conditions of the following license(s): +--------------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +=========================================================================== +END OF sync commit 1122301 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +yaml version 2.2.2: The Program includes yaml version 2.2.2 software. 
+IBM obtained portions of the yaml version 2.2.2 software under the +terms and conditions of the following license(s): +--------------------------------------------------------------------------- + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +=========================================================================== +END OF yaml version 2.2.2 NOTICES AND INFORMATION +=========================================================================== + + +========================================================================= +Section 3 - Other IBM Open Source Project codes +========================================================================= + +Use of other IBM Open Source Project codes in IBM Block +Storage CSI Driver Version 0.9 + +https://github.com/IBM/pyxcli (Apache v2) +https://github.com/IBM/pysvc (Apache v2) + +------------------------------------------------------------------------- + + + + +=========================================================================== +END OF NOTICES AND INFORMATION FOR IBM Block Storage CSI +Driver Version 0.9 Third Party Licenses and Notices +=========================================================================== + diff --git a/README.md b/README.md index 3939c7dab..2a04c5cde 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,344 @@ -# ibm-block-csi-drive -The Container Storage Interface (CSI) Driver for IBM block storage systems enables container orchestrators such as Kubernetes to manage the life-cycle of persistent storage. +# IBM block storage CSI driver + +The IBM block storage CSI driver enables container orchestrators, such as Kubernetes and OpenShift, to manage the life-cycle of persistent storage. + +Supported container platforms: + - OpenShift v4.1 + - Kubernetes v1.13 + +Supported IBM storage systems: + - IBM FlashSystem 9100 + - IBM Spectrum Virtualize + - IBM Storwize + - IBM FlashSystem A9000/R + +Supported operating systems: + - RHEL 7.x (x86 architecture) + +DISCLAIMER: The driver is provided as is, without warranty. Version 0.9.0 of the IBM block storage CSI driver is a beta software version. Do not use this driver for production purposes and do not contact IBM for support. Any issue will be handled on a best-effort basis. 
+
+## Table of contents:
+* [Prerequisites for driver installation](#prerequisites-for-driver-installation)
+  - Install the Fibre Channel and iSCSI connectivity RPMs, configure multipathing, and configure storage system connectivity.
+* [Installing the driver](#installing-the-driver)
+* [Configuring k8s secret and storage class](#configuring-k8s-secret-and-storage-class)
+  - Configure the k8s storage class - to define the storage system pool name, secret reference, SpaceEfficiency (thin, compressed, or deduplicated) and fstype (ext4/xfs)
+  - Storage system secret - to define the storage credentials (user and password) and its address
+* [Driver usage](#driver-usage)
+  - Example of how to create a PVC and a StatefulSet application, with full details behind the scenes
+* [Uninstalling the driver](#uninstalling-the-driver)
+* [More details and troubleshooting](#more-details-and-troubleshooting)
+
+
+## Prerequisites for driver installation
+
+### Preparing worker nodes
+Perform these steps for each worker node in the Kubernetes cluster:
+
+#### 1. Install Linux packages to ensure Fibre Channel and iSCSI connectivity
+Skip this step if the packages are already installed.
+
+RHEL 7.x:
+```sh
+yum -y install iscsi-initiator-utils   # Only if iSCSI connectivity is required
+yum -y install xfsprogs                # Only if XFS file system is required
+```
+
+#### 2. Configure Linux multipath devices on the host
+Create and set the relevant storage system parameters in the `/etc/multipath.conf` file.
+You can also use the default `multipath.conf` file, located in the `/usr/share/doc/device-mapper-multipath-*` directory.
+Verify that the `systemctl status multipathd` output indicates that the multipath status is active and error-free.
+
+RHEL 7.x:
+```sh
+yum install device-mapper-multipath
+modprobe dm-multipath
+systemctl start multipathd
+systemctl status multipathd
+multipath -ll
+```
+
+**Important:** When configuring Linux multipath devices, verify that the `find_multipaths` parameter in the `multipath.conf` file is disabled. In RHEL 7.x, remove the `find_multipaths yes` string from the `multipath.conf` file. (A minimal example `multipath.conf` is sketched at the end of this prerequisites chapter.)
+
+#### 3. Configure storage system connectivity
+3.1. Define the hostname of each Kubernetes node on the relevant storage systems with the valid WWPN or IQN of the node.
+
+3.2. For Fibre Channel, configure the relevant zoning from the storage to the host.
+
+3.3. For iSCSI, perform the following steps:
+
+3.3.1. Make sure that the login to the iSCSI targets is permanent and remains available after a reboot of the worker node. To do this, verify that the `node.startup` parameter in the `/etc/iscsi/iscsid.conf` file is set to `automatic`. If not, set it as required and then restart the iscsid service (`$> service iscsid restart`).
+
+3.3.2. Discover and log into at least two iSCSI targets on the relevant storage systems.
+
+```sh
+$> iscsiadm -m discoverydb -t st -p ${storage system iSCSI port IP}:3260 --discover
+$> iscsiadm -m node -p ${storage system iSCSI port IP/hostname} --login
+```
+
+3.3.3. Verify that the login was successful and display all targets that you logged into. The portal value must be the iSCSI target IP address.
+
+```sh
+$> iscsiadm -m session --rescan
+Rescanning session [sid: 1, target: {storage system IQN},
+portal: {storage system iSCSI port IP},{port number}
+```
+
+End of worker node setup.
+
+
+### Installing the CSIDriver CRD (optional)
+Enable the CSIDriver on Kubernetes. For more details, see https://kubernetes-csi.github.io/docs/csi-driver-object.html#enabling-csidriver-on-kubernetes.
+In Kubernetes v1.13, this feature was disabled by default (the feature was alpha). To enable the use of CSIDriver on this Kubernetes version, perform the following steps:
+
+**Note:** If the feature gate is not enabled, the CSIDriver object for block.csi.ibm.com will not be created automatically.
+
+1. Ensure the feature gate is enabled via the following Kubernetes feature flag: `--feature-gates=CSIDriverRegistry=true`.
+   For example, on a kubeadm installation, add the flag inside `/etc/kubernetes/manifests/kube-apiserver.yaml`.
+2. Perform one of the following:
+   - Ensure the CSIDriver CRD is automatically installed via the Kubernetes Storage CRD addon, OR
+   - Manually install the CSIDriver CRD on the Kubernetes cluster with the following command:
+   ```sh
+   $> kubectl create -f https://raw.githubusercontent.com/kubernetes/csi-api/master/pkg/crd/manifests/csidriver.yaml
+   ```
+
+
+
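+Optionally, verify the setup. The following commands assume a kubeadm-based control plane (as in the example above) and use only standard `kubectl` and `grep`:
+
+```sh
+###### Confirm that the feature gate is set on the API server
+$> grep feature-gates /etc/kubernetes/manifests/kube-apiserver.yaml
+
+###### Confirm that the CSIDriver CRD is registered in the cluster
+$> kubectl get crd | grep -i csidriver
+
+###### After the driver is installed, list the CSIDriver objects themselves
+$> kubectl get csidrivers
+```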
+
+
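+### Example multipath.conf (optional)
+A minimal `/etc/multipath.conf` sketch for step 2 of the worker node preparation is shown below. It only illustrates disabling `find_multipaths`; add the relevant storage system parameters as described in that step (the default file under `/usr/share/doc/device-mapper-multipath-*` is a good starting point).
+
+```sh
+$> cat /etc/multipath.conf
+defaults {
+    find_multipaths no
+}
+```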
+ + + + +## Installing the driver +This section describes how to install the CSI driver. + +```sh +###### Download the driver yml file from github: +$> curl https://raw.githubusercontent.com/IBM/ibm-block-csi-driver/master/deploy/kubernetes/v1.13/ibm-block-csi-driver.yaml > ibm-block-csi-driver.yaml + +###### Optional: Only edit the `ibm-block-csi-driver.yaml` file if you need to change the driver IMAGE URL. By default, the URL is `ibmcom/ibm-block-csi-driver-controller:0.9.0` and `ibmcom/ibm-block-csi-driver-node:0.9.0`. + +###### Install the driver: +$> kubectl apply -f ibm-block-csi-driver.yaml +``` + +Verify the driver is running. (Make sure the csi-controller pod status is Running): + +```sh + +$> kubectl get all -n kube-system -l csi +NAME READY STATUS RESTARTS AGE +pod/ibm-block-csi-controller-0 5/5 Running 0 9m36s +pod/ibm-block-csi-node-jvmvh 3/3 Running 0 9m36s +pod/ibm-block-csi-node-tsppw 3/3 Running 0 9m36s + +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +daemonset.apps/ibm-block-csi-node 2 2 2 2 2 9m36s + +NAME READY AGE +statefulset.apps/ibm-block-csi-controller 1/1 9m36s + +``` + +
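+If you do need to change the default image URLs mentioned above (for example, to pull from a private registry), a plain text substitution before applying the yaml is sufficient. The registry host below is only a placeholder:
+
+```sh
+$> sed -i 's|ibmcom/ibm-block-csi-driver-controller:0.9.0|registry.example.com/ibm-block-csi-driver-controller:0.9.0|' ibm-block-csi-driver.yaml
+$> sed -i 's|ibmcom/ibm-block-csi-driver-node:0.9.0|registry.example.com/ibm-block-csi-driver-node:0.9.0|' ibm-block-csi-driver.yaml
+$> kubectl apply -f ibm-block-csi-driver.yaml
+```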
+
+
+ +## Configuring k8s secret and storage class +In order to use the driver, create the relevant storage classes and secrets, as needed. + +This section describes how to: + 1. Create a storage system secret - to define the storage credential (user and password) and its address. + 2. Configure the k8s storage class - to define the storage system pool name, secret reference, SpaceEfficiency (thin, compressed, or deduplicated) and fstype(xfs\ext4). + +#### 1. Create an array secret +Create a secret file as follows and update the relevant credentials: + +``` +kind: Secret +apiVersion: v1 +metadata: + name: + namespace: kube-system +type: Opaque +data: + username: # Array username. + password: # Array password. + management_address: # Array managment addresses +``` + +Apply the secret: + +``` +$> kubectl apply -f array-secret.yaml +``` + +#### 2. Create storage classes + +Create a storage class yaml file `storageclass-gold.yaml` as follows, with the relevant capabilities, pool and, array secret: + +```sh +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: gold +provisioner: block.csi.ibm.com +parameters: + #SpaceEfficiency: # Optional: Values applicable for Storwize are: thin, compressed, or deduplicated + pool: + + csi.storage.k8s.io/provisioner-secret-name: + csi.storage.k8s.io/provisioner-secret-namespace: + csi.storage.k8s.io/controller-publish-secret-name: + csi.storage.k8s.io/controller-publish-secret-namespace: + + csi.storage.k8s.io/fstype: xfs # Optional: Values ext4/xfs. The default is ext4. +``` + +Apply the storage class: + +```sh +$> kubectl apply -f storageclass-gold.yaml +storageclass.storage.k8s.io/gold created +``` +You can now run stateful applications using IBM block storage systems. + + + + +
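+**Note:** The values under `data:` in the secret yaml must be base64-encoded. As an alternative to editing the yaml, the same secret can be created directly from plain-text values with `kubectl`, which encodes them for you (all values below are placeholders):
+
+```sh
+$> kubectl create secret generic <secret-name> -n kube-system \
+     --from-literal=username=<array-username> \
+     --from-literal=password=<array-password> \
+     --from-literal=management_address=<array-management-address>
+```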
+
+
+ + + +## Driver usage +Create PVC demo-pvc-gold using `demo-pvc-gold.yaml`: + +```sh +$> cat demo-pvc-gold.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-demo +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: gold + +$> kubectl apply -f demo-pvc-gold.yaml +persistentvolumeclaim/demo-pvc created +``` + + +Create StatefulSet application `demo-statefulset` that uses the demo-pvc. + +```sh +$> cat demo-statefulset-with-demo-pvc.yml +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: demo-statefulset +spec: + selector: + matchLabels: + app: demo-statefulset + serviceName: demo-statefulset + replicas: 1 + template: + metadata: + labels: + app: demo-statefulset + spec: + containers: + - name: container1 + image: registry.access.redhat.com/ubi8/ubi:latest + command: [ "/bin/sh", "-c", "--" ] + args: [ "while true; do sleep 30; done;" ] + volumeMounts: + - name: demo-pvc + mountPath: "/data" + volumes: + - name: demo-pvc + persistentVolumeClaim: + claimName: demo-pvc + + #nodeSelector: + # kubernetes.io/hostname: NODESELECTOR + + +$> kubectl create -f demo-statefulset-with-demo-pvc.yml +statefulset/demo-statefulset created + +$> kubectl get pod demo-statefulset-0 +NAME READY STATUS RESTARTS AGE +demo-statefulset-0 1/1 Running 0 43s + +###### Review the mountpoint inside the pod: +$> kubectl exec demo-statefulset-0 -- bash -c "df -h /data" +Filesystem Size Used Avail Use% Mounted on +/dev/mapper/mpathz 1014M 33M 982M 4% /data +``` + + +Delete StatefulSet and PVC + +```sh +$> kubectl delete statefulset/demo-statefulset +statefulset/demo-statefulset deleted + +$> kubectl get statefulset/demo-statefulset +No resources found. + +$> kubectl delete pvc/demo-pvc +persistentvolumeclaim/demo-pvc deleted + +$> kubectl get pv,pvc +No resources found. +``` + +
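+At any point in the flow above, the standard kubectl inspection commands help show what the driver is doing behind the scenes (object names as used in this example):
+
+```sh
+$> kubectl describe pvc demo-pvc               # Events show the provisioning progress
+$> kubectl get volumeattachments               # CSI attachments of PVs to worker nodes
+$> kubectl describe pod demo-statefulset-0     # Events show the attach and mount progress
+```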
+
+
+ +## Uninstalling the driver + +### Delete the storage class, secret, and driver + +```sh +$> kubectl delete storageclass/gold +$> kubectl delete -n kube-system secret/a9000-array1 +$> kubectl delete -f ibm-block-csi-driver.yaml + +##### Kubernetes version 1.13 automatically creates the CSIDriver `block.csi.ibm.com`, but it does not delete it automatically when removing the driver manifest. In order to clean up the CSIDriver object, run the following command: +$> kubectl delete CSIDriver block.csi.ibm.com + +``` + +
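+As a safeguard, you can first confirm that no claims or volumes provisioned by the driver remain in the cluster before deleting it:
+
+```sh
+$> kubectl get pvc --all-namespaces
+$> kubectl get pv
+```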
+
+
+ +## More details and troubleshooting +[USAGE-DETAILS.md](USAGE-DETAILS.md) + +
+
+
## Licensing -Copyright 2016, 2017 IBM Corp. +Copyright 2019 IBM Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,3 +351,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/USAGE-DETAILS.md b/USAGE-DETAILS.md new file mode 100644 index 000000000..1b67195fe --- /dev/null +++ b/USAGE-DETAILS.md @@ -0,0 +1,371 @@ +# IBM block storage CSI driver - Usage Details + + + +## Driver Usage Details +This section shows how to: +- Create k8s secret `a9000-array1` for the storage system. (The example below uses FlashSystem A9000R as an example but the same can be used for FlashSystem A9000 and FlashSystem 9100.) +- Create storage class `gold`. +- Create PVC `demo-pvc`from the storage class `gold` and show some details on the created PVC and PV. +- Create StatefulSet application `demo-statefulset` and observe the mountpoint \ multipath device that was created by the driver. +- Write some data inside the 'demo-stateful', delete the 'demo-stateful' and then create it again, to validate that the data remains. + +Create secret and storage class: + +```sh +###### Create secret +$> cat demo-secret-a9000-array1.yaml +kind: Secret +apiVersion: v1 +metadata: + name: a9000-array1 + namespace: kube-system +type: Opaque +data: + username: # Replace with valid username + password: # Replace with valid password + management_address: # Replace with valid FlashSystem A9000 management address + +$> kubectl create -f demo-secret-a9000-array1.yaml +secret/a9000-array1 created + +###### Create storage class +$> cat demo-storageclass-gold-A9000R.yaml +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: gold +provisioner: block.csi.ibm.com +parameters: + pool: gold + + csi.storage.k8s.io/provisioner-secret-name: a9000-array1 + csi.storage.k8s.io/provisioner-secret-namespace: kube-system + csi.storage.k8s.io/controller-publish-secret-name: a9000-array1 + csi.storage.k8s.io/controller-publish-secret-namespace: kube-system + + csi.storage.k8s.io/fstype: xfs # Optional. values ext4/xfs. The default is ext4. + volume_name_prefix: demo1 # Optional. 
+ +$> kubectl create -f demo-storageclass-gold-A9000R.yaml +storageclass.storage.k8s.io/gold created +``` + +Create PVC demo-pvc-gold using `demo-pvc-gold.yaml`: + +```sh +$> cat demo-pvc-gold.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-demo +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: gold + + +$> kubectl apply -f demo-pvc-gold.yaml +persistentvolumeclaim/demo-pvc created +``` + +View the PVC and the created PV: + +```sh +$> kubectl get pv,pvc +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +persistentvolume/pvc-a04bd32f-bd0f-11e9-a1f5-005056a45d5f 1Gi RWO Delete Bound default/demo-pvc gold 78s + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +persistentvolumeclaim/demo-pvc Bound pvc-a04bd32f-bd0f-11e9-a1f5-005056a45d5f 1Gi RWO gold 78s + + +$> kubectl describe persistentvolume/pvc-a04bd32f-bd0f-11e9-a1f5-005056a45d5f +Name: pvc-a04bd32f-bd0f-11e9-a1f5-005056a45d5f +Labels: +Annotations: pv.kubernetes.io/provisioned-by: block.csi.ibm.com +Finalizers: [kubernetes.io/pv-protection] +StorageClass: gold +Status: Bound +Claim: default/demo-pvc +Reclaim Policy: Delete +Access Modes: RWO +VolumeMode: Filesystem +Capacity: 1Gi +Node Affinity: +Message: +Source: + Type: CSI (a Container Storage Interface (CSI) volume source) + Driver: block.csi.ibm.com + VolumeHandle: A9000:6001738CFC9035EB0000000000D1F68F + ReadOnly: false + VolumeAttributes: array_address= + pool_name=gold + storage.kubernetes.io/csiProvisionerIdentity=1565550204603-8081-block.csi.ibm.com + storage_type=A9000 + volume_name=demo1_pvc-a04bd32f-bd0f-11e9-a1f5-005056a45d5f +Events: + +##### View the newly created volume on the storage system side of thing (Using XCLI utility): +$> xcli vol_list pool=gold +Name Size (GB) Master Name Consistency Group Pool Creator Written (GB) +------------------------------------------------ ----------- ------------- ------------------- ------ --------- -------------- +demo1_pvc-a04bd32f-bd0f-11e9-a1f5-005056a45d5f 1 gold admin 0 + +``` + + + +Create StatefulSet application `demo-statefulset` that uses the demo-pvc. + +```sh +$> cat demo-statefulset-with-demo-pvc.yml +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: demo-statefulset +spec: + selector: + matchLabels: + app: demo-statefulset + serviceName: demo-statefulset + replicas: 1 + template: + metadata: + labels: + app: demo-statefulset + spec: + containers: + - name: container1 + image: registry.access.redhat.com/ubi8/ubi:latest + command: [ "/bin/sh", "-c", "--" ] + args: [ "while true; do sleep 30; done;" ] + volumeMounts: + - name: demo-pvc + mountPath: "/data" + volumes: + - name: demo-pvc + persistentVolumeClaim: + claimName: demo-pvc + + #nodeSelector: + # kubernetes.io/hostname: NODESELECTOR + + +$> kubectl create -f demo-statefulset-with-demo-pvc.yml +statefulset/demo-statefulset created +``` + +Display the newly created pod (make sure that the pod status is Running) and write data to its persistent volume. + +```sh +###### Wait for the pod Status to be Running. 
+$> kubectl get pod demo-statefulset-0 +NAME READY STATUS RESTARTS AGE +demo-statefulset-0 1/1 Running 0 43s + + +###### Review the mountpoint inside the pod: +$> kubectl exec demo-statefulset-0 -- bash -c "df -h /data" +Filesystem Size Used Avail Use% Mounted on +/dev/mapper/mpathz 1014M 33M 982M 4% /data + +$> kubectl exec demo-statefulset-0 -- bash -c "mount | grep /data" +/dev/mapper/mpathz on /data type xfs (rw,relatime,seclabel,attr2,inode64,noquota) + + +###### Write data in the PV inside the demo-statefulset-0 pod (the PV mounted inside the pod at /data) +$> kubectl exec demo-statefulset-0 touch /data/FILE +$> kubectl exec demo-statefulset-0 ls /data/FILE +File + +``` + +Log into the worker node that has the running pod and display the newly attached volume on the node. + +```sh +###### Verify which worker node is running the pod demo-statefulset-0 +$> kubectl describe pod demo-statefulset-0| grep "^Node:" +Node: k8s-node1/hostname + +###### Establish an SSH connection and log into the worker node +$> ssh k8s-node1 + +###### List multipath devices on the worker node (view the same `mpathz` that was mentioned above) +$>[k8s-node1] multipath -ll +mpathz (36001738cfc9035eb0000000000d1f68f) dm-3 IBM ,2810XIV +size=1.0G features='1 queue_if_no_path' hwhandler='0' wp=rw +`-+- policy='service-time 0' prio=1 status=active + |- 37:0:0:12 sdc 8:32 active ready running + `- 36:0:0:12 sdb 8:16 active ready running + +$>[k8s-node1] ls -l /dev/mapper/mpathz +lrwxrwxrwx. 1 root root 7 Aug 12 19:29 /dev/mapper/mpathz -> ../dm-3 + + +###### List the physical devices of the multipath `mpathz` and its mountpoint on the host. (This is the /data inside the stateful pod). +$>[k8s-node1] lsblk /dev/sdb /dev/sdc +NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT +sdb 8:16 0 1G 0 disk +└─mpathz 253:3 0 1G 0 mpath /var/lib/kubelet/pods/d67d22b8-bd10-11e9-a1f5-005056a45d5f/volumes/kubernetes.io~csi/pvc-a04bd32f-bd0f-11e9-a1f5 +sdc 8:32 0 1G 0 disk +└─mpathz 253:3 0 1G 0 mpath /var/lib/kubelet/pods/d67d22b8-bd10-11e9-a1f5-005056a45d5f/volumes/kubernetes.io~csi/pvc-a04bd32f-bd0f-11e9-a1f5 + +###### View the PV mounted on this host +###### (All PV mountpoints looks like the following: `/var/lib/kubelet/pods/*/volumes/kubernetes.io~csi/pvc-*/mount`) +$>[k8s-node1] df | egrep pvc +/dev/mapper/mpathz 1038336 32944 1005392 4% /var/lib/kubelet/pods/d67d22b8-bd10-11e9-a1f5-005056a45d5f/volumes/kubernetes.io~csi/pvc-a04bd32f-bd0f-11e9-a1f5-005056a45d5f/mount + + +##### Details about the driver internal metadata file `.stageInfo.json` are stored in the k8s PV node stage path `/var/lib/kubelet/plugins/kubernetes.io/csi/pv//globalmount/.stageInfo.json`. The CSI driver creates it during the NodeStage API, and it is used by the NodePublishVolume, NodeUnPublishVolume, and NodeUnStage CSI APIs later on. + +$> cat /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-711b6fef-bcf9-11e9-a1f5-005056a45d5f/globalmount/.stageInfo.json +{"connectivity":"iscsi","mpathDevice":"dm-3","sysDevices":",sdb,sdc"} + +``` + + +Delete StatefulSet and then restart, in order to validate data (/data/FILE) remains in the persistent volume. + +```sh +$> kubectl delete statefulset/demo-statefulset +statefulset/demo-statefulset deleted + +### Wait until the pod is deleted. Once deleted the '"demo-statefulset" not found' is returned. 
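+### (Illustrative alternative) wait for the pod object to be removed instead of polling:
+$> kubectl wait --for=delete pod/demo-statefulset-0 --timeout=120s
+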
+$> kubectl get statefulset/demo-statefulset +NAME READY STATUS RESTARTS AGE +demo-statefulset-0 0/1 Terminating 0 91m + + +###### Establish an SSH connection and log into the worker node in order to see that the multipath was deleted and that the PV mountpoint no longer exists. +$> ssh k8s-node1 + +$>[k8s-node1] df | egrep pvc +$>[k8s-node1] multipath -ll +$>[k8s-node1] lsblk /dev/sdb /dev/sdc +lsblk: /dev/sdb: not a block device +lsblk: /dev/sdc: not a block device + + +###### Recreate the StatefulSet again in order to verify /data/FILE exists +$> kubectl create -f demo-statefulset-with-demo-pvc.yml +statefulset/demo-statefulset created + +$> kubectl exec demo-statefulset-0 ls /data/FILE +File +``` + + +Delete StatefulSet and PVC + +```sh +$> kubectl delete statefulset/demo-statefulset +statefulset/demo-statefulset deleted + +$> kubectl get statefulset/demo-statefulset +No resources found. + +$> kubectl delete pvc/demo-pvc +persistentvolumeclaim/demo-pvc deleted + +$> kubectl get pv,pvc +No resources found. +``` + + + +
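+Optionally, verify on the storage system side that the backing volume was removed as well. The PV created earlier has a `Delete` reclaim policy, so deleting the PVC is expected to remove the volume from the array too. A minimal check, reusing the XCLI utility shown earlier (output is illustrative):
+
+```sh
+###### The demo1_pvc-* volume should no longer be listed in the pool
+$> xcli vol_list pool=gold
+Name   Size (GB)   Master Name   Consistency Group   Pool   Creator   Written (GB)
+------------------------------------------------ ----------- ------------- ------------------- ------ --------- --------------
+```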
+
+
+
+
+
+## Troubleshooting
+```
+###### View the CSI pods, daemonset and statefulset:
+$> kubectl get all -n kube-system -l csi
+NAME                             READY   STATUS    RESTARTS   AGE
+pod/ibm-block-csi-controller-0   5/5     Running   0          9m36s
+pod/ibm-block-csi-node-jvmvh     3/3     Running   0          9m36s
+pod/ibm-block-csi-node-tsppw     3/3     Running   0          9m36s
+
+NAME                                 DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
+daemonset.apps/ibm-block-csi-node    2         2         2       2            2                           9m36s
+
+NAME                                        READY   AGE
+statefulset.apps/ibm-block-csi-controller   1/1     9m36s
+
+###### If pod/ibm-block-csi-controller-0 Status is not Running, troubleshoot by running the following:
+$> kubectl describe -n kube-system pod/ibm-block-csi-controller-0
+
+###### View the CSI controller logs
+$> kubectl logs -f -n kube-system ibm-block-csi-controller-0 -c ibm-block-csi-controller
+
+###### View the CSI daemonset node logs
+$> kubectl logs -f -n kube-system ibm-block-csi-node- -c ibm-block-csi-node
+```
+
+Additional driver details:
+```
+###### If the `feature-gates=CSIDriverRegistry` feature gate is set to `true`, a CSIDriver object is created automatically for the driver. View it by running:
+
+$> kubectl describe csidriver block.csi.ibm.com
+Name:         block.csi.ibm.com
+Namespace:
+Labels:
+Annotations:
+API Version:  csi.storage.k8s.io/v1alpha1
+Kind:         CSIDriver
+Metadata:
+  Creation Timestamp:  2019-07-15T12:04:32Z
+  Generation:          1
+  Resource Version:    1404
+  Self Link:           /apis/csi.storage.k8s.io/v1alpha1/csidrivers/block.csi.ibm.com
+  UID:                 b46db4ed-a6f8-11e9-b93e-005056a45d5f
+Spec:
+  Attach Required:            true
+  Pod Info On Mount Version:
+Events:
+
+
+$> kubectl get -n kube-system csidriver,sa,clusterrole,clusterrolebinding,statefulset,pod,daemonset | grep ibm-block-csi
+csidriver.storage.k8s.io/block.csi.ibm.com   2019-06-02T09:30:36Z
+serviceaccount/ibm-block-csi-controller-sa   1   2m16s
+clusterrole.rbac.authorization.k8s.io/ibm-block-csi-cluster-driver-registrar-role   2m16s
+clusterrole.rbac.authorization.k8s.io/ibm-block-csi-external-attacher-role   2m16s
+clusterrole.rbac.authorization.k8s.io/ibm-block-csi-external-provisioner-role   2m16s
+clusterrole.rbac.authorization.k8s.io/ibm-block-csi-external-snapshotter-role   2m16s
+clusterrolebinding.rbac.authorization.k8s.io/ibm-block-csi-cluster-driver-registrar-binding   2m16s
+clusterrolebinding.rbac.authorization.k8s.io/ibm-block-csi-external-attacher-binding   2m16s
+clusterrolebinding.rbac.authorization.k8s.io/ibm-block-csi-external-provisioner-binding   2m16s
+clusterrolebinding.rbac.authorization.k8s.io/ibm-block-csi-external-snapshotter-binding   2m16s
+statefulset.apps/ibm-block-csi-controller   1/1   2m16s
+pod/ibm-block-csi-controller-0   5/5   Running   0   2m16s
+pod/ibm-block-csi-node-xnfgp     3/3   Running   0   13m
+pod/ibm-block-csi-node-zgh5h     3/3   Running   0   13m
+daemonset.extensions/ibm-block-csi-node   2   2   2   2   2   13m
+```
+
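+To view logs of a specific node pod, first list the daemonset pods to get the full pod name and the worker node it runs on. The commands below are illustrative; they reuse the `csi` label and the example pod name from the output above:
+
+```
+###### List the node pods together with the worker nodes they run on
+$> kubectl get pods -n kube-system -l csi -o wide
+
+###### View the node container logs of a specific node pod, for example:
+$> kubectl logs -f -n kube-system ibm-block-csi-node-jvmvh -c ibm-block-csi-node
+```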
+
+
+ +## Licensing + +Copyright 2019 IBM Corp. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/chroot-host-wrapper.sh b/chroot-host-wrapper.sh new file mode 100755 index 000000000..a90575489 --- /dev/null +++ b/chroot-host-wrapper.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +ME=`basename "$0"` + +DIR="/host" # The CSI node daemonset mount the / of the host into /host inside the container. +if [ ! -d "${DIR}" ]; then + echo "Could not find docker engine host's filesystem at expected location: ${DIR}" + exit 1 +fi + +exec chroot $DIR /usr/bin/env -i PATH="/sbin:/bin:/usr/bin" ${ME} "${@:1}" + diff --git a/common/config.yaml b/common/config.yaml new file mode 100644 index 000000000..a560fb391 --- /dev/null +++ b/common/config.yaml @@ -0,0 +1,11 @@ +identity: + name: block.csi.ibm.com + version: 0.9.0 + capabilities: + - CONTROLLER_SERVICE + +controller: + publish_context_lun_parameter : "PUBLISH_CONTEXT_LUN" + publish_context_connectivity_parameter : "PUBLISH_CONTEXT_CONNECTIVITY" + publish_context_array_iqn : "PUBLISH_CONTEXT_ARRAY_IQN" + publish_context_fc_initiators : "PUBLISH_CONTEXT_ARRAY_FC_INITIATORS" diff --git a/controller/__init__.py b/controller/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/array_action/__init__.py b/controller/array_action/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/array_action/array_action_types.py b/controller/array_action/array_action_types.py new file mode 100644 index 000000000..763ed5bd9 --- /dev/null +++ b/controller/array_action/array_action_types.py @@ -0,0 +1,8 @@ +class Volume: + def __init__(self, vol_size_bytes, vol_id, vol_name, array_address, pool_name, array_type): + self.capacity_bytes = vol_size_bytes + self.id = vol_id + self.volume_name = vol_name + self.array_address = array_address + self.pool_name = pool_name + self.array_type = array_type diff --git a/controller/array_action/array_connection_manager.py b/controller/array_action/array_connection_manager.py new file mode 100644 index 000000000..930b95f18 --- /dev/null +++ b/controller/array_action/array_connection_manager.py @@ -0,0 +1,122 @@ +from threading import Lock +import socket + +from controller.common.csi_logger import get_stdout_logger +from controller.array_action.errors import NoConnectionAvailableException, FailedToFindStorageSystemType +from controller.array_action.array_mediator_xiv import XIVArrayMediator +from controller.array_action.array_mediator_svc import SVCArrayMediator + +connection_lock_dict = {} +array_connections_dict = {} + + +logger = get_stdout_logger() + + +def _socket_connect_test(ipaddr, port, timeout=1): + ''' + function to test socket connection to ip:port. + + :param ipaddr: ip address + :param port: port + :param timeout: connection timeout + + :return: 0 - on successful connection + -1 for exception + other return codes for connection errors. 
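+             (Illustrative usage: detect_array_type() below calls _socket_connect_test(endpoint, port) and treats only a return value of 0 as a reachable port.)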
+ + ''' + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(timeout) + ret = sock.connect_ex((ipaddr, port)) + sock.close() + return ret + except Exception as e: + logger.debug('socket_connect {}'.format(e)) + return -1 + + +class ArrayConnectionManager(object): + + + def __init__(self, user, password, endpoint, array_type=None): # TODO return the params back. + self.array_mediator_class_dict = { + XIVArrayMediator.array_type: XIVArrayMediator, + SVCArrayMediator.array_type: SVCArrayMediator} + + self.array_type = array_type + self.user = user + self.password = password + self.endpoints = endpoint + self.endpoint_key = ",".join(self.endpoints) + + if self.array_type is None: + self.array_type = self.detect_array_type() + + connection_lock_dict[self.endpoint_key] = Lock() + self.med_class = None + self.connected = False + + def __enter__(self): + logger.debug("in enter") + arr_connection = self.get_array_connection() + return arr_connection + + def __exit__(self, type, value, traceback): + logger.debug("closing the connection") + with connection_lock_dict[self.endpoint_key]: # TODO: when moving to python 3 add tiemout! + if self.connected: + self.med_class.disconnect() + logger.debug("reducing the connection count") + if array_connections_dict[self.endpoint_key] == 1: + del array_connections_dict[self.endpoint_key] + else: + array_connections_dict[self.endpoint_key] -= 1 + logger.debug("removing the connection : {}".format(array_connections_dict)) + self.connected = False + + def get_array_connection(self): + logger.debug("get array connection") + med_class = self.array_mediator_class_dict[self.array_type] + + with connection_lock_dict[self.endpoint_key]: # TODO: when moving to python 3 - add timeout to the lock! + if self.endpoint_key in array_connections_dict: + + if array_connections_dict[self.endpoint_key] < med_class.max_connections: + logger.debug("adding new connection ") + array_connections_dict[self.endpoint_key] += 1 + + else: + logger.error("failed to get connection. current connections: {}".format(array_connections_dict)) + raise NoConnectionAvailableException(self.endpoint_key) + else: + logger.debug("adding new connection to new endpoint : {}".format(self.endpoint_key)) + array_connections_dict[self.endpoint_key] = 1 + + logger.debug("got connection lock. 
array connection dict is: {}".format(array_connections_dict)) + try: + self.med_class = med_class(self.user, self.password, self.endpoints) + except Exception as ex: + if array_connections_dict[self.endpoint_key] == 1: + del array_connections_dict[self.endpoint_key] + else: + array_connections_dict[self.endpoint_key] -= 1 + + raise ex + + self.connected = True + + return self.med_class + + def detect_array_type(self): + logger.debug("detecting array connection type") + + for storage_type, port in [(XIVArrayMediator.array_type, XIVArrayMediator.port), + (SVCArrayMediator.array_type, SVCArrayMediator.port)]: # ds8k : 8452 + + for endpoint in self.endpoints: + if _socket_connect_test(endpoint, port) == 0: + logger.debug("storage array type is : {0}".format(storage_type)) + return storage_type + + raise FailedToFindStorageSystemType(self.endpoints) diff --git a/controller/array_action/array_mediator_interface.py b/controller/array_action/array_mediator_interface.py new file mode 100644 index 000000000..ae1833f02 --- /dev/null +++ b/controller/array_action/array_mediator_interface.py @@ -0,0 +1,243 @@ +import abc + + +class ArrayMediator: + + @abc.abstractmethod + def __init__(self, user, password, address): + """ + This is the init function for the class. + it should establish the connection to the storage system. + + Args: + user : user name for connecting to the endpoint + password : password for connecting to the endpoint + endpoint : storage array fqdn or ip + + Raises: + CredentialsError + """ + raise NotImplementedError + + @abc.abstractmethod + def disconnect(self): + """ + This function disconnect the storage system connection that was opened in the init phase. + + Returns: + None + """ + raise NotImplementedError + + @abc.abstractmethod + def create_volume(self, vol_name, size_in_bytes, capabilities, pool): + """ + This function should create a volume in the storage system. + + Args: + vol_name : name of the volume to be created in the stoarge system + size_in_bytes : size in bytes of the volume + capabilities : dict of capabilities {:} + pool : pool name to create the volume in. + + Returns: + volume_id : the volume WWN. + + Raises: + VolumeAlreadyExists + PoolDoesNotExist + PoolDoesNotMatchCapabilities + IllegalObjectName + VolumeNameIsNotSupported + PermissionDenied + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_volume(self, volume_id): + """ + This function should delete a volume in the storage system. + + Args: + vol_id : wwn of the volume to delete + + Returns: + None + + Raises: + volumeNotFound + PermissionDenied + """ + raise NotImplementedError + + @abc.abstractmethod + def get_volume(self, volume_name): + """ + This function return volume info about the volume. + + Args: + vol_name : name of the volume to be created in the storage system + + Returns: + Volume + + Raises: + volumeNotFound + IllegalObjectName + PermissionDenied + """ + raise NotImplementedError + + @abc.abstractmethod + def get_volume_mappings(self, volume_id): + """ + This function return volume mappings. + + Args: + volume_id : the volume WWN. + + Returns: + mapped_host_luns : a dict like this: {:,...} + + Raises: + volumeNotFound + """ + raise NotImplementedError + + @abc.abstractmethod + def map_volume(self, volume_id, host_name): + """ + This function will find the next available lun for the host and map the volume to it. + + Args: + volume_id : the volume WWN. + host_name : the name of the host to map the volume to. + + Returns: + lun : the lun_id the volume was mapped to. 
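+                  (In the SVC and XIV mediators in this change, the lun is returned as a string, e.g. "3".)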
+ + Raises: + NoAvailableLun + LunAlreadyInUse + volumeNotFound + hostNotFound + PermissionDenied + MappingError + """ + raise NotImplementedError + + @abc.abstractmethod + def get_array_iqns(self): + """ + This function will return the iscsi name of the storage array + + Args: + None + + Returns: + iscsi_names : list of iscsi addressses of the storage + + Raises: + None + """ + raise NotImplementedError + + @abc.abstractmethod + def get_array_fc_wwns(self, host_name=None): + """ + This function will return the wwn of the connected + FC port of the storage array + + Args: + None + + Returns: + wwn : the wwn of the storage + + Raises: + None + """ + raise NotImplementedError + + @abc.abstractmethod + def get_host_by_host_identifiers(self, initiators): + """ + This function will find the host name by iscsi iqn or fc wwns. + + Args: + initiators : initiators (e.g. fc wwns, iqn) of the wanted host. + + Returns: + connectivity_types : list of connectivity types ([iscis, fc] or just [iscsi],..) + hostname : the name of the host + + Raises: + hostNotFound + multipleHostsFoundError + PermissionDenied + """ + raise NotImplementedError + + @abc.abstractmethod + def validate_supported_capabilities(self, capabilities): + """ + This function will check if the capabilities passed to the create volume are valid + + Args: + capabilities : as passed from the storage class + + Returns: + None + + Raises: + CapabilityNotSupported + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def array_type(self): + """ + The storage system type. + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def port(self): + """ + The storage system management port number. + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def max_vol_name_length(self): + """ + The max number of concurrent connections to the storage system. + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def max_connections(self): + """ + The max number of concurrent connections to the storage system. + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def minimal_volume_size_in_bytes(self): + """ + The minimal volume size in bytes (used in case trying to provision volume with zero size). 
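+        (For example, in this change the SVC mediator returns 512 bytes and the XIV/A9000 mediator returns 1 GiB.)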
+ """ + raise NotImplementedError + + @property + @abc.abstractmethod + def max_lun_retries(self): + """ + The maximum number of times a map operation will retry if lun is already in use + """ + raise NotImplementedError diff --git a/controller/array_action/array_mediator_svc.py b/controller/array_action/array_mediator_svc.py new file mode 100644 index 000000000..33bd99020 --- /dev/null +++ b/controller/array_action/array_mediator_svc.py @@ -0,0 +1,406 @@ +from pysvc.unified.client import connect +from pysvc import errors as svc_errors +from pysvc.unified.response import CLIFailureError +from controller.common.csi_logger import get_stdout_logger +from controller.array_action.array_mediator_interface import ArrayMediator +from controller.array_action.array_action_types import Volume +import controller.array_action.errors as controller_errors +from controller.array_action.utils import classproperty +import controller.array_action.config as config + +array_connections_dict = {} +logger = get_stdout_logger() + +OBJ_NOT_FOUND = 'CMMVC5753E' +NAME_NOT_MEET = 'CMMVC5754E' +SPECIFIED_OBJ_NOT_EXIST = 'CMMVC5804E' +VOL_ALREADY_MAPPED = 'CMMVC5878E' +VOL_ALREADY_UNMAPPED = 'CMMVC5842E' +OBJ_ALREADY_EXIST = 'CMMVC6035E' +VOL_NOT_FOUND = 'CMMVC8957E' +POOL_NOT_MATCH_VOL_CAPABILITIES = 'CMMVC9292E' +NOT_REDUCTION_POOL = 'CMMVC9301E' + + +def is_warning_message(ex): + """ Return True if the exception message is warning """ + info_seperated_by_quotation = str(ex).split('"') + message = info_seperated_by_quotation[1] + word_in_message = message.split() + message_tag = word_in_message[0] + if message_tag[-1] == 'W': + return True + return False + + +def build_kwargs_from_capabilities(capabilities, pool_name, volume_name, + volume_size): + cli_kwargs = {} + cli_kwargs.update({ + 'name': volume_name, + 'unit': 'b', + 'size': volume_size, + 'pool': pool_name + }) + # if capabilities == None, create default capability volume thick + capability = capabilities.get(config.CAPABILITIES_SPACEEFFICIENCY) + if capability: + capability = capability.lower() + if capability == config.CAPABILITY_THIN: + cli_kwargs.update({'thin': True}) + elif capability == config.CAPABILITY_COMPRESSED: + cli_kwargs.update({'compressed': True}) + elif capability == config.CAPABILITY_DEDUPLICATED: + cli_kwargs.update({'compressed': True, 'deduplicated': True}) + + return cli_kwargs + + +class SVCArrayMediator(ArrayMediator): + ARRAY_ACTIONS = {} + BLOCK_SIZE_IN_BYTES = 512 + MAX_LUN_NUMBER = 511 + MIN_LUN_NUMBER = 0 + + @classproperty + def array_type(self): + return 'SVC' + + @classproperty + def port(self): + return 22 + + @classproperty + def max_vol_name_length(self): + return 64 + + @classproperty + def max_connections(self): + return 2 + + @classproperty + def minimal_volume_size_in_bytes(self): + return 512 # 512 Bytes + + @classproperty + def max_lun_retries(self): + return 10 + + def __init__(self, user, password, endpoint): + self.user = user + self.password = password + self.client = None + # SVC only accept one IP address + if len(endpoint) == 0 or len(endpoint) > 1: + logger.error("SVC only support one cluster IP") + raise controller_errors.StorageManagementIPsNotSupportError( + endpoint) + self.endpoint = endpoint[0] + + logger.debug("in init") + self._connect() + + def _connect(self): + logger.debug("Connecting to SVC {0}".format(self.endpoint)) + try: + self.client = connect(self.endpoint, username=self.user, + password=self.password) + except (svc_errors.IncorrectCredentials, + svc_errors.StorageArrayClientException): + 
raise controller_errors.CredentialsError(self.endpoint) + + def disconnect(self): + if self.client: + self.client.close() + + def _generate_volume_response(self, cli_volume): + return Volume( + int(cli_volume.capacity), + cli_volume.vdisk_UID, + cli_volume.name, + self.endpoint, + cli_volume.mdisk_grp_name, + self.array_type) + + def get_volume(self, vol_name): + logger.debug("Get volume : {}".format(vol_name)) + cli_volume = None + try: + cli_volume = self.client.svcinfo.lsvdisk( + bytes=True, object_id=vol_name).as_single_element + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + if (OBJ_NOT_FOUND in ex.my_message or + NAME_NOT_MEET in ex.my_message): + logger.error("Volume not found") + raise controller_errors.VolumeNotFoundError(vol_name) + except Exception as ex: + logger.exception(ex) + raise ex + + if not cli_volume: + raise controller_errors.VolumeNotFoundError(vol_name) + logger.debug("cli volume returned : {}".format(cli_volume)) + return self._generate_volume_response(cli_volume) + + def validate_supported_capabilities(self, capabilities): + logger.debug("validate_supported_capabilities for " + "capabilities : {0}".format(capabilities)) + # Currently, we only support one capability "SpaceEfficiency" + # The value should be: "thin/thick/compressed/deduplicated" + if (capabilities and capabilities.get( + config.CAPABILITIES_SPACEEFFICIENCY).lower() not in + [config.CAPABILITY_THIN, config.CAPABILITY_THICK, + config.CAPABILITY_COMPRESSED, + config.CAPABILITY_DEDUPLICATED]): + logger.error("capability value is not " + "supported {0}".format(capabilities)) + raise controller_errors.StorageClassCapabilityNotSupported( + capabilities) + + logger.info("Finished validate_supported_capabilities") + + def _convert_size_bytes(self, size_in_bytes): + # SVC volume size must be the multiple of 512 bytes + ret = size_in_bytes % self.BLOCK_SIZE_IN_BYTES + if ret > 0: + return size_in_bytes - ret + 512 + return size_in_bytes + + def _get_vol_by_wwn(self, volume_id): + filter_value = 'vdisk_UID=' + volume_id + vol_by_wwn = self.client.svcinfo.lsvdisk( + filtervalue=filter_value).as_single_element + if not vol_by_wwn: + raise controller_errors.VolumeNotFoundError(volume_id) + + vol_name = vol_by_wwn.name + logger.debug("found volume name : {0}".format(vol_name)) + return vol_name + + def create_volume(self, name, size_in_bytes, capabilities, pool): + logger.info("creating volume with name : {}. size : {} . 
in pool : {} " + "with capabilities : {}".format(name, size_in_bytes, pool, + capabilities)) + try: + size = self._convert_size_bytes(size_in_bytes) + cli_kwargs = build_kwargs_from_capabilities(capabilities, pool, + name, size) + self.client.svctask.mkvolume(**cli_kwargs) + vol = self.get_volume(name) + logger.info("finished creating cli volume : {}".format(vol)) + return vol + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.error(msg="Cannot create volume {0}, " + "Reason is: {1}".format(name, ex)) + if OBJ_ALREADY_EXIST in ex.my_message: + raise controller_errors.VolumeAlreadyExists(name, + self.endpoint) + if NAME_NOT_MEET in ex.my_message: + raise controller_errors.PoolDoesNotExist(pool, + self.endpoint) + if (POOL_NOT_MATCH_VOL_CAPABILITIES in ex.my_message + or NOT_REDUCTION_POOL in ex.my_message): + raise controller_errors.PoolDoesNotMatchCapabilities( + pool, capabilities, ex) + raise ex + except Exception as ex: + logger.exception(ex) + raise ex + + def delete_volume(self, volume_id): + logger.info("Deleting volume with id : {0}".format(volume_id)) + vol_name = self._get_vol_by_wwn(volume_id) + try: + self.client.svctask.rmvolume(vdisk_id=vol_name) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.warning("Failed to delete volume {}".format(vol_name)) + if (OBJ_NOT_FOUND in ex.my_message + or VOL_NOT_FOUND in ex.my_message): + raise controller_errors.VolumeNotFoundError(vol_name) + else: + raise ex + except Exception as ex: + logger.exception(ex) + raise ex + + logger.info("Finished volume deletion. id : {0}".format(volume_id)) + + def get_host_by_host_identifiers(self, initiators): + logger.debug("Getting host id for initiators : {0}".format(initiators)) + host_list = self.client.svcinfo.lshost() + iscsi_host, fc_host = None, None + for host in host_list: + host_detail = self.client.svcinfo.lshost( + object_id=host.get('id', '')).as_single_element + iscsi_names = host_detail.get('iscsi_name', '') + wwns_value = host_detail.get('WWPN', []) + if not isinstance(wwns_value, list): + wwns_value = [wwns_value, ] + if initiators.is_array_iscsi_iqns_match([iscsi_names]): + iscsi_host = host_detail.get('name', '') + logger.debug("found iscsi iqn in list : {0} for host : " + "{1}".format(initiators.iscsi_iqn, iscsi_host)) + if initiators.is_array_wwns_match(wwns_value): + fc_host = host_detail.get('name', '') + logger.debug("found fc wwns in list : {0} for host : " + "{1}".format(initiators.fc_wwns, fc_host)) + if iscsi_host and fc_host: + if iscsi_host == fc_host: + return fc_host, [config.ISCSI_CONNECTIVITY_TYPE, + config.FC_CONNECTIVITY_TYPE] + else: + raise controller_errors.MultipleHostsFoundError(initiators, fc_host) + elif iscsi_host: + logger.debug("found host : {0} with iqn : {1}".format(iscsi_host, initiators.iscsi_iqn)) + return iscsi_host, [config.ISCSI_CONNECTIVITY_TYPE] + elif fc_host: + logger.debug("found host : {0} with fc wwn : {1}".format(fc_host, initiators.fc_wwns)) + return fc_host, [config.FC_CONNECTIVITY_TYPE] + else: + logger.debug("can not found host by using initiators: {0} ".format(initiators)) + raise controller_errors.HostNotFoundError(initiators) + + def get_volume_mappings(self, volume_id): + logger.debug("Getting volume mappings for volume id : " + "{0}".format(volume_id)) + vol_name = self._get_vol_by_wwn(volume_id) + logger.debug("vol name : {0}".format(vol_name)) + try: + mapping_list = 
self.client.svcinfo.lsvdiskhostmap(vdisk_name=vol_name) + res = {} + for mapping in mapping_list: + logger.debug("mapping for vol is :{0}".format(mapping)) + res[mapping.get('host_name', '')] = mapping.get('SCSI_id', '') + except(svc_errors.CommandExecutionError, CLIFailureError) as ex: + logger.error(ex) + raise controller_errors.VolumeNotFoundError(volume_id) + + return res + + def _get_used_lun_ids_from_host(self, host_name): + logger.debug("getting used lun ids for host :{0}".format(host_name)) + luns_in_use = set() + + try: + for mapping in self.client.svcinfo.lshostvdiskmap(host=host_name): + luns_in_use.add(mapping.get('SCSI_id', '')) + except(svc_errors.CommandExecutionError, CLIFailureError) as ex: + logger.error(ex) + raise controller_errors.HostNotFoundError(host_name) + logger.debug("The used lun ids for host :{0}".format(luns_in_use)) + + return luns_in_use + + def get_first_free_lun(self, host_name): + logger.debug("getting first free lun id for " + "host :{0}".format(host_name)) + lun = None + luns_in_use = self._get_used_lun_ids_from_host(host_name) + # Today we have SS_MAX_HLUN_MAPPINGS_PER_HOST as 2048 on high end + # platforms (SVC / V7000 etc.) and 512 for the lower + # end platforms (V3500 etc.). This limits the number of volumes that + # can be mapped to a single host. (Note that some hosts such as linux + # do not support more than 255 or 511 mappings today irrespective of + # our constraint). + for candidate in range(self.MIN_LUN_NUMBER, self.MAX_LUN_NUMBER + 1): + if str(candidate) not in luns_in_use: + logger.debug("First available LUN number for {0} is " + "{1}".format(host_name, str(candidate))) + lun = str(candidate) + break + if not lun: + raise controller_errors.NoAvailableLunError(host_name) + logger.debug("The first available lun is : {0}".format(lun)) + return lun + + def map_volume(self, volume_id, host_name): + logger.debug("mapping volume : {0} to host : " + "{1}".format(volume_id, host_name)) + vol_name = self._get_vol_by_wwn(volume_id) + cli_kwargs = { + 'host': host_name, + 'object_id': vol_name, + 'force': True + } + + try: + lun = self.get_first_free_lun(host_name) + cli_kwargs.update({'scsi': lun}) + self.client.svctask.mkvdiskhostmap(**cli_kwargs) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.error(msg="Map volume {0} to host {1} failed. Reason " + "is: {2}".format(vol_name, host_name, ex)) + if NAME_NOT_MEET in ex.my_message: + raise controller_errors.HostNotFoundError(host_name) + if SPECIFIED_OBJ_NOT_EXIST in ex.my_message: + raise controller_errors.VolumeNotFoundError(vol_name) + if VOL_ALREADY_MAPPED in ex.my_message: + raise controller_errors.LunAlreadyInUseError(lun, + host_name) + raise controller_errors.MappingError(vol_name, host_name, ex) + except Exception as ex: + logger.exception(ex) + raise ex + + return str(lun) + + def unmap_volume(self, volume_id, host_name): + logger.debug("un-mapping volume : {0} from host : " + "{1}".format(volume_id, host_name)) + vol_name = self._get_vol_by_wwn(volume_id) + + cli_kwargs = { + 'host': host_name, + 'vdisk_id': vol_name + } + + try: + self.client.svctask.rmvdiskhostmap(**cli_kwargs) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if not is_warning_message(ex.my_message): + logger.error(msg="Map volume {0} to host {1} failed. 
Reason " + "is: {2}".format(vol_name, host_name, ex)) + if NAME_NOT_MEET in ex.my_message: + raise controller_errors.HostNotFoundError(host_name) + if OBJ_NOT_FOUND in ex.my_message: + raise controller_errors.VolumeNotFoundError(vol_name) + if VOL_ALREADY_UNMAPPED in ex.my_message: + raise controller_errors.VolumeAlreadyUnmappedError( + vol_name) + raise controller_errors.UnMappingError(vol_name, + host_name, ex) + except Exception as ex: + logger.exception(ex) + raise ex + + def get_array_iqns(self): + logger.debug("Getting array nodes iscsi name") + try: + nodes_list = self.client.svcinfo.lsnode() + array_iqns = [node.iscsi_name for node in nodes_list + if node.status.lower() == "online"] + except Exception as ex: + logger.exception(ex) + raise ex + return array_iqns + + def get_array_fc_wwns(self, host_name): + logger.debug("Getting the connected fc port wwn value from array " + "related to host : {}.".format(host_name)) + fc_port_wwns = [] + try: + fc_wwns = self.client.svcinfo.lsfabric(host=host_name) + for wwn in fc_wwns: + state = wwn.get('state', '') + if state == 'active' or state == 'inactive': + fc_port_wwns.append(wwn.get('local_wwpn', '')) + logger.debug("Getting fc wwns : {}".format(fc_port_wwns)) + return fc_port_wwns + except(svc_errors.CommandExecutionError, CLIFailureError) as ex: + logger.error(msg="Failed to get array fc wwn. Reason " + "is: {0}".format(ex)) + raise ex diff --git a/controller/array_action/array_mediator_xiv.py b/controller/array_action/array_mediator_xiv.py new file mode 100644 index 000000000..4a6b55be6 --- /dev/null +++ b/controller/array_action/array_mediator_xiv.py @@ -0,0 +1,276 @@ +from random import randint + +from pyxcli.client import XCLIClient +from pyxcli import errors as xcli_errors +from controller.common.csi_logger import get_stdout_logger +from controller.array_action.array_mediator_interface import ArrayMediator +from controller.array_action.array_action_types import Volume +import controller.array_action.errors as controller_errors +from controller.array_action.config import ISCSI_CONNECTIVITY_TYPE +from controller.array_action.config import FC_CONNECTIVITY_TYPE +from controller.array_action.utils import classproperty +from controller.common.utils import string_to_array + +array_connections_dict = {} +logger = get_stdout_logger() + + +class XIVArrayMediator(ArrayMediator): + ARRAY_ACTIONS = {} + BLOCK_SIZE_IN_BYTES = 512 + MAX_LUN_NUMBER = 250 + MIN_LUN_NUMBER = 1 + + @classproperty + def array_type(self): + return 'A9000' + + @classproperty + def port(self): + return 7778 + + @classproperty + def max_vol_name_length(self): + return 63 + + @classproperty + def max_connections(self): + return 2 + + @classproperty + def minimal_volume_size_in_bytes(self): + return 1 * 1024 * 1024 * 1024 # 1 GiB + + @classproperty + def max_lun_retries(self): + return 10 + + def __init__(self, user, password, endpoint): + self.user = user + self.password = password + self.endpoint = endpoint + self.client = None + + logger.debug("in init") + self._connect() + + def _connect(self): + logger.debug("connecting to endpoint") + try: + self.client = XCLIClient.connect_multiendpoint_ssl( + self.user, + self.password, + self.endpoint + ) + + except xcli_errors.CredentialsError: + raise controller_errors.CredentialsError(self.endpoint) + except xcli_errors.XCLIError: + raise controller_errors.CredentialsError(self.endpoint) + + def disconnect(self): + if self.client and self.client.is_connected(): + self.client.close() + + def _convert_size_blocks_to_bytes(self, 
size_in_blocks): + return size_in_blocks * self.BLOCK_SIZE_IN_BYTES + + def _generate_volume_response(self, cli_volume): + return Volume(self._convert_size_blocks_to_bytes(int(cli_volume.capacity)), + cli_volume.wwn, + cli_volume.name, + self.endpoint, + cli_volume.pool_name, + self.array_type) + + def get_volume(self, vol_name): + logger.debug("Get volume : {}".format(vol_name)) + try: + cli_volume = self.client.cmd.vol_list(vol=vol_name).as_single_element + except xcli_errors.IllegalNameForObjectError as ex: + logger.exception(ex) + raise controller_errors.IllegalObjectName(ex.status) + + logger.debug("cli volume returned : {}".format(cli_volume)) + if not cli_volume: + raise controller_errors.VolumeNotFoundError(vol_name) + + array_vol = self._generate_volume_response(cli_volume) + return array_vol + + def validate_supported_capabilities(self, capabilities): + logger.info("validate_supported_capabilities for capabilities : {0}".format(capabilities)) + # for a9k there should be no capabilities + if capabilities or len(capabilities) > 0: + raise controller_errors.StorageClassCapabilityNotSupported(capabilities) + + logger.info("Finished validate_supported_capabilities") + + def _convert_size_bytes_to_blocks(self, size_in_bytes): + """:rtype: float""" + return float(size_in_bytes) / self.BLOCK_SIZE_IN_BYTES + + def create_volume(self, name, size_in_bytes, capabilities, pool): + logger.info("creating volume with name : {}. size : {} . in pool : {} with capabilities : {}".format( + name, size_in_bytes, pool, capabilities)) + + size_in_blocks = int(self._convert_size_bytes_to_blocks(size_in_bytes)) + + try: + cli_volume = self.client.cmd.vol_create(vol=name, size_blocks=size_in_blocks, + pool=pool).as_single_element + logger.info("finished creating cli volume : {}".format(cli_volume)) + return self._generate_volume_response(cli_volume) + except xcli_errors.IllegalNameForObjectError as ex: + logger.exception(ex) + raise controller_errors.IllegalObjectName(ex.status) + except xcli_errors.VolumeExistsError as ex: + logger.exception(ex) + raise controller_errors.VolumeAlreadyExists(name, self.endpoint) + except xcli_errors.PoolDoesNotExistError as ex: + logger.exception(ex) + raise controller_errors.PoolDoesNotExist(pool, self.endpoint) + except xcli_errors.OperationForbiddenForUserCategoryError as ex: + logger.exception(ex) + raise controller_errors.PermissionDeniedError("create vol : {0}".format(name)) + + def _get_vol_by_wwn(self, volume_id): + vol_by_wwn = self.client.cmd.vol_list(wwn=volume_id).as_single_element + if not vol_by_wwn: + raise controller_errors.VolumeNotFoundError(volume_id) + + vol_name = vol_by_wwn.name + logger.debug("found volume name : {0}".format(vol_name)) + return vol_name + + def delete_volume(self, volume_id): + logger.info("Deleting volume with id : {0}".format(volume_id)) + vol_name = self._get_vol_by_wwn(volume_id) + + try: + self.client.cmd.vol_delete(vol=vol_name) + except xcli_errors.VolumeBadNameError as ex: + logger.exception(ex) + raise controller_errors.VolumeNotFoundError(vol_name) + + except xcli_errors.OperationForbiddenForUserCategoryError as ex: + logger.exception(ex) + raise controller_errors.PermissionDeniedError("delete vol : {0}".format(vol_name)) + + logger.info("Finished volume deletion. 
id : {0}".format(volume_id)) + + def get_host_by_host_identifiers(self, initiators): + logger.debug("Getting host id for initiators : {0}".format(initiators)) + matching_hosts_set = set() + port_types = [] + + host_list = self.client.cmd.host_list().as_list + for host in host_list: + host_iscsi_ports = string_to_array(host.iscsi_ports, ',') + host_fc_ports = string_to_array(host.fc_ports, ',') + if initiators.is_array_wwns_match(host_fc_ports): + matching_hosts_set.add(host.name) + logger.debug("found host : {0}, by fc port : {1}".format(host.name, host_fc_ports)) + port_types.append(FC_CONNECTIVITY_TYPE) + if initiators.is_array_iscsi_iqns_match(host_iscsi_ports): + matching_hosts_set.add(host.name) + logger.debug("found host : {0}, by iscsi port : {1}".format(host.name, host_iscsi_ports)) + port_types.append(ISCSI_CONNECTIVITY_TYPE) + matching_hosts = sorted(matching_hosts_set) + if not matching_hosts: + raise controller_errors.HostNotFoundError(initiators) + elif len(matching_hosts) > 1: + raise controller_errors.MultipleHostsFoundError(initiators, matching_hosts) + return matching_hosts[0], port_types + + def get_volume_mappings(self, volume_id): + logger.debug("Getting volume mappings for volume id : {0}".format(volume_id)) + vol_name = self._get_vol_by_wwn(volume_id) + logger.debug("vol name : {0}".format(vol_name)) + mapping_list = self.client.cmd.vol_mapping_list(vol=vol_name).as_list + res = {} + for mapping in mapping_list: + logger.debug("mapping for vol is :{0}".format(mapping)) + res[mapping.host] = mapping.lun + + return res + + def _get_next_available_lun(self, host_name): + logger.debug("getting host mapping list for host :{0}".format(host_name)) + try: + host_mapping_list = self.client.cmd.mapping_list(host=host_name).as_list + except xcli_errors.HostBadNameError as ex: + logger.exception(ex) + raise controller_errors.HostNotFoundError(host_name) + + luns_in_use = set([host_mapping.lun for host_mapping in host_mapping_list]) + logger.debug("luns in use : {0}".format(luns_in_use)) + + # try to use random lun number just in case there are many calls at the same time to reduce re-tries + all_available_luns = [i for i in range(self.MIN_LUN_NUMBER, self.MAX_LUN_NUMBER + 1) if i not in luns_in_use] + + if len(all_available_luns) == 0: + raise controller_errors.NoAvailableLunError(host_name) + + index = randint(0, len(all_available_luns) - 1) + lun = all_available_luns[index] + logger.debug("next random available lun is : {0}".format(lun)) + return lun + + def map_volume(self, volume_id, host_name): + logger.debug("mapping volume : {0} to host : {1}".format(volume_id, host_name)) + vol_name = self._get_vol_by_wwn(volume_id) + lun = self._get_next_available_lun(host_name) + + try: + self.client.cmd.map_vol(host=host_name, vol=vol_name, lun=lun) + except xcli_errors.OperationForbiddenForUserCategoryError as ex: + logger.exception(ex) + raise controller_errors.PermissionDeniedError("map volume : {0} to host : {1}".format(volume_id, host_name)) + except xcli_errors.VolumeBadNameError as ex: + logger.exception(ex) + raise controller_errors.VolumeNotFoundError(vol_name) + except xcli_errors.HostBadNameError as ex: + logger.exception(ex) + raise controller_errors.HostNotFoundError(host_name) + except xcli_errors.CommandFailedRuntimeError as ex: + logger.exception(ex) + if "LUN is already in use" in ex.status: + raise controller_errors.LunAlreadyInUseError(lun, host_name) + else: + raise controller_errors.MappingError(vol_name, host_name, ex) + + return str(lun) + + def 
unmap_volume(self, volume_id, host_name): + logger.debug("un-mapping volume : {0} from host : {1}".format(volume_id, host_name)) + + vol_name = self._get_vol_by_wwn(volume_id) + + try: + self.client.cmd.unmap_vol(host=host_name, vol=vol_name) + except xcli_errors.VolumeBadNameError as ex: + logger.exception(ex) + raise controller_errors.VolumeNotFoundError(vol_name) + except xcli_errors.HostBadNameError as ex: + logger.exception(ex) + raise controller_errors.HostNotFoundError(host_name) + except xcli_errors.OperationForbiddenForUserCategoryError as ex: + logger.exception(ex) + raise controller_errors.PermissionDeniedError( + "unmap volume : {0} from host : {1}".format(volume_id, host_name)) + except xcli_errors.CommandFailedRuntimeError as ex: + logger.exception(ex) + if "The requested mapping is not defined" in ex.status: + raise controller_errors.VolumeAlreadyUnmappedError(vol_name) + else: + raise controller_errors.UnMappingError(vol_name, host_name, ex) + + def get_array_iqns(self): + config_get_list = self.client.cmd.config_get().as_list + array_iqn = [a for a in config_get_list if a["name"] == "iscsi_name"][0]["value"] + return [array_iqn] + + def get_array_fc_wwns(self, host_name): + fc_wwns_objects = self.client.cmd.fc_port_list() + return [port.wwpn for port in fc_wwns_objects if port.port_state == 'Online' and port.role == 'Target'] diff --git a/controller/array_action/config.py b/controller/array_action/config.py new file mode 100644 index 000000000..69809393b --- /dev/null +++ b/controller/array_action/config.py @@ -0,0 +1,7 @@ +ISCSI_CONNECTIVITY_TYPE = "iscsi" +FC_CONNECTIVITY_TYPE = "fc" +CAPABILITIES_SPACEEFFICIENCY = 'SpaceEfficiency' +CAPABILITY_THIN = 'thin' +CAPABILITY_COMPRESSED = 'compressed' +CAPABILITY_DEDUPLICATED = 'deduplicated' +CAPABILITY_THICK = 'thick' diff --git a/controller/array_action/errors.py b/controller/array_action/errors.py new file mode 100644 index 000000000..a4b88a4b1 --- /dev/null +++ b/controller/array_action/errors.py @@ -0,0 +1,121 @@ +import controller.array_action.messages as messages + + +class BaseArrayActionException(Exception): + + def __str__(self, *args, **kwargs): + return self.message + + +class NoConnectionAvailableException(BaseArrayActionException): + + def __init__(self, endpoint): + self.message = messages.NoConnectionAvailableException_message.format(endpoint) + + +class StorageManagementIPsNotSupportError(BaseArrayActionException): + + def __init__(self, endpoint): + self.message = messages.StorageManagementIPsNotSupportError_message.format(endpoint) + + +class CredentialsError(BaseArrayActionException): + + def __init__(self, endpoint): + self.message = messages.CredentialsError_message.format(endpoint) + + +class VolumeNotFoundError(BaseArrayActionException): + + def __init__(self, name): + self.message = messages.VolumeNotFoundError_message.format(name) + + +class IllegalObjectName(BaseArrayActionException): + + def __init__(self, msg): + self.message = "{0}".format(msg) + + +class PoolDoesNotMatchCapabilities(BaseArrayActionException): + + def __init__(self, pool, capabilities, error): + self.message = messages.PoolDoesNotMatchCapabilities_message.format(pool, capabilities, error) + + +class StorageClassCapabilityNotSupported(BaseArrayActionException): + + def __init__(self, capabilities): + self.message = messages.StorageClassCapabilityNotSupported_message.format(capabilities) + + +class VolumeAlreadyExists(BaseArrayActionException): + + def __init__(self, volume, array): + self.message = 
messages.VolumeAlreadyExists_message.format(volume, array) + + +class PoolDoesNotExist(BaseArrayActionException): + + def __init__(self, pool, array): + self.message = messages.PoolDoesNotExist_message.format(pool, array) + + +class FailedToFindStorageSystemType(BaseArrayActionException): + + def __init__(self, endpoint): + self.message = messages.FailedToFindStorageSystemType_message.format(endpoint) + + +class PermissionDeniedError(BaseArrayActionException): + + def __init__(self, operation): + self.message = messages.PermissionDeniedError_message.format(operation) + + +class MultipleHostsFoundError(BaseArrayActionException): + + def __init__(self, initiators, hosts): + self.message = messages.MultipleHostsFoundError_message.format(initiators, hosts) + + +class HostNotFoundError(BaseArrayActionException): + + def __init__(self, iscsi_iqn): + self.message = messages.HostNotFoundError_message.format(iscsi_iqn) + + +class NoAvailableLunError(BaseArrayActionException): + + def __init__(self, host): + self.message = messages.NoAvailableLunError_message.format(host) + + +class LunAlreadyInUseError(BaseArrayActionException): + + def __init__(self, lun, host): + self.message = messages.LunAlreadyInUse_message.format(lun, host) + + +class MappingError(BaseArrayActionException): + + def __init__(self, vol, host, err): + self.message = messages.MappingError_message.format(vol, host, err) + + +class VolumeAlreadyUnmappedError(BaseArrayActionException): + + def __init__(self, vol): + self.message = messages.VolumeAlreadyUnmapped_message.format(vol) + + +class UnMappingError(BaseArrayActionException): + + def __init__(self, vol, host, err): + self.message = messages.UnMappingError_message.format(vol, host, err) + + +class BadNodeIdError(BaseArrayActionException): + + def __init__(self, name): + self.message = messages.BadNodeIdError_message.format(name) diff --git a/controller/array_action/messages.py b/controller/array_action/messages.py new file mode 100644 index 000000000..c2e9475a1 --- /dev/null +++ b/controller/array_action/messages.py @@ -0,0 +1,35 @@ +NoConnectionAvailableException_message = "No connection available to endpoint : {0}" + +CredentialsError_message = "Credential error has occurred while connecting to endpoint : {0} " + +StorageManagementIPsNotSupportError_message = "Invalid Management IP for SVC : {0} " + +VolumeNotFoundError_message = "Volume was not found : {0} " + +PoolDoesNotMatchCapabilities_message = "Pool : {0} does not match the following capabilities : {1} . error : {2}" + +StorageClassCapabilityNotSupported_message = "Storage class capability is not supported : {0} " + +VolumeAlreadyExists_message = "Volume already exists : {0} , array : {1}" + +PoolDoesNotExist_message = "Pool does not exist: {0} , array : {1}" + +FailedToFindStorageSystemType_message = "Could not identify the type for endpoint: {0} " + +PermissionDeniedError_message = "Permission was denied to operation : {0}" + +MultipleHostsFoundError_message = "Multiple hosts found for port(s): {0}. hosts are : {1}" + +HostNotFoundError_message = "Host for node: {0} was not found" + +NoAvailableLunError_message = "No available lun was found for host : {0}" + +LunAlreadyInUse_message = "Lun : {0} is already mapped for host : {1}" + +MappingError_message = "Mapping error has occured while mapping vol : {0} to host : {1}. error : {2}" + +VolumeAlreadyUnmapped_message = "Volume: {0} is already unmapped." + +UnMappingError_message = "Unmapping error has occurred for vol : {0} and host : {1}. 
error : {2}" + +BadNodeIdError_message = "Bad node id format for node id : {0}" diff --git a/controller/array_action/utils.py b/controller/array_action/utils.py new file mode 100644 index 000000000..615f79010 --- /dev/null +++ b/controller/array_action/utils.py @@ -0,0 +1,8 @@ + +class classproperty(object): + + def __init__(self, function): + self._function = function + + def __get__(self, instance, owner): + return self._function(owner) diff --git a/controller/common/__init__.py b/controller/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/common/csi_logger.py b/controller/common/csi_logger.py new file mode 100644 index 000000000..59443c85a --- /dev/null +++ b/controller/common/csi_logger.py @@ -0,0 +1,30 @@ +import logging +import sys + +log_level="DEBUG" + +def get_stdout_logger(): + csi_logger = logging.getLogger("csi_logger") + + if not getattr(csi_logger, 'handler_set', None): + global log_level + csi_logger.setLevel(log_level) + handler = logging.StreamHandler(sys.stdout) + handler.setLevel(logging.DEBUG) + formatter = logging.Formatter( + '%(asctime)s %(levelname)s\t[%(thread)d] [%(threadName)s] (%(filename)s:%(funcName)s:%(lineno)d) - %(message)s') + handler.setFormatter(formatter) + csi_logger.addHandler(handler) + + csi_logger.handler_set = True + + return csi_logger + +def set_log_level(log_level_to_set): + """ + In order to set non-default log level this function should be called before first cal of get_stdout_logger + :param log_level_to_set: + """ + global log_level + if log_level_to_set: + log_level = log_level_to_set.upper() diff --git a/controller/common/node_info.py b/controller/common/node_info.py new file mode 100644 index 000000000..d785ac745 --- /dev/null +++ b/controller/common/node_info.py @@ -0,0 +1,54 @@ +import controller.controller_server.utils as utils +import controller.controller_server.config as config + +class NodeIdInfo: + def __init__(self, node_id): + """ + Args: + node_id: ,, + """ + node_name, iscsi_iqn, fc_wwns_str = utils.get_node_id_info(node_id) + fc_wwns = fc_wwns_str.split(config.PARAMETERS_FC_WWN_DELIMITER) + self.node_name = node_name + self.initiators = Initiators(iscsi_iqn.strip(), fc_wwns) + + +class Initiators: + """ + Object containing node initiators (e.g. 
iqn, fc_wwns) + """ + def __init__(self, iscsi_iqn, fc_wwns): + """ + Args: + iscsi_iqn : iqn + fc_wwns : list of fc wwns + """ + self.iscsi_iqn = iscsi_iqn + self.fc_wwns = fc_wwns + self._fc_wwns_lowercase_set = set([wwn.lower() for wwn in fc_wwns]) + self._iscsi_iqn_lowercase = iscsi_iqn.lower() + + def is_array_wwns_match(self, host_wwns): + """ + Args: + host_wwns : storage host wwns list + + Returns: + Is current host wwns matches + """ + host_wwns_lower = [wwn.lower() for wwn in host_wwns] + return not self._fc_wwns_lowercase_set.isdisjoint(host_wwns_lower) + + def is_array_iscsi_iqns_match(self, host_iqns): + """ + Args: + host_iqns: storage host iqns list + + Returns: + Is current host iqns matches + """ + host_iqns_lower = [iqn.lower() for iqn in host_iqns] + return self._iscsi_iqn_lowercase in host_iqns_lower + + def __str__(self): + return "iscsi_iqn: " + self.iscsi_iqn + ", fc_wwns: " + ",".join(self.fc_wwns) diff --git a/controller/common/utils.py b/controller/common/utils.py new file mode 100644 index 000000000..38361c2f4 --- /dev/null +++ b/controller/common/utils.py @@ -0,0 +1,27 @@ +import threading + +def set_current_thread_name(name): + """ + Sets current thread name if ame not None or empty string + + Args: + name : name to set + """ + if name: + current_thread = threading.current_thread() + current_thread.setName(name) + + +def string_to_array(str_val, separator): + """ + Args + str_val : string value + separator : string separator + Return + List as splitted string by separator after stripping whitespaces from each element + """ + if not str_val: + return [] + str_val = str_val.strip() + res = str_val.split(separator) + return [res_val.strip() for res_val in res] diff --git a/controller/controller_server/__init__.py b/controller/controller_server/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/controller_server/config.py b/controller/controller_server/config.py new file mode 100644 index 000000000..1c29de389 --- /dev/null +++ b/controller/controller_server/config.py @@ -0,0 +1,21 @@ +from controller.csi_general import csi_pb2 + +SUPPORTED_FS_TYPES = ["ext4", "xfs"] + +access_types = csi_pb2.VolumeCapability.AccessMode +SUPPORTED_ACCESS_MODE = [access_types.SINGLE_NODE_WRITER] + +SECRET_USERNAME_PARAMETER = "username" +SECRET_PASSWORD_PARAMETER = "password" +SECRET_ARRAY_PARAMETER = "management_address" + +PARAMETERS_POOL = "pool" +PARAMETERS_CAPABILITIES_SPACEEFFICIENCY = "SpaceEfficiency" +PARAMETERS_PREFIX = "volume_name_prefix" +PARAMETERS_CAPACITY_DELIMITER = "=" +PARAMETERS_CAPABILITIES_DELIMITER = "=" +PARAMETERS_VOLUME_ID_DELIMITER = ":" +PARAMETERS_NODE_ID_DELIMITER = ";" +PARAMETERS_FC_WWN_DELIMITER = ":" + +SUPPORTED_CONNECTIVITY_TYPES = 2 diff --git a/controller/controller_server/csi_controller_server.py b/controller/controller_server/csi_controller_server.py new file mode 100755 index 000000000..e1cb42c27 --- /dev/null +++ b/controller/controller_server/csi_controller_server.py @@ -0,0 +1,447 @@ +import grpc +import time +from optparse import OptionParser +import yaml +import os.path + +from concurrent import futures +from controller.csi_general import csi_pb2 +from controller.csi_general import csi_pb2_grpc +from controller.array_action.array_connection_manager import ArrayConnectionManager +from controller.common.csi_logger import get_stdout_logger +from controller.common.csi_logger import set_log_level +import controller.controller_server.config as config +from controller.array_action.config import 
FC_CONNECTIVITY_TYPE +import controller.controller_server.utils as utils +import controller.array_action.errors as controller_errors +from controller.controller_server.errors import ValidationException +import controller.controller_server.messages as messages +from controller.common.utils import set_current_thread_name +from controller.common.node_info import NodeIdInfo + +logger = None #is set in ControllerServicer::__init__ + + +class ControllerServicer(csi_pb2_grpc.ControllerServicer): + """ + gRPC server for Digestor Service + """ + + def __init__(self, array_endpoint): + # init logger + global logger + logger = get_stdout_logger() + + self.endpoint = array_endpoint + + my_path = os.path.abspath(os.path.dirname(__file__)) + path = os.path.join(my_path, "../../common/config.yaml") + + with open(path, 'r') as yamlfile: + self.cfg = yaml.load(yamlfile) # TODO: add the following when possible : Loader=yaml.FullLoader) + + def CreateVolume(self, request, context): + set_current_thread_name(request.name) + logger.info("create volume") + try: + utils.validate_create_volume_request(request) + except ValidationException as ex: + logger.error("failed request validation") + logger.exception(ex) + context.set_details(ex.message) + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + return csi_pb2.CreateVolumeResponse() + + volume_name = request.name + logger.debug("volume name : {}".format(volume_name)) + + secrets = request.secrets + user, password, array_addresses = utils.get_array_connection_info_from_secret(secrets) + + pool = request.parameters[config.PARAMETERS_POOL] + capabilities = { + key: value for key, value in request.parameters.items() if key in [ + config.PARAMETERS_CAPABILITIES_SPACEEFFICIENCY, + ] + } + + if config.PARAMETERS_PREFIX in request.parameters: + volume_prefix = request.parameters[config.PARAMETERS_PREFIX] + volume_name = volume_prefix + "_" + volume_name + + try: + # TODO : pass multiple array addresses + with ArrayConnectionManager(user, password, array_addresses) as array_mediator: + logger.debug(array_mediator) + + if len(volume_name) > array_mediator.max_vol_name_length: + volume_name = volume_name[:array_mediator.max_vol_name_length] + logger.warning("volume name is too long - cutting it to be of size : {0}. new name : {1}".format( + array_mediator.max_vol_name_length, volume_name)) + + size = request.capacity_range.required_bytes + + if size == 0: + size = array_mediator.minimal_volume_size_in_bytes + logger.debug("requested size is 0 so the default size will be used : {0} ".format( + size)) + try: + vol = array_mediator.get_volume(volume_name) + + except controller_errors.VolumeNotFoundError as ex: + logger.debug( + "volume was not found. 
creating a new volume with parameters: {0}".format(request.parameters)) + + array_mediator.validate_supported_capabilities(capabilities) + vol = array_mediator.create_volume(volume_name, size, capabilities, pool) + + else: + logger.debug("volume found : {}".format(vol)) + + if not (vol.capacity_bytes == request.capacity_range.required_bytes): + context.set_details("Volume was already created with different size.") + context.set_code(grpc.StatusCode.ALREADY_EXISTS) + return csi_pb2.CreateVolumeResponse() + + logger.debug("generating create volume response") + res = utils.generate_csi_create_volume_response(vol) + logger.info("finished create volume") + return res + + except (controller_errors.IllegalObjectName, controller_errors.StorageClassCapabilityNotSupported, + controller_errors.PoolDoesNotExist, controller_errors.PoolDoesNotMatchCapabilities) as ex: + context.set_details(ex.message) + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + return csi_pb2.CreateVolumeResponse() + except controller_errors.PermissionDeniedError as ex: + context.set_code(grpc.StatusCode.PERMISSION_DENIED) + context.set_details(ex) + return csi_pb2.CreateVolumeResponse() + except controller_errors.VolumeAlreadyExists as ex: + context.set_details(ex.message) + context.set_code(grpc.StatusCode.ALREADY_EXISTS) + return csi_pb2.CreateVolumeResponse() + except Exception as ex: + logger.error("an internal exception occurred") + logger.exception(ex) + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details('an internal exception occurred : {}'.format(ex)) + return csi_pb2.CreateVolumeResponse() + + def DeleteVolume(self, request, context): + set_current_thread_name(request.volume_id) + logger.info("DeleteVolume") + secrets = request.secrets + + try: + utils.validate_delete_volume_request(request) + + user, password, array_addresses = utils.get_array_connection_info_from_secret(secrets) + + try: + array_type, vol_id = utils.get_volume_id_info(request.volume_id) + except controller_errors.VolumeNotFoundError as ex: + logger.warning("volume id is invalid. 
error : {}".format(ex)) + return csi_pb2.DeleteVolumeResponse() + + with ArrayConnectionManager(user, password, array_addresses, array_type) as array_mediator: + + logger.debug(array_mediator) + + try: + array_mediator.delete_volume(vol_id) + + except controller_errors.VolumeNotFoundError as ex: + logger.debug("volume was not found during deletion: {0}".format(ex)) + + except controller_errors.PermissionDeniedError as ex: + context.set_code(grpc.StatusCode.PERMISSION_DENIED) + context.set_details(ex) + return csi_pb2.DeleteVolumeResponse() + + except ValidationException as ex: + logger.exception(ex) + context.set_details(ex.message) + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + return csi_pb2.DeleteVolumeResponse() + + except Exception as ex: + logger.debug("an internal exception occurred") + logger.exception(ex) + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details('an internal exception occurred : {}'.format(ex)) + return csi_pb2.DeleteVolumeResponse() + + logger.debug("generating delete volume response") + res = csi_pb2.DeleteVolumeResponse() + logger.info("finished DeleteVolume") + return res + + def ControllerPublishVolume(self, request, context): + set_current_thread_name(request.volume_id) + logger.info("ControllerPublishVolume") + try: + + utils.validate_publish_volume_request(request) + + array_type, vol_id = utils.get_volume_id_info(request.volume_id) + + node_id_info = NodeIdInfo(request.node_id) + node_name = node_id_info.node_name + initiators = node_id_info.initiators + + logger.debug("node name for this publish operation is : {0}".format(node_name)) + + user, password, array_addresses = utils.get_array_connection_info_from_secret(request.secrets) + + with ArrayConnectionManager(user, password, array_addresses, array_type) as array_mediator: + + host_name, connectivity_types = array_mediator.get_host_by_host_identifiers(initiators) + + logger.debug("hostname : {}, connectiivity_types : {}".format(host_name, connectivity_types)) + + connectivity_type = utils.choose_connectivity_type(connectivity_types) + + if FC_CONNECTIVITY_TYPE == connectivity_type: + array_initiators = array_mediator.get_array_fc_wwns(host_name) + else: + array_initiators = array_mediator.get_array_iqns() + mappings = array_mediator.get_volume_mappings(vol_id) + if len(mappings) >= 1: + logger.debug( + "{0} mappings have been found for volume. the mappings are: {1}".format( + len(mappings), mappings)) + if len(mappings) == 1: + mapping = list(mappings)[0] + if mapping == host_name: + logger.debug("idempotent case - volume is already mapped to host.") + return utils.generate_csi_publish_volume_response(mappings[mapping], connectivity_type, + self.cfg, array_initiators) + + logger.error(messages.more_then_one_mapping_message.format(mappings)) + context.set_details(messages.more_then_one_mapping_message.format(mappings)) + context.set_code(grpc.StatusCode.FAILED_PRECONDITION) + return csi_pb2.ControllerPublishVolumeResponse() + + logger.debug("no mappings were found for volume. mapping vol : {0} to host : {1}".format( + vol_id, host_name)) + + try: + lun = array_mediator.map_volume(vol_id, host_name) + logger.debug("lun : {}".format(lun)) + except controller_errors.LunAlreadyInUseError as ex: + logger.warning("Lun was already in use. re-trying the operation. 
{0}".format(ex)) + for i in range(array_mediator.max_lun_retries - 1): + try: + lun = array_mediator.map_volume(vol_id, host_name) + break + except controller_errors.LunAlreadyInUseError as inner_ex: + logger.warning("re-trying map volume. try #{0}. {1}".format(i, inner_ex)) + else: # will get here only if the for statement is false. + raise ex + except controller_errors.PermissionDeniedError as ex: + context.set_code(grpc.StatusCode.PERMISSION_DENIED) + context.set_details(ex) + return csi_pb2.ControllerPublishVolumeResponse() + + logger.info("finished ControllerPublishVolume") + res = utils.generate_csi_publish_volume_response(lun, connectivity_type, self.cfg, array_initiators) + logger.debug("after res") + return res + + except (controller_errors.LunAlreadyInUseError, controller_errors.NoAvailableLunError) as ex: + logger.exception(ex) + context.set_details(ex.message) + context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED) + return csi_pb2.ControllerPublishVolumeResponse() + + except (controller_errors.HostNotFoundError, controller_errors.VolumeNotFoundError, controller_errors.BadNodeIdError) as ex: + logger.exception(ex) + context.set_details(ex.message) + context.set_code(grpc.StatusCode.NOT_FOUND) + return csi_pb2.ControllerPublishVolumeResponse() + + except ValidationException as ex: + logger.exception(ex) + context.set_details(ex.message) + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + return csi_pb2.ControllerPublishVolumeResponse() + + except Exception as ex: + logger.debug("an internal exception occurred") + logger.exception(ex) + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details('an internal exception occurred : {}'.format(ex)) + return csi_pb2.ControllerPublishVolumeResponse() + + def ControllerUnpublishVolume(self, request, context): + set_current_thread_name(request.volume_id) + logger.info("ControllerUnpublishVolume") + try: + try: + utils.validate_unpublish_volume_request(request) + except ValidationException as ex: + logger.exception(ex) + context.set_details(ex.message) + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + return csi_pb2.ControllerUnpublishVolumeResponse() + + array_type, vol_id = utils.get_volume_id_info(request.volume_id) + + node_id_info = NodeIdInfo(request.node_id) + node_name = node_id_info.node_name + initiators = node_id_info.initiators + logger.debug("node name for this unpublish operation is : {0}".format(node_name)) + + user, password, array_addresses = utils.get_array_connection_info_from_secret(request.secrets) + + with ArrayConnectionManager(user, password, array_addresses, array_type) as array_mediator: + + host_name, _ = array_mediator.get_host_by_host_identifiers(initiators) + try: + array_mediator.unmap_volume(vol_id, host_name) + + except controller_errors.VolumeAlreadyUnmappedError as ex: + logger.debug("Idempotent case. 
volume is already unmapped.") + return csi_pb2.ControllerUnpublishVolumeResponse() + + except controller_errors.PermissionDeniedError as ex: + context.set_code(grpc.StatusCode.PERMISSION_DENIED) + context.set_details(ex) + return csi_pb2.ControllerPublishVolumeResponse() + + logger.info("finished ControllerUnpublishVolume") + return csi_pb2.ControllerUnpublishVolumeResponse() + + except (controller_errors.HostNotFoundError, controller_errors.VolumeNotFoundError) as ex: + logger.exception(ex) + context.set_details(ex.message) + context.set_code(grpc.StatusCode.NOT_FOUND) + return csi_pb2.ControllerUnpublishVolumeResponse() + + except Exception as ex: + logger.debug("an internal exception occurred") + logger.exception(ex) + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details('an internal exception occurred : {}'.format(ex)) + return csi_pb2.ControllerUnpublishVolumeResponse() + + def ValidateVolumeCapabilities(self, request, context): + logger.info("ValidateVolumeCapabilities") + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + logger.info("finished ValidateVolumeCapabilities") + return csi_pb2.ValidateVolumeCapabilitiesResponse() + + def ListVolumes(self, request, context): + logger.info("ListVolumes") + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + logger.info("finished ListVolumes") + return csi_pb2.ListVolumesResponse() + + def GetCapacity(self, request, context): + logger.info("GetCapacity") + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + logger.info("finished GetCapacity") + return csi_pb2.GetCapacityResponse() + + def ControllerGetCapabilities(self, request, context): + logger.info("ControllerGetCapabilities") + types = csi_pb2.ControllerServiceCapability.RPC.Type + + res = csi_pb2.ControllerGetCapabilitiesResponse( + capabilities=[csi_pb2.ControllerServiceCapability( + rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("CREATE_DELETE_VOLUME"))), + csi_pb2.ControllerServiceCapability( + rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("PUBLISH_UNPUBLISH_VOLUME")))]) + + logger.info("finished ControllerGetCapabilities") + return res + + def __get_identity_config(self, attribute_name): + return self.cfg['identity'][attribute_name] + + def GetPluginInfo(self, request, context): + logger.info("GetPluginInfo") + try: + name = self.__get_identity_config("name") + version = self.__get_identity_config("version") + except Exception as ex: + logger.exception(ex) + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details('an error occured while trying to get plugin name or version') + return csi_pb2.GetPluginInfoResponse() + + if not name or not version: + logger.error("plugin name or version cannot be empty") + context.set_code(grpc.StatusCode.INTERNAL) + context.set_details("plugin name or version cannot be empty") + return csi_pb2.GetPluginInfoResponse() + + logger.info("finished GetPluginInfo") + return csi_pb2.GetPluginInfoResponse(name=name, vendor_version=version) + + def GetPluginCapabilities(self, request, context): + logger.info("GetPluginCapabilities") + types = csi_pb2.PluginCapability.Service.Type + capabilities = self.__get_identity_config("capabilities") + capability_list = [] + for cap in capabilities: + capability_list.append( + csi_pb2.PluginCapability( + service=csi_pb2.PluginCapability.Service(type=types.Value(cap)) + ) + ) + + logger.info("finished GetPluginCapabilities") + return csi_pb2.GetPluginCapabilitiesResponse( + capabilities=capability_list + + ) + + def Probe(self, request, context): + 
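# Note (added comment): Probe is the CSI liveness/readiness check. This controller answers
+        # OK unconditionally, i.e. it reports ready as soon as the gRPC server is up and does
+        # not attempt any storage-array connectivity check here.
+        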
context.set_code(grpc.StatusCode.OK) + return csi_pb2.ProbeResponse() + + def start_server(self): + controller_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + + csi_pb2_grpc.add_ControllerServicer_to_server(self, controller_server) + csi_pb2_grpc.add_IdentityServicer_to_server(self, controller_server) + + # bind the server to the port defined above + # controller_server.add_insecure_port('[::]:{}'.format(self.server_port)) + # controller_server.add_insecure_port('unix://{}'.format(self.server_port)) + controller_server.add_insecure_port(self.endpoint) + + # start the server + logger.debug("Listening for connections on endpoint address: {}".format(self.endpoint)) + + controller_server.start() + logger.debug('Controller Server running ...') + + try: + while True: + time.sleep(60 * 60 * 60) + except KeyboardInterrupt: + controller_server.stop(0) + logger.debug('Controller Server Stopped ...') + + +def main(): + parser = OptionParser() + parser.add_option("-e", "--csi-endpoint", dest="endpoint",help="grpc endpoint") + parser.add_option("-l", "--loglevel", dest="loglevel",help="log level") + (options, args) = parser.parse_args() + + # set logger level and init logger + log_level = options.loglevel + set_log_level(log_level) + + # start the server + endpoint = options.endpoint + curr_server = ControllerServicer(endpoint) + curr_server.start_server() + + +if __name__ == '__main__': + main() diff --git a/controller/controller_server/errors.py b/controller/controller_server/errors.py new file mode 100644 index 000000000..01e658c77 --- /dev/null +++ b/controller/controller_server/errors.py @@ -0,0 +1,19 @@ +import controller.controller_server.messages as messages + + +class BaseControllerServerException(Exception): + + def __str__(self, *args, **kwargs): + return self.message + + +class ValidationException(BaseControllerServerException): + + def __init__(self, msg): + self.message = messages.ValidationException_message.format(msg) + + +class VolumeIdError(BaseControllerServerException): + + def __init__(self, id): + self.message = messages.VolumeIdError_message.format(id) diff --git a/controller/controller_server/messages.py b/controller/controller_server/messages.py new file mode 100644 index 000000000..d2a639565 --- /dev/null +++ b/controller/controller_server/messages.py @@ -0,0 +1,22 @@ +ValidationException_message = "Validation error has occurred : {0}" + +VolumeIdError_message = "Wrong volume id format : {0}" + +more_then_one_mapping_message = "Volume is already mapped to a different host : {0}" + +# validation error messages +invalid_secret_message = "invalid secret was passed" +secret_missing_message = 'secret is missing' +capabilities_not_set_message = "capbilities were not set" +unsupported_fs_type_message = "unsupported fs_type : {}" +only_mount_supported_message = "only mount volume capability is supported" +unsupported_access_mode_message = "unsupported access mode : {}" +name_should_be_empty_message = 'name should not be empty' +size_bigget_then_0_message = 'size should be bigger then 0' +no_capacity_range_message = 'no capacity range set' +pool_is_missing_message = 'pool parameter is missing.' 
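+# note (added comment): pool_is_missing is raised when the "pool" key is absent from the storage
+# class parameters, while wrong_pool_passed (below) is raised when the key is present but empty.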
+wrong_pool_passed_message = 'pool parameter must provide value for it' +params_are_missing_message = 'parameters are missing' +volume_id_wrong_format_message = 'volume id has wrong format' +readoly_not_supported_message = 'readonly parameter is not supported' +node_id_wrong_format_message = 'node id has wrong format' diff --git a/controller/controller_server/test_settings.py b/controller/controller_server/test_settings.py new file mode 100644 index 000000000..e0009b627 --- /dev/null +++ b/controller/controller_server/test_settings.py @@ -0,0 +1,4 @@ +user = "temp" +password = "temp" +array = "arr" +vol_name = "vol" diff --git a/controller/controller_server/utils.py b/controller/controller_server/utils.py new file mode 100644 index 000000000..c7d45eba3 --- /dev/null +++ b/controller/controller_server/utils.py @@ -0,0 +1,219 @@ +from controller.common.csi_logger import get_stdout_logger +import controller.controller_server.config as config +from controller.csi_general import csi_pb2 +from controller.controller_server.errors import ValidationException +import controller.controller_server.messages as messages +from controller.array_action.config import FC_CONNECTIVITY_TYPE, ISCSI_CONNECTIVITY_TYPE +from controller.array_action.errors import HostNotFoundError, VolumeNotFoundError + +logger = get_stdout_logger() + + +def get_array_connection_info_from_secret(secrets): + user = secrets[config.SECRET_USERNAME_PARAMETER] + password = secrets[config.SECRET_PASSWORD_PARAMETER] + array_addresses = secrets[config.SECRET_ARRAY_PARAMETER].split(",") + return user, password, array_addresses + + +def get_vol_id(new_vol): + logger.debug('getting vol id for vol : {0}'.format(new_vol)) + vol_id = "{0}{1}{2}".format(new_vol.array_type, config.PARAMETERS_VOLUME_ID_DELIMITER, new_vol.id) + logger.debug("vol id is : {0}".format(vol_id)) + return vol_id + + +def validate_secret(secret): + logger.debug("validating secrets") + if secret: + if not (config.SECRET_USERNAME_PARAMETER in secret and + config.SECRET_PASSWORD_PARAMETER in secret and + config.SECRET_ARRAY_PARAMETER in secret): + raise ValidationException(messages.invalid_secret_message) + + else: + raise ValidationException(messages.secret_missing_message) + + logger.debug("secret validation finished") + + +def validate_csi_volume_capability(cap): + logger.debug("validating csi volume capability : {0}".format(cap)) + if cap.mount: + if cap.mount.fs_type and (cap.mount.fs_type not in config.SUPPORTED_FS_TYPES): + raise ValidationException(messages.unsupported_fs_type_message.format(cap.mount.fs_type)) + + else: + logger.error(messages.only_mount_supported_message) + raise ValidationException(messages.only_mount_supported_message) + + if cap.access_mode.mode not in config.SUPPORTED_ACCESS_MODE: + logger.error("unsupported access mode : {}".format(cap.access_mode)) + raise ValidationException(messages.unsupported_access_mode_message.format(cap.access_mode)) + + logger.debug("csi volume capabilities validation finished.") + + +def validate_csi_volume_capabilties(capabilities): + logger.debug("validating csi volume capabilities: {}".format(capabilities)) + if len(capabilities) == 0: + raise ValidationException(messages.capabilities_not_set_message) + + for cap in capabilities: + validate_csi_volume_capability(cap) + + logger.debug("finished validating csi volume capabilities.") + + +def validate_create_volume_request(request): + logger.debug("validating create volume request") + + logger.debug("validating volume name") + if request.name == '': + raise 
ValidationException(messages.name_should_be_empty_message) + + logger.debug("validating volume capacity") + if request.capacity_range: + if request.capacity_range.required_bytes < 0: + raise ValidationException(messages.size_bigget_then_0_message) + + else: + raise ValidationException(messages.no_capacity_range_message) + + logger.debug("validating volume capabilities") + validate_csi_volume_capabilties(request.volume_capabilities) + + logger.debug("validating secrets") + if request.secrets: + validate_secret(request.secrets) + + logger.debug("validating storage class parameters") + if request.parameters: + if not (config.PARAMETERS_POOL in request.parameters): + raise ValidationException(messages.pool_is_missing_message) + + if not request.parameters[config.PARAMETERS_POOL]: + raise ValidationException(messages.wrong_pool_passed_message) + else: + raise ValidationException(messages.params_are_missing_message) + + logger.debug("request validation finished.") + + +def generate_csi_create_volume_response(new_vol): + logger.debug("creating volume response for vol : {0}".format(new_vol)) + + vol_context = {"volume_name": new_vol.volume_name, + "array_address": ",".join(new_vol.array_address), + "pool_name": new_vol.pool_name, + "storage_type": new_vol.array_type + } + + res = csi_pb2.CreateVolumeResponse(volume=csi_pb2.Volume( + capacity_bytes=new_vol.capacity_bytes, + volume_id=get_vol_id(new_vol), + volume_context=vol_context)) + + logger.debug("finished creating volume response : {0}".format(res)) + return res + + +def validate_delete_volume_request(request): + logger.debug("validating delete volume request") + + if request.volume_id == "": + raise ValidationException("Volume id cannot be empty") + + logger.debug("validating secrets") + if request.secrets: + validate_secret(request.secrets) + + logger.debug("delete volume validation finished") + + +def validate_publish_volume_request(request): + logger.debug("validating publish volume request") + + logger.debug("validating readonly") + if request.readonly: + raise ValidationException(messages.readoly_not_supported_message) + + logger.debug("validating volume capabilities") + validate_csi_volume_capability(request.volume_capability) + + logger.debug("validating secrets") + if request.secrets: + validate_secret(request.secrets) + else: + raise ValidationException(messages.secret_missing_message) + + logger.debug("publish volume request validation finished.") + + +def get_volume_id_info(volume_id): + logger.debug("getting volume info for vol id : {0}".format(volume_id)) + split_vol = volume_id.split(config.PARAMETERS_VOLUME_ID_DELIMITER) + if len(split_vol) != 2: + raise VolumeNotFoundError(volume_id) + + array_type, vol_id = split_vol + logger.debug("volume id : {0}, array type :{1}".format(vol_id, array_type)) + return array_type, vol_id + + +def get_node_id_info(node_id): + logger.debug("getting node info for node id : {0}".format(node_id)) + split_node = node_id.split(config.PARAMETERS_NODE_ID_DELIMITER) + if len(split_node) != config.SUPPORTED_CONNECTIVITY_TYPES + 1: # the 1 is for the hostname + raise HostNotFoundError(node_id) + + hostname, iscsi_iqn, fc_wwns = split_node + logger.debug("node name : {0}, iscsi_iqn : {1}, fc_wwns : {2} ".format( + hostname, iscsi_iqn, fc_wwns)) + return hostname, iscsi_iqn, fc_wwns + + +def choose_connectivity_type(connecitvity_types): + # If connectivity type support FC and iSCSI at the same time, chose FC + logger.debug("choosing connectivity type for connectivity types : 
{0}".format(connecitvity_types)) + res = None + if FC_CONNECTIVITY_TYPE in connecitvity_types: + logger.debug("connectivity type is : {0}".format(FC_CONNECTIVITY_TYPE)) + return FC_CONNECTIVITY_TYPE + if ISCSI_CONNECTIVITY_TYPE in connecitvity_types: + logger.debug("connectivity type is : {0}".format(ISCSI_CONNECTIVITY_TYPE)) + return ISCSI_CONNECTIVITY_TYPE + +def generate_csi_publish_volume_response(lun, connectivity_type, config, array_initiators): + logger.debug("generating publish volume response for lun :{0}, connectivity : {1}".format(lun, connectivity_type)) + + lun_param = config["controller"]["publish_context_lun_parameter"] + connectivity_param = config["controller"]["publish_context_connectivity_parameter"] + hash_by_connectivity = { + 'iscsi': config["controller"]["publish_context_array_iqn"], + 'fc': config["controller"]["publish_context_fc_initiators"]} + + array_initiators = ",".join(array_initiators) + res = csi_pb2.ControllerPublishVolumeResponse( + publish_context={lun_param: str(lun), + connectivity_param: connectivity_type, + hash_by_connectivity[connectivity_type]: array_initiators}) + + logger.debug("publish volume response is :{0}".format(res)) + return res + + +def validate_unpublish_volume_request(request): + logger.debug("validating unpublish volume request") + + logger.debug("validating volume id") + if len(request.volume_id.split(config.PARAMETERS_VOLUME_ID_DELIMITER)) != 2: + raise ValidationException(messages.volume_id_wrong_format_message) + + logger.debug("validating secrets") + if request.secrets: + validate_secret(request.secrets) + else: + raise ValidationException(messages.secret_missing_message) + + logger.debug("unpublish volume request validation finished.") diff --git a/controller/csi_general/__init__.py b/controller/csi_general/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/csi_general/csi_pb2.py b/controller/csi_general/csi_pb2.py new file mode 100644 index 000000000..250449487 --- /dev/null +++ b/controller/csi_general/csi_pb2.py @@ -0,0 +1,4970 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: csi.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='csi.proto', + package='csi.v1', + syntax='proto3', + serialized_options=_b('Z\003csi'), + serialized_pb=_b('\n\tcsi.proto\x12\x06\x63si.v1\x1a google/protobuf/descriptor.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\"\x16\n\x14GetPluginInfoRequest\"\xad\x01\n\x15GetPluginInfoResponse\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0evendor_version\x18\x02 \x01(\t\x12=\n\x08manifest\x18\x03 \x03(\x0b\x32+.csi.v1.GetPluginInfoResponse.ManifestEntry\x1a/\n\rManifestEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1e\n\x1cGetPluginCapabilitiesRequest\"O\n\x1dGetPluginCapabilitiesResponse\x12.\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32\x18.csi.v1.PluginCapability\"\xa7\x03\n\x10PluginCapability\x12\x33\n\x07service\x18\x01 \x01(\x0b\x32 .csi.v1.PluginCapability.ServiceH\x00\x12\x44\n\x10volume_expansion\x18\x02 \x01(\x0b\x32(.csi.v1.PluginCapability.VolumeExpansionH\x00\x1a\x91\x01\n\x07Service\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.csi.v1.PluginCapability.Service.Type\"Q\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x16\n\x12\x43ONTROLLER_SERVICE\x10\x01\x12$\n VOLUME_ACCESSIBILITY_CONSTRAINTS\x10\x02\x1a|\n\x0fVolumeExpansion\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.csi.v1.PluginCapability.VolumeExpansion.Type\",\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06ONLINE\x10\x01\x12\x0b\n\x07OFFLINE\x10\x02\x42\x06\n\x04type\"\x0e\n\x0cProbeRequest\":\n\rProbeResponse\x12)\n\x05ready\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\xea\x03\n\x13\x43reateVolumeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x0e\x63\x61pacity_range\x18\x02 \x01(\x0b\x32\x15.csi.v1.CapacityRange\x12\x35\n\x13volume_capabilities\x18\x03 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12?\n\nparameters\x18\x04 \x03(\x0b\x32+.csi.v1.CreateVolumeRequest.ParametersEntry\x12>\n\x07secrets\x18\x05 \x03(\x0b\x32(.csi.v1.CreateVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12:\n\x15volume_content_source\x18\x06 \x01(\x0b\x32\x1b.csi.v1.VolumeContentSource\x12?\n\x1a\x61\x63\x63\x65ssibility_requirements\x18\x07 \x01(\x0b\x32\x1b.csi.v1.TopologyRequirement\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe3\x01\n\x13VolumeContentSource\x12>\n\x08snapshot\x18\x01 \x01(\x0b\x32*.csi.v1.VolumeContentSource.SnapshotSourceH\x00\x12:\n\x06volume\x18\x02 \x01(\x0b\x32(.csi.v1.VolumeContentSource.VolumeSourceH\x00\x1a%\n\x0eSnapshotSource\x12\x13\n\x0bsnapshot_id\x18\x01 \x01(\t\x1a!\n\x0cVolumeSource\x12\x11\n\tvolume_id\x18\x01 \x01(\tB\x06\n\x04type\"6\n\x14\x43reateVolumeResponse\x12\x1e\n\x06volume\x18\x01 
\x01(\x0b\x32\x0e.csi.v1.Volume\"\xf6\x03\n\x10VolumeCapability\x12\x35\n\x05\x62lock\x18\x01 \x01(\x0b\x32$.csi.v1.VolumeCapability.BlockVolumeH\x00\x12\x35\n\x05mount\x18\x02 \x01(\x0b\x32$.csi.v1.VolumeCapability.MountVolumeH\x00\x12\x38\n\x0b\x61\x63\x63\x65ss_mode\x18\x03 \x01(\x0b\x32#.csi.v1.VolumeCapability.AccessMode\x1a\r\n\x0b\x42lockVolume\x1a\x33\n\x0bMountVolume\x12\x0f\n\x07\x66s_type\x18\x01 \x01(\t\x12\x13\n\x0bmount_flags\x18\x02 \x03(\t\x1a\xe6\x01\n\nAccessMode\x12\x36\n\x04mode\x18\x01 \x01(\x0e\x32(.csi.v1.VolumeCapability.AccessMode.Mode\"\x9f\x01\n\x04Mode\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x16\n\x12SINGLE_NODE_WRITER\x10\x01\x12\x1b\n\x17SINGLE_NODE_READER_ONLY\x10\x02\x12\x1a\n\x16MULTI_NODE_READER_ONLY\x10\x03\x12\x1c\n\x18MULTI_NODE_SINGLE_WRITER\x10\x04\x12\x1b\n\x17MULTI_NODE_MULTI_WRITER\x10\x05\x42\r\n\x0b\x61\x63\x63\x65ss_type\"<\n\rCapacityRange\x12\x16\n\x0erequired_bytes\x18\x01 \x01(\x03\x12\x13\n\x0blimit_bytes\x18\x02 \x01(\x03\"\x88\x02\n\x06Volume\x12\x16\n\x0e\x63\x61pacity_bytes\x18\x01 \x01(\x03\x12\x11\n\tvolume_id\x18\x02 \x01(\t\x12\x39\n\x0evolume_context\x18\x03 \x03(\x0b\x32!.csi.v1.Volume.VolumeContextEntry\x12\x33\n\x0e\x63ontent_source\x18\x04 \x01(\x0b\x32\x1b.csi.v1.VolumeContentSource\x12-\n\x13\x61\x63\x63\x65ssible_topology\x18\x05 \x03(\x0b\x32\x10.csi.v1.Topology\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"_\n\x13TopologyRequirement\x12#\n\trequisite\x18\x01 \x03(\x0b\x32\x10.csi.v1.Topology\x12#\n\tpreferred\x18\x02 \x03(\x0b\x32\x10.csi.v1.Topology\"m\n\x08Topology\x12\x30\n\x08segments\x18\x01 \x03(\x0b\x32\x1e.csi.v1.Topology.SegmentsEntry\x1a/\n\rSegmentsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x98\x01\n\x13\x44\x65leteVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12>\n\x07secrets\x18\x02 \x03(\x0b\x32(.csi.v1.DeleteVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14\x44\x65leteVolumeResponse\"\x8f\x03\n\x1e\x43ontrollerPublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\x12\x33\n\x11volume_capability\x18\x03 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x12\x10\n\x08readonly\x18\x04 \x01(\x08\x12I\n\x07secrets\x18\x05 \x03(\x0b\x32\x33.csi.v1.ControllerPublishVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12Q\n\x0evolume_context\x18\x06 \x03(\x0b\x32\x39.csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xae\x01\n\x1f\x43ontrollerPublishVolumeResponse\x12T\n\x0fpublish_context\x18\x01 \x03(\x0b\x32;.csi.v1.ControllerPublishVolumeResponse.PublishContextEntry\x1a\x35\n\x13PublishContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc3\x01\n ControllerUnpublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x0f\n\x07node_id\x18\x02 \x01(\t\x12K\n\x07secrets\x18\x03 \x03(\x0b\x32\x35.csi.v1.ControllerUnpublishVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"#\n!ControllerUnpublishVolumeResponse\"\xf9\x03\n!ValidateVolumeCapabilitiesRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12T\n\x0evolume_context\x18\x02 
\x03(\x0b\x32<.csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry\x12\x35\n\x13volume_capabilities\x18\x03 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12M\n\nparameters\x18\x04 \x03(\x0b\x32\x39.csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry\x12L\n\x07secrets\x18\x05 \x03(\x0b\x32\x36.csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntryB\x03\x98\x42\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe7\x03\n\"ValidateVolumeCapabilitiesResponse\x12G\n\tconfirmed\x18\x01 \x01(\x0b\x32\x34.csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed\x12\x0f\n\x07message\x18\x02 \x01(\t\x1a\xe6\x02\n\tConfirmed\x12_\n\x0evolume_context\x18\x01 \x03(\x0b\x32G.csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry\x12\x35\n\x13volume_capabilities\x18\x02 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12X\n\nparameters\x18\x03 \x03(\x0b\x32\x44.csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"A\n\x12ListVolumesRequest\x12\x13\n\x0bmax_entries\x18\x01 \x01(\x05\x12\x16\n\x0estarting_token\x18\x02 \x01(\t\"\x86\x01\n\x13ListVolumesResponse\x12\x32\n\x07\x65ntries\x18\x01 \x03(\x0b\x32!.csi.v1.ListVolumesResponse.Entry\x12\x12\n\nnext_token\x18\x02 \x01(\t\x1a\'\n\x05\x45ntry\x12\x1e\n\x06volume\x18\x01 \x01(\x0b\x32\x0e.csi.v1.Volume\"\xed\x01\n\x12GetCapacityRequest\x12\x35\n\x13volume_capabilities\x18\x01 \x03(\x0b\x32\x18.csi.v1.VolumeCapability\x12>\n\nparameters\x18\x02 \x03(\x0b\x32*.csi.v1.GetCapacityRequest.ParametersEntry\x12-\n\x13\x61\x63\x63\x65ssible_topology\x18\x03 \x01(\x0b\x32\x10.csi.v1.Topology\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"1\n\x13GetCapacityResponse\x12\x1a\n\x12\x61vailable_capacity\x18\x01 \x01(\x03\"\"\n ControllerGetCapabilitiesRequest\"^\n!ControllerGetCapabilitiesResponse\x12\x39\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32#.csi.v1.ControllerServiceCapability\"\xfe\x02\n\x1b\x43ontrollerServiceCapability\x12\x36\n\x03rpc\x18\x01 \x01(\x0b\x32\'.csi.v1.ControllerServiceCapability.RPCH\x00\x1a\x9e\x02\n\x03RPC\x12:\n\x04type\x18\x01 \x01(\x0e\x32,.csi.v1.ControllerServiceCapability.RPC.Type\"\xda\x01\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x18\n\x14\x43REATE_DELETE_VOLUME\x10\x01\x12\x1c\n\x18PUBLISH_UNPUBLISH_VOLUME\x10\x02\x12\x10\n\x0cLIST_VOLUMES\x10\x03\x12\x10\n\x0cGET_CAPACITY\x10\x04\x12\x1a\n\x16\x43REATE_DELETE_SNAPSHOT\x10\x05\x12\x12\n\x0eLIST_SNAPSHOTS\x10\x06\x12\x10\n\x0c\x43LONE_VOLUME\x10\x07\x12\x14\n\x10PUBLISH_READONLY\x10\x08\x12\x11\n\rEXPAND_VOLUME\x10\tB\x06\n\x04type\"\xa7\x02\n\x15\x43reateSnapshotRequest\x12\x18\n\x10source_volume_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12@\n\x07secrets\x18\x03 \x03(\x0b\x32*.csi.v1.CreateSnapshotRequest.SecretsEntryB\x03\x98\x42\x01\x12\x41\n\nparameters\x18\x04 \x03(\x0b\x32-.csi.v1.CreateSnapshotRequest.ParametersEntry\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"<\n\x16\x43reateSnapshotResponse\x12\"\n\x08snapshot\x18\x01 \x01(\x0b\x32\x10.csi.v1.Snapshot\"\x96\x01\n\x08Snapshot\x12\x12\n\nsize_bytes\x18\x01 \x01(\x03\x12\x13\n\x0bsnapshot_id\x18\x02 \x01(\t\x12\x18\n\x10source_volume_id\x18\x03 \x01(\t\x12\x31\n\rcreation_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0cready_to_use\x18\x05 \x01(\x08\"\x9e\x01\n\x15\x44\x65leteSnapshotRequest\x12\x13\n\x0bsnapshot_id\x18\x01 \x01(\t\x12@\n\x07secrets\x18\x02 \x03(\x0b\x32*.csi.v1.DeleteSnapshotRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x18\n\x16\x44\x65leteSnapshotResponse\"r\n\x14ListSnapshotsRequest\x12\x13\n\x0bmax_entries\x18\x01 \x01(\x05\x12\x16\n\x0estarting_token\x18\x02 \x01(\t\x12\x18\n\x10source_volume_id\x18\x03 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x04 \x01(\t\"\x8e\x01\n\x15ListSnapshotsResponse\x12\x34\n\x07\x65ntries\x18\x01 \x03(\x0b\x32#.csi.v1.ListSnapshotsResponse.Entry\x12\x12\n\nnext_token\x18\x02 \x01(\t\x1a+\n\x05\x45ntry\x12\"\n\x08snapshot\x18\x01 \x01(\x0b\x32\x10.csi.v1.Snapshot\"\xdb\x01\n\x1d\x43ontrollerExpandVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12-\n\x0e\x63\x61pacity_range\x18\x02 \x01(\x0b\x32\x15.csi.v1.CapacityRange\x12H\n\x07secrets\x18\x03 \x03(\x0b\x32\x32.csi.v1.ControllerExpandVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"Y\n\x1e\x43ontrollerExpandVolumeResponse\x12\x16\n\x0e\x63\x61pacity_bytes\x18\x01 \x01(\x03\x12\x1f\n\x17node_expansion_required\x18\x02 \x01(\x08\"\xf5\x03\n\x16NodeStageVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12K\n\x0fpublish_context\x18\x02 \x03(\x0b\x32\x32.csi.v1.NodeStageVolumeRequest.PublishContextEntry\x12\x1b\n\x13staging_target_path\x18\x03 \x01(\t\x12\x33\n\x11volume_capability\x18\x04 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x12\x41\n\x07secrets\x18\x05 \x03(\x0b\x32+.csi.v1.NodeStageVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12I\n\x0evolume_context\x18\x06 \x03(\x0b\x32\x31.csi.v1.NodeStageVolumeRequest.VolumeContextEntry\x1a\x35\n\x13PublishContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x19\n\x17NodeStageVolumeResponse\"J\n\x18NodeUnstageVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x1b\n\x13staging_target_path\x18\x02 \x01(\t\"\x1b\n\x19NodeUnstageVolumeResponse\"\xa4\x04\n\x18NodePublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12M\n\x0fpublish_context\x18\x02 \x03(\x0b\x32\x34.csi.v1.NodePublishVolumeRequest.PublishContextEntry\x12\x1b\n\x13staging_target_path\x18\x03 \x01(\t\x12\x13\n\x0btarget_path\x18\x04 \x01(\t\x12\x33\n\x11volume_capability\x18\x05 \x01(\x0b\x32\x18.csi.v1.VolumeCapability\x12\x10\n\x08readonly\x18\x06 \x01(\x08\x12\x43\n\x07secrets\x18\x07 \x03(\x0b\x32-.csi.v1.NodePublishVolumeRequest.SecretsEntryB\x03\x98\x42\x01\x12K\n\x0evolume_context\x18\x08 \x03(\x0b\x32\x33.csi.v1.NodePublishVolumeRequest.VolumeContextEntry\x1a\x35\n\x13PublishContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cSecretsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1a\x34\n\x12VolumeContextEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1b\n\x19NodePublishVolumeResponse\"D\n\x1aNodeUnpublishVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x13\n\x0btarget_path\x18\x02 \x01(\t\"\x1d\n\x1bNodeUnpublishVolumeResponse\"C\n\x19NodeGetVolumeStatsRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x13\n\x0bvolume_path\x18\x02 \x01(\t\"@\n\x1aNodeGetVolumeStatsResponse\x12\"\n\x05usage\x18\x01 \x03(\x0b\x32\x13.csi.v1.VolumeUsage\"\x91\x01\n\x0bVolumeUsage\x12\x11\n\tavailable\x18\x01 \x01(\x03\x12\r\n\x05total\x18\x02 \x01(\x03\x12\x0c\n\x04used\x18\x03 \x01(\x03\x12&\n\x04unit\x18\x04 \x01(\x0e\x32\x18.csi.v1.VolumeUsage.Unit\"*\n\x04Unit\x12\x0b\n\x07UNKNOWN\x10\x00\x12\t\n\x05\x42YTES\x10\x01\x12\n\n\x06INODES\x10\x02\"\x1c\n\x1aNodeGetCapabilitiesRequest\"R\n\x1bNodeGetCapabilitiesResponse\x12\x33\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32\x1d.csi.v1.NodeServiceCapability\"\xe7\x01\n\x15NodeServiceCapability\x12\x30\n\x03rpc\x18\x01 \x01(\x0b\x32!.csi.v1.NodeServiceCapability.RPCH\x00\x1a\x93\x01\n\x03RPC\x12\x34\n\x04type\x18\x01 \x01(\x0e\x32&.csi.v1.NodeServiceCapability.RPC.Type\"V\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x18\n\x14STAGE_UNSTAGE_VOLUME\x10\x01\x12\x14\n\x10GET_VOLUME_STATS\x10\x02\x12\x11\n\rEXPAND_VOLUME\x10\x03\x42\x06\n\x04type\"\x14\n\x12NodeGetInfoRequest\"s\n\x13NodeGetInfoResponse\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x1c\n\x14max_volumes_per_node\x18\x02 \x01(\x03\x12-\n\x13\x61\x63\x63\x65ssible_topology\x18\x03 \x01(\x0b\x32\x10.csi.v1.Topology\"p\n\x17NodeExpandVolumeRequest\x12\x11\n\tvolume_id\x18\x01 \x01(\t\x12\x13\n\x0bvolume_path\x18\x02 \x01(\t\x12-\n\x0e\x63\x61pacity_range\x18\x03 \x01(\x0b\x32\x15.csi.v1.CapacityRange\"2\n\x18NodeExpandVolumeResponse\x12\x16\n\x0e\x63\x61pacity_bytes\x18\x01 
\x01(\x03\x32\xfa\x01\n\x08Identity\x12N\n\rGetPluginInfo\x12\x1c.csi.v1.GetPluginInfoRequest\x1a\x1d.csi.v1.GetPluginInfoResponse\"\x00\x12\x66\n\x15GetPluginCapabilities\x12$.csi.v1.GetPluginCapabilitiesRequest\x1a%.csi.v1.GetPluginCapabilitiesResponse\"\x00\x12\x36\n\x05Probe\x12\x14.csi.v1.ProbeRequest\x1a\x15.csi.v1.ProbeResponse\"\x00\x32\xe8\x08\n\nController\x12K\n\x0c\x43reateVolume\x12\x1b.csi.v1.CreateVolumeRequest\x1a\x1c.csi.v1.CreateVolumeResponse\"\x00\x12K\n\x0c\x44\x65leteVolume\x12\x1b.csi.v1.DeleteVolumeRequest\x1a\x1c.csi.v1.DeleteVolumeResponse\"\x00\x12l\n\x17\x43ontrollerPublishVolume\x12&.csi.v1.ControllerPublishVolumeRequest\x1a\'.csi.v1.ControllerPublishVolumeResponse\"\x00\x12r\n\x19\x43ontrollerUnpublishVolume\x12(.csi.v1.ControllerUnpublishVolumeRequest\x1a).csi.v1.ControllerUnpublishVolumeResponse\"\x00\x12u\n\x1aValidateVolumeCapabilities\x12).csi.v1.ValidateVolumeCapabilitiesRequest\x1a*.csi.v1.ValidateVolumeCapabilitiesResponse\"\x00\x12H\n\x0bListVolumes\x12\x1a.csi.v1.ListVolumesRequest\x1a\x1b.csi.v1.ListVolumesResponse\"\x00\x12H\n\x0bGetCapacity\x12\x1a.csi.v1.GetCapacityRequest\x1a\x1b.csi.v1.GetCapacityResponse\"\x00\x12r\n\x19\x43ontrollerGetCapabilities\x12(.csi.v1.ControllerGetCapabilitiesRequest\x1a).csi.v1.ControllerGetCapabilitiesResponse\"\x00\x12Q\n\x0e\x43reateSnapshot\x12\x1d.csi.v1.CreateSnapshotRequest\x1a\x1e.csi.v1.CreateSnapshotResponse\"\x00\x12Q\n\x0e\x44\x65leteSnapshot\x12\x1d.csi.v1.DeleteSnapshotRequest\x1a\x1e.csi.v1.DeleteSnapshotResponse\"\x00\x12N\n\rListSnapshots\x12\x1c.csi.v1.ListSnapshotsRequest\x1a\x1d.csi.v1.ListSnapshotsResponse\"\x00\x12i\n\x16\x43ontrollerExpandVolume\x12%.csi.v1.ControllerExpandVolumeRequest\x1a&.csi.v1.ControllerExpandVolumeResponse\"\x00\x32\xda\x05\n\x04Node\x12T\n\x0fNodeStageVolume\x12\x1e.csi.v1.NodeStageVolumeRequest\x1a\x1f.csi.v1.NodeStageVolumeResponse\"\x00\x12Z\n\x11NodeUnstageVolume\x12 .csi.v1.NodeUnstageVolumeRequest\x1a!.csi.v1.NodeUnstageVolumeResponse\"\x00\x12Z\n\x11NodePublishVolume\x12 .csi.v1.NodePublishVolumeRequest\x1a!.csi.v1.NodePublishVolumeResponse\"\x00\x12`\n\x13NodeUnpublishVolume\x12\".csi.v1.NodeUnpublishVolumeRequest\x1a#.csi.v1.NodeUnpublishVolumeResponse\"\x00\x12]\n\x12NodeGetVolumeStats\x12!.csi.v1.NodeGetVolumeStatsRequest\x1a\".csi.v1.NodeGetVolumeStatsResponse\"\x00\x12W\n\x10NodeExpandVolume\x12\x1f.csi.v1.NodeExpandVolumeRequest\x1a .csi.v1.NodeExpandVolumeResponse\"\x00\x12`\n\x13NodeGetCapabilities\x12\".csi.v1.NodeGetCapabilitiesRequest\x1a#.csi.v1.NodeGetCapabilitiesResponse\"\x00\x12H\n\x0bNodeGetInfo\x12\x1a.csi.v1.NodeGetInfoRequest\x1a\x1b.csi.v1.NodeGetInfoResponse\"\x00:2\n\ncsi_secret\x12\x1d.google.protobuf.FieldOptions\x18\xa3\x08 \x01(\x08\x42\x05Z\x03\x63sib\x06proto3') + , + dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,]) + + +CSI_SECRET_FIELD_NUMBER = 1059 +csi_secret = _descriptor.FieldDescriptor( + name='csi_secret', full_name='csi.v1.csi_secret', index=0, + number=1059, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=True, extension_scope=None, + serialized_options=None, file=DESCRIPTOR) + +_PLUGINCAPABILITY_SERVICE_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='csi.v1.PluginCapability.Service.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN', 
index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CONTROLLER_SERVICE', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VOLUME_ACCESSIBILITY_CONSTRAINTS', index=2, number=2, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=642, + serialized_end=723, +) +_sym_db.RegisterEnumDescriptor(_PLUGINCAPABILITY_SERVICE_TYPE) + +_PLUGINCAPABILITY_VOLUMEEXPANSION_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='csi.v1.PluginCapability.VolumeExpansion.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ONLINE', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OFFLINE', index=2, number=2, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=805, + serialized_end=849, +) +_sym_db.RegisterEnumDescriptor(_PLUGINCAPABILITY_VOLUMEEXPANSION_TYPE) + +_VOLUMECAPABILITY_ACCESSMODE_MODE = _descriptor.EnumDescriptor( + name='Mode', + full_name='csi.v1.VolumeCapability.AccessMode.Mode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SINGLE_NODE_WRITER', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SINGLE_NODE_READER_ONLY', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MULTI_NODE_READER_ONLY', index=3, number=3, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MULTI_NODE_SINGLE_WRITER', index=4, number=4, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MULTI_NODE_MULTI_WRITER', index=5, number=5, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=2043, + serialized_end=2202, +) +_sym_db.RegisterEnumDescriptor(_VOLUMECAPABILITY_ACCESSMODE_MODE) + +_CONTROLLERSERVICECAPABILITY_RPC_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='csi.v1.ControllerServiceCapability.RPC.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATE_DELETE_VOLUME', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PUBLISH_UNPUBLISH_VOLUME', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LIST_VOLUMES', index=3, number=3, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GET_CAPACITY', index=4, number=4, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATE_DELETE_SNAPSHOT', index=5, number=5, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LIST_SNAPSHOTS', index=6, number=6, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CLONE_VOLUME', index=7, number=7, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PUBLISH_READONLY', index=8, number=8, 
+ serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXPAND_VOLUME', index=9, number=9, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=5531, + serialized_end=5749, +) +_sym_db.RegisterEnumDescriptor(_CONTROLLERSERVICECAPABILITY_RPC_TYPE) + +_VOLUMEUSAGE_UNIT = _descriptor.EnumDescriptor( + name='Unit', + full_name='csi.v1.VolumeUsage.Unit', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BYTES', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INODES', index=2, number=2, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=8589, + serialized_end=8631, +) +_sym_db.RegisterEnumDescriptor(_VOLUMEUSAGE_UNIT) + +_NODESERVICECAPABILITY_RPC_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='csi.v1.NodeServiceCapability.RPC.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='STAGE_UNSTAGE_VOLUME', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GET_VOLUME_STATS', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXPAND_VOLUME', index=3, number=3, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=8885, + serialized_end=8971, +) +_sym_db.RegisterEnumDescriptor(_NODESERVICECAPABILITY_RPC_TYPE) + + +_GETPLUGININFOREQUEST = _descriptor.Descriptor( + name='GetPluginInfoRequest', + full_name='csi.v1.GetPluginInfoRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=120, + serialized_end=142, +) + + +_GETPLUGININFORESPONSE_MANIFESTENTRY = _descriptor.Descriptor( + name='ManifestEntry', + full_name='csi.v1.GetPluginInfoResponse.ManifestEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.GetPluginInfoResponse.ManifestEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.GetPluginInfoResponse.ManifestEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=271, + serialized_end=318, +) + +_GETPLUGININFORESPONSE = _descriptor.Descriptor( + name='GetPluginInfoResponse', + full_name='csi.v1.GetPluginInfoResponse', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='csi.v1.GetPluginInfoResponse.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='vendor_version', full_name='csi.v1.GetPluginInfoResponse.vendor_version', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='manifest', full_name='csi.v1.GetPluginInfoResponse.manifest', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_GETPLUGININFORESPONSE_MANIFESTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=145, + serialized_end=318, +) + + +_GETPLUGINCAPABILITIESREQUEST = _descriptor.Descriptor( + name='GetPluginCapabilitiesRequest', + full_name='csi.v1.GetPluginCapabilitiesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=320, + serialized_end=350, +) + + +_GETPLUGINCAPABILITIESRESPONSE = _descriptor.Descriptor( + name='GetPluginCapabilitiesResponse', + full_name='csi.v1.GetPluginCapabilitiesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='capabilities', full_name='csi.v1.GetPluginCapabilitiesResponse.capabilities', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=352, + serialized_end=431, +) + + +_PLUGINCAPABILITY_SERVICE = _descriptor.Descriptor( + name='Service', + full_name='csi.v1.PluginCapability.Service', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='csi.v1.PluginCapability.Service.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _PLUGINCAPABILITY_SERVICE_TYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=578, + serialized_end=723, +) + +_PLUGINCAPABILITY_VOLUMEEXPANSION = _descriptor.Descriptor( + name='VolumeExpansion', + 
full_name='csi.v1.PluginCapability.VolumeExpansion', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='csi.v1.PluginCapability.VolumeExpansion.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _PLUGINCAPABILITY_VOLUMEEXPANSION_TYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=725, + serialized_end=849, +) + +_PLUGINCAPABILITY = _descriptor.Descriptor( + name='PluginCapability', + full_name='csi.v1.PluginCapability', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='service', full_name='csi.v1.PluginCapability.service', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_expansion', full_name='csi.v1.PluginCapability.volume_expansion', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_PLUGINCAPABILITY_SERVICE, _PLUGINCAPABILITY_VOLUMEEXPANSION, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='csi.v1.PluginCapability.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=434, + serialized_end=857, +) + + +_PROBEREQUEST = _descriptor.Descriptor( + name='ProbeRequest', + full_name='csi.v1.ProbeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=859, + serialized_end=873, +) + + +_PROBERESPONSE = _descriptor.Descriptor( + name='ProbeResponse', + full_name='csi.v1.ProbeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='ready', full_name='csi.v1.ProbeResponse.ready', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=875, + serialized_end=933, +) + + +_CREATEVOLUMEREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='csi.v1.CreateVolumeRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.CreateVolumeRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + 
has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.CreateVolumeRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1329, + serialized_end=1378, +) + +_CREATEVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.CreateVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.CreateVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.CreateVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_CREATEVOLUMEREQUEST = _descriptor.Descriptor( + name='CreateVolumeRequest', + full_name='csi.v1.CreateVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='csi.v1.CreateVolumeRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='capacity_range', full_name='csi.v1.CreateVolumeRequest.capacity_range', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_capabilities', full_name='csi.v1.CreateVolumeRequest.volume_capabilities', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='csi.v1.CreateVolumeRequest.parameters', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.CreateVolumeRequest.secrets', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_content_source', full_name='csi.v1.CreateVolumeRequest.volume_content_source', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='accessibility_requirements', full_name='csi.v1.CreateVolumeRequest.accessibility_requirements', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CREATEVOLUMEREQUEST_PARAMETERSENTRY, _CREATEVOLUMEREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=936, + serialized_end=1426, +) + + +_VOLUMECONTENTSOURCE_SNAPSHOTSOURCE = _descriptor.Descriptor( + name='SnapshotSource', + full_name='csi.v1.VolumeContentSource.SnapshotSource', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshot_id', full_name='csi.v1.VolumeContentSource.SnapshotSource.snapshot_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1576, + serialized_end=1613, +) + +_VOLUMECONTENTSOURCE_VOLUMESOURCE = _descriptor.Descriptor( + name='VolumeSource', + full_name='csi.v1.VolumeContentSource.VolumeSource', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.VolumeContentSource.VolumeSource.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1615, + serialized_end=1648, +) + +_VOLUMECONTENTSOURCE = _descriptor.Descriptor( + name='VolumeContentSource', + full_name='csi.v1.VolumeContentSource', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshot', full_name='csi.v1.VolumeContentSource.snapshot', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume', full_name='csi.v1.VolumeContentSource.volume', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_VOLUMECONTENTSOURCE_SNAPSHOTSOURCE, _VOLUMECONTENTSOURCE_VOLUMESOURCE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='csi.v1.VolumeContentSource.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1429, + serialized_end=1656, +) + + +_CREATEVOLUMERESPONSE = _descriptor.Descriptor( + name='CreateVolumeResponse', + full_name='csi.v1.CreateVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume', full_name='csi.v1.CreateVolumeResponse.volume', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1658, + serialized_end=1712, +) + + +_VOLUMECAPABILITY_BLOCKVOLUME = _descriptor.Descriptor( + name='BlockVolume', + full_name='csi.v1.VolumeCapability.BlockVolume', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1903, + serialized_end=1916, +) + +_VOLUMECAPABILITY_MOUNTVOLUME = _descriptor.Descriptor( + name='MountVolume', + full_name='csi.v1.VolumeCapability.MountVolume', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='fs_type', full_name='csi.v1.VolumeCapability.MountVolume.fs_type', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='mount_flags', full_name='csi.v1.VolumeCapability.MountVolume.mount_flags', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1918, + serialized_end=1969, +) + +_VOLUMECAPABILITY_ACCESSMODE = _descriptor.Descriptor( + name='AccessMode', + full_name='csi.v1.VolumeCapability.AccessMode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', full_name='csi.v1.VolumeCapability.AccessMode.mode', index=0, + number=1, 
type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _VOLUMECAPABILITY_ACCESSMODE_MODE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1972, + serialized_end=2202, +) + +_VOLUMECAPABILITY = _descriptor.Descriptor( + name='VolumeCapability', + full_name='csi.v1.VolumeCapability', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='block', full_name='csi.v1.VolumeCapability.block', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='mount', full_name='csi.v1.VolumeCapability.mount', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='access_mode', full_name='csi.v1.VolumeCapability.access_mode', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_VOLUMECAPABILITY_BLOCKVOLUME, _VOLUMECAPABILITY_MOUNTVOLUME, _VOLUMECAPABILITY_ACCESSMODE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='access_type', full_name='csi.v1.VolumeCapability.access_type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1715, + serialized_end=2217, +) + + +_CAPACITYRANGE = _descriptor.Descriptor( + name='CapacityRange', + full_name='csi.v1.CapacityRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='required_bytes', full_name='csi.v1.CapacityRange.required_bytes', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='limit_bytes', full_name='csi.v1.CapacityRange.limit_bytes', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2219, + serialized_end=2279, +) + + +_VOLUME_VOLUMECONTEXTENTRY = _descriptor.Descriptor( + name='VolumeContextEntry', + full_name='csi.v1.Volume.VolumeContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.Volume.VolumeContextEntry.key', index=0, + 
number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.Volume.VolumeContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2494, + serialized_end=2546, +) + +_VOLUME = _descriptor.Descriptor( + name='Volume', + full_name='csi.v1.Volume', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='capacity_bytes', full_name='csi.v1.Volume.capacity_bytes', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.Volume.volume_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_context', full_name='csi.v1.Volume.volume_context', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='content_source', full_name='csi.v1.Volume.content_source', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='accessible_topology', full_name='csi.v1.Volume.accessible_topology', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_VOLUME_VOLUMECONTEXTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2282, + serialized_end=2546, +) + + +_TOPOLOGYREQUIREMENT = _descriptor.Descriptor( + name='TopologyRequirement', + full_name='csi.v1.TopologyRequirement', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='requisite', full_name='csi.v1.TopologyRequirement.requisite', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + 
name='preferred', full_name='csi.v1.TopologyRequirement.preferred', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2548, + serialized_end=2643, +) + + +_TOPOLOGY_SEGMENTSENTRY = _descriptor.Descriptor( + name='SegmentsEntry', + full_name='csi.v1.Topology.SegmentsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.Topology.SegmentsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.Topology.SegmentsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2707, + serialized_end=2754, +) + +_TOPOLOGY = _descriptor.Descriptor( + name='Topology', + full_name='csi.v1.Topology', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='segments', full_name='csi.v1.Topology.segments', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_TOPOLOGY_SEGMENTSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2645, + serialized_end=2754, +) + + +_DELETEVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.DeleteVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.DeleteVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.DeleteVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=1380, + serialized_end=1426, +) + +_DELETEVOLUMEREQUEST = _descriptor.Descriptor( + name='DeleteVolumeRequest', + full_name='csi.v1.DeleteVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.DeleteVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.DeleteVolumeRequest.secrets', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_DELETEVOLUMEREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2757, + serialized_end=2909, +) + + +_DELETEVOLUMERESPONSE = _descriptor.Descriptor( + name='DeleteVolumeResponse', + full_name='csi.v1.DeleteVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2911, + serialized_end=2933, +) + + +_CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.ControllerPublishVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ControllerPublishVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ControllerPublishVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY = _descriptor.Descriptor( + name='VolumeContextEntry', + full_name='csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', 
full_name='csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2494, + serialized_end=2546, +) + +_CONTROLLERPUBLISHVOLUMEREQUEST = _descriptor.Descriptor( + name='ControllerPublishVolumeRequest', + full_name='csi.v1.ControllerPublishVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.ControllerPublishVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='node_id', full_name='csi.v1.ControllerPublishVolumeRequest.node_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_capability', full_name='csi.v1.ControllerPublishVolumeRequest.volume_capability', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='readonly', full_name='csi.v1.ControllerPublishVolumeRequest.readonly', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.ControllerPublishVolumeRequest.secrets', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_context', full_name='csi.v1.ControllerPublishVolumeRequest.volume_context', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY, _CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2936, + serialized_end=3335, +) + + +_CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY = _descriptor.Descriptor( + name='PublishContextEntry', + full_name='csi.v1.ControllerPublishVolumeResponse.PublishContextEntry', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ControllerPublishVolumeResponse.PublishContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ControllerPublishVolumeResponse.PublishContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3459, + serialized_end=3512, +) + +_CONTROLLERPUBLISHVOLUMERESPONSE = _descriptor.Descriptor( + name='ControllerPublishVolumeResponse', + full_name='csi.v1.ControllerPublishVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='publish_context', full_name='csi.v1.ControllerPublishVolumeResponse.publish_context', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3338, + serialized_end=3512, +) + + +_CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_CONTROLLERUNPUBLISHVOLUMEREQUEST = _descriptor.Descriptor( + name='ControllerUnpublishVolumeRequest', + full_name='csi.v1.ControllerUnpublishVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.ControllerUnpublishVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + 
has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='node_id', full_name='csi.v1.ControllerUnpublishVolumeRequest.node_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.ControllerUnpublishVolumeRequest.secrets', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3515, + serialized_end=3710, +) + + +_CONTROLLERUNPUBLISHVOLUMERESPONSE = _descriptor.Descriptor( + name='ControllerUnpublishVolumeResponse', + full_name='csi.v1.ControllerUnpublishVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3712, + serialized_end=3747, +) + + +_VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY = _descriptor.Descriptor( + name='VolumeContextEntry', + full_name='csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2494, + serialized_end=2546, +) + +_VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1329, + serialized_end=1378, +) + +_VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_VALIDATEVOLUMECAPABILITIESREQUEST = _descriptor.Descriptor( + name='ValidateVolumeCapabilitiesRequest', + full_name='csi.v1.ValidateVolumeCapabilitiesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_context', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.volume_context', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_capabilities', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.volume_capabilities', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.parameters', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.ValidateVolumeCapabilitiesRequest.secrets', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY, _VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY, _VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3750, + serialized_end=4255, +) + + +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY = _descriptor.Descriptor( + name='VolumeContextEntry', + full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2494, + serialized_end=2546, +) + +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1329, + serialized_end=1378, +) + +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED = _descriptor.Descriptor( + name='Confirmed', + 
full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_context', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.volume_context', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_capabilities', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.volume_capabilities', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.parameters', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY, _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4387, + serialized_end=4745, +) + +_VALIDATEVOLUMECAPABILITIESRESPONSE = _descriptor.Descriptor( + name='ValidateVolumeCapabilitiesResponse', + full_name='csi.v1.ValidateVolumeCapabilitiesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='confirmed', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.confirmed', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='message', full_name='csi.v1.ValidateVolumeCapabilitiesResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4258, + serialized_end=4745, +) + + +_LISTVOLUMESREQUEST = _descriptor.Descriptor( + name='ListVolumesRequest', + full_name='csi.v1.ListVolumesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_entries', full_name='csi.v1.ListVolumesRequest.max_entries', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='starting_token', full_name='csi.v1.ListVolumesRequest.starting_token', index=1, + number=2, 
type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4747, + serialized_end=4812, +) + + +_LISTVOLUMESRESPONSE_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='csi.v1.ListVolumesResponse.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume', full_name='csi.v1.ListVolumesResponse.Entry.volume', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4910, + serialized_end=4949, +) + +_LISTVOLUMESRESPONSE = _descriptor.Descriptor( + name='ListVolumesResponse', + full_name='csi.v1.ListVolumesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='csi.v1.ListVolumesResponse.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='next_token', full_name='csi.v1.ListVolumesResponse.next_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_LISTVOLUMESRESPONSE_ENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4815, + serialized_end=4949, +) + + +_GETCAPACITYREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='csi.v1.GetCapacityRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.GetCapacityRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.GetCapacityRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1329, + 
serialized_end=1378, +) + +_GETCAPACITYREQUEST = _descriptor.Descriptor( + name='GetCapacityRequest', + full_name='csi.v1.GetCapacityRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_capabilities', full_name='csi.v1.GetCapacityRequest.volume_capabilities', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='csi.v1.GetCapacityRequest.parameters', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='accessible_topology', full_name='csi.v1.GetCapacityRequest.accessible_topology', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_GETCAPACITYREQUEST_PARAMETERSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4952, + serialized_end=5189, +) + + +_GETCAPACITYRESPONSE = _descriptor.Descriptor( + name='GetCapacityResponse', + full_name='csi.v1.GetCapacityResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='available_capacity', full_name='csi.v1.GetCapacityResponse.available_capacity', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5191, + serialized_end=5240, +) + + +_CONTROLLERGETCAPABILITIESREQUEST = _descriptor.Descriptor( + name='ControllerGetCapabilitiesRequest', + full_name='csi.v1.ControllerGetCapabilitiesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5242, + serialized_end=5276, +) + + +_CONTROLLERGETCAPABILITIESRESPONSE = _descriptor.Descriptor( + name='ControllerGetCapabilitiesResponse', + full_name='csi.v1.ControllerGetCapabilitiesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='capabilities', full_name='csi.v1.ControllerGetCapabilitiesResponse.capabilities', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + 
oneofs=[ + ], + serialized_start=5278, + serialized_end=5372, +) + + +_CONTROLLERSERVICECAPABILITY_RPC = _descriptor.Descriptor( + name='RPC', + full_name='csi.v1.ControllerServiceCapability.RPC', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='csi.v1.ControllerServiceCapability.RPC.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CONTROLLERSERVICECAPABILITY_RPC_TYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5463, + serialized_end=5749, +) + +_CONTROLLERSERVICECAPABILITY = _descriptor.Descriptor( + name='ControllerServiceCapability', + full_name='csi.v1.ControllerServiceCapability', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rpc', full_name='csi.v1.ControllerServiceCapability.rpc', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CONTROLLERSERVICECAPABILITY_RPC, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='csi.v1.ControllerServiceCapability.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=5375, + serialized_end=5757, +) + + +_CREATESNAPSHOTREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.CreateSnapshotRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.CreateSnapshotRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.CreateSnapshotRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_CREATESNAPSHOTREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='csi.v1.CreateSnapshotRequest.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.CreateSnapshotRequest.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.CreateSnapshotRequest.ParametersEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1329, + serialized_end=1378, +) + +_CREATESNAPSHOTREQUEST = _descriptor.Descriptor( + name='CreateSnapshotRequest', + full_name='csi.v1.CreateSnapshotRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='source_volume_id', full_name='csi.v1.CreateSnapshotRequest.source_volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='name', full_name='csi.v1.CreateSnapshotRequest.name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.CreateSnapshotRequest.secrets', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='parameters', full_name='csi.v1.CreateSnapshotRequest.parameters', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CREATESNAPSHOTREQUEST_SECRETSENTRY, _CREATESNAPSHOTREQUEST_PARAMETERSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5760, + serialized_end=6055, +) + + +_CREATESNAPSHOTRESPONSE = _descriptor.Descriptor( + name='CreateSnapshotResponse', + full_name='csi.v1.CreateSnapshotResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshot', full_name='csi.v1.CreateSnapshotResponse.snapshot', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6057, + serialized_end=6117, +) + + +_SNAPSHOT = _descriptor.Descriptor( + name='Snapshot', + full_name='csi.v1.Snapshot', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='size_bytes', full_name='csi.v1.Snapshot.size_bytes', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='snapshot_id', full_name='csi.v1.Snapshot.snapshot_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='source_volume_id', full_name='csi.v1.Snapshot.source_volume_id', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='creation_time', full_name='csi.v1.Snapshot.creation_time', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='ready_to_use', full_name='csi.v1.Snapshot.ready_to_use', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6120, + serialized_end=6270, +) + + +_DELETESNAPSHOTREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.DeleteSnapshotRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.DeleteSnapshotRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.DeleteSnapshotRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_DELETESNAPSHOTREQUEST = _descriptor.Descriptor( + name='DeleteSnapshotRequest', + full_name='csi.v1.DeleteSnapshotRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshot_id', full_name='csi.v1.DeleteSnapshotRequest.snapshot_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.DeleteSnapshotRequest.secrets', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_DELETESNAPSHOTREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6273, + serialized_end=6431, +) + + +_DELETESNAPSHOTRESPONSE = _descriptor.Descriptor( + name='DeleteSnapshotResponse', + full_name='csi.v1.DeleteSnapshotResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6433, + serialized_end=6457, +) + + +_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( + name='ListSnapshotsRequest', + full_name='csi.v1.ListSnapshotsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_entries', full_name='csi.v1.ListSnapshotsRequest.max_entries', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='starting_token', full_name='csi.v1.ListSnapshotsRequest.starting_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='source_volume_id', full_name='csi.v1.ListSnapshotsRequest.source_volume_id', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='snapshot_id', full_name='csi.v1.ListSnapshotsRequest.snapshot_id', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6459, + serialized_end=6573, +) + + +_LISTSNAPSHOTSRESPONSE_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='csi.v1.ListSnapshotsResponse.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshot', full_name='csi.v1.ListSnapshotsResponse.Entry.snapshot', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6675, + serialized_end=6718, +) + +_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( + name='ListSnapshotsResponse', + full_name='csi.v1.ListSnapshotsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='csi.v1.ListSnapshotsResponse.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='next_token', full_name='csi.v1.ListSnapshotsResponse.next_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_LISTSNAPSHOTSRESPONSE_ENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6576, + serialized_end=6718, +) + + +_CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.ControllerExpandVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.ControllerExpandVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.ControllerExpandVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_CONTROLLEREXPANDVOLUMEREQUEST = _descriptor.Descriptor( + name='ControllerExpandVolumeRequest', + full_name='csi.v1.ControllerExpandVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.ControllerExpandVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='capacity_range', full_name='csi.v1.ControllerExpandVolumeRequest.capacity_range', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.ControllerExpandVolumeRequest.secrets', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6721, + serialized_end=6940, +) + + +_CONTROLLEREXPANDVOLUMERESPONSE = _descriptor.Descriptor( + name='ControllerExpandVolumeResponse', + full_name='csi.v1.ControllerExpandVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='capacity_bytes', full_name='csi.v1.ControllerExpandVolumeResponse.capacity_bytes', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='node_expansion_required', full_name='csi.v1.ControllerExpandVolumeResponse.node_expansion_required', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6942, + serialized_end=7031, +) + + +_NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY = _descriptor.Descriptor( + name='PublishContextEntry', + full_name='csi.v1.NodeStageVolumeRequest.PublishContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.NodeStageVolumeRequest.PublishContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.NodeStageVolumeRequest.PublishContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3459, + serialized_end=3512, +) + +_NODESTAGEVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.NodeStageVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.NodeStageVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, 
cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.NodeStageVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY = _descriptor.Descriptor( + name='VolumeContextEntry', + full_name='csi.v1.NodeStageVolumeRequest.VolumeContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.NodeStageVolumeRequest.VolumeContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.NodeStageVolumeRequest.VolumeContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2494, + serialized_end=2546, +) + +_NODESTAGEVOLUMEREQUEST = _descriptor.Descriptor( + name='NodeStageVolumeRequest', + full_name='csi.v1.NodeStageVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.NodeStageVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='publish_context', full_name='csi.v1.NodeStageVolumeRequest.publish_context', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='staging_target_path', full_name='csi.v1.NodeStageVolumeRequest.staging_target_path', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_capability', full_name='csi.v1.NodeStageVolumeRequest.volume_capability', index=3, + number=4, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.NodeStageVolumeRequest.secrets', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_context', full_name='csi.v1.NodeStageVolumeRequest.volume_context', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY, _NODESTAGEVOLUMEREQUEST_SECRETSENTRY, _NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=7034, + serialized_end=7535, +) + + +_NODESTAGEVOLUMERESPONSE = _descriptor.Descriptor( + name='NodeStageVolumeResponse', + full_name='csi.v1.NodeStageVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=7537, + serialized_end=7562, +) + + +_NODEUNSTAGEVOLUMEREQUEST = _descriptor.Descriptor( + name='NodeUnstageVolumeRequest', + full_name='csi.v1.NodeUnstageVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.NodeUnstageVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='staging_target_path', full_name='csi.v1.NodeUnstageVolumeRequest.staging_target_path', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=7564, + serialized_end=7638, +) + + +_NODEUNSTAGEVOLUMERESPONSE = _descriptor.Descriptor( + name='NodeUnstageVolumeResponse', + full_name='csi.v1.NodeUnstageVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=7640, + serialized_end=7667, +) + + +_NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY = _descriptor.Descriptor( + name='PublishContextEntry', + full_name='csi.v1.NodePublishVolumeRequest.PublishContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + 
fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.NodePublishVolumeRequest.PublishContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.NodePublishVolumeRequest.PublishContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3459, + serialized_end=3512, +) + +_NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY = _descriptor.Descriptor( + name='SecretsEntry', + full_name='csi.v1.NodePublishVolumeRequest.SecretsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.NodePublishVolumeRequest.SecretsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.NodePublishVolumeRequest.SecretsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1380, + serialized_end=1426, +) + +_NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY = _descriptor.Descriptor( + name='VolumeContextEntry', + full_name='csi.v1.NodePublishVolumeRequest.VolumeContextEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='csi.v1.NodePublishVolumeRequest.VolumeContextEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='csi.v1.NodePublishVolumeRequest.VolumeContextEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=_b('8\001'), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2494, + serialized_end=2546, +) + +_NODEPUBLISHVOLUMEREQUEST = _descriptor.Descriptor( + name='NodePublishVolumeRequest', + 
full_name='csi.v1.NodePublishVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.NodePublishVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='publish_context', full_name='csi.v1.NodePublishVolumeRequest.publish_context', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='staging_target_path', full_name='csi.v1.NodePublishVolumeRequest.staging_target_path', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='target_path', full_name='csi.v1.NodePublishVolumeRequest.target_path', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_capability', full_name='csi.v1.NodePublishVolumeRequest.volume_capability', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='readonly', full_name='csi.v1.NodePublishVolumeRequest.readonly', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='secrets', full_name='csi.v1.NodePublishVolumeRequest.secrets', index=6, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=_b('\230B\001'), file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_context', full_name='csi.v1.NodePublishVolumeRequest.volume_context', index=7, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY, _NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY, _NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=7670, + serialized_end=8218, +) + + +_NODEPUBLISHVOLUMERESPONSE = _descriptor.Descriptor( + name='NodePublishVolumeResponse', + full_name='csi.v1.NodePublishVolumeResponse', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8220, + serialized_end=8247, +) + + +_NODEUNPUBLISHVOLUMEREQUEST = _descriptor.Descriptor( + name='NodeUnpublishVolumeRequest', + full_name='csi.v1.NodeUnpublishVolumeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.NodeUnpublishVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='target_path', full_name='csi.v1.NodeUnpublishVolumeRequest.target_path', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8249, + serialized_end=8317, +) + + +_NODEUNPUBLISHVOLUMERESPONSE = _descriptor.Descriptor( + name='NodeUnpublishVolumeResponse', + full_name='csi.v1.NodeUnpublishVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8319, + serialized_end=8348, +) + + +_NODEGETVOLUMESTATSREQUEST = _descriptor.Descriptor( + name='NodeGetVolumeStatsRequest', + full_name='csi.v1.NodeGetVolumeStatsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.NodeGetVolumeStatsRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_path', full_name='csi.v1.NodeGetVolumeStatsRequest.volume_path', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8350, + serialized_end=8417, +) + + +_NODEGETVOLUMESTATSRESPONSE = _descriptor.Descriptor( + name='NodeGetVolumeStatsResponse', + full_name='csi.v1.NodeGetVolumeStatsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='usage', full_name='csi.v1.NodeGetVolumeStatsResponse.usage', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8419, + serialized_end=8483, +) + + +_VOLUMEUSAGE = _descriptor.Descriptor( + name='VolumeUsage', + full_name='csi.v1.VolumeUsage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='available', full_name='csi.v1.VolumeUsage.available', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='total', full_name='csi.v1.VolumeUsage.total', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='used', full_name='csi.v1.VolumeUsage.used', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='unit', full_name='csi.v1.VolumeUsage.unit', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _VOLUMEUSAGE_UNIT, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8486, + serialized_end=8631, +) + + +_NODEGETCAPABILITIESREQUEST = _descriptor.Descriptor( + name='NodeGetCapabilitiesRequest', + full_name='csi.v1.NodeGetCapabilitiesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8633, + serialized_end=8661, +) + + +_NODEGETCAPABILITIESRESPONSE = _descriptor.Descriptor( + name='NodeGetCapabilitiesResponse', + full_name='csi.v1.NodeGetCapabilitiesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='capabilities', full_name='csi.v1.NodeGetCapabilitiesResponse.capabilities', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8663, + serialized_end=8745, +) + + +_NODESERVICECAPABILITY_RPC = _descriptor.Descriptor( + name='RPC', + full_name='csi.v1.NodeServiceCapability.RPC', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', 
full_name='csi.v1.NodeServiceCapability.RPC.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _NODESERVICECAPABILITY_RPC_TYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8824, + serialized_end=8971, +) + +_NODESERVICECAPABILITY = _descriptor.Descriptor( + name='NodeServiceCapability', + full_name='csi.v1.NodeServiceCapability', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rpc', full_name='csi.v1.NodeServiceCapability.rpc', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_NODESERVICECAPABILITY_RPC, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='csi.v1.NodeServiceCapability.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=8748, + serialized_end=8979, +) + + +_NODEGETINFOREQUEST = _descriptor.Descriptor( + name='NodeGetInfoRequest', + full_name='csi.v1.NodeGetInfoRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=8981, + serialized_end=9001, +) + + +_NODEGETINFORESPONSE = _descriptor.Descriptor( + name='NodeGetInfoResponse', + full_name='csi.v1.NodeGetInfoResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='node_id', full_name='csi.v1.NodeGetInfoResponse.node_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='max_volumes_per_node', full_name='csi.v1.NodeGetInfoResponse.max_volumes_per_node', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='accessible_topology', full_name='csi.v1.NodeGetInfoResponse.accessible_topology', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=9003, + serialized_end=9118, +) + + +_NODEEXPANDVOLUMEREQUEST = _descriptor.Descriptor( + name='NodeExpandVolumeRequest', + full_name='csi.v1.NodeExpandVolumeRequest', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='volume_id', full_name='csi.v1.NodeExpandVolumeRequest.volume_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='volume_path', full_name='csi.v1.NodeExpandVolumeRequest.volume_path', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='capacity_range', full_name='csi.v1.NodeExpandVolumeRequest.capacity_range', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=9120, + serialized_end=9232, +) + + +_NODEEXPANDVOLUMERESPONSE = _descriptor.Descriptor( + name='NodeExpandVolumeResponse', + full_name='csi.v1.NodeExpandVolumeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='capacity_bytes', full_name='csi.v1.NodeExpandVolumeResponse.capacity_bytes', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=9234, + serialized_end=9284, +) + +_GETPLUGININFORESPONSE_MANIFESTENTRY.containing_type = _GETPLUGININFORESPONSE +_GETPLUGININFORESPONSE.fields_by_name['manifest'].message_type = _GETPLUGININFORESPONSE_MANIFESTENTRY +_GETPLUGINCAPABILITIESRESPONSE.fields_by_name['capabilities'].message_type = _PLUGINCAPABILITY +_PLUGINCAPABILITY_SERVICE.fields_by_name['type'].enum_type = _PLUGINCAPABILITY_SERVICE_TYPE +_PLUGINCAPABILITY_SERVICE.containing_type = _PLUGINCAPABILITY +_PLUGINCAPABILITY_SERVICE_TYPE.containing_type = _PLUGINCAPABILITY_SERVICE +_PLUGINCAPABILITY_VOLUMEEXPANSION.fields_by_name['type'].enum_type = _PLUGINCAPABILITY_VOLUMEEXPANSION_TYPE +_PLUGINCAPABILITY_VOLUMEEXPANSION.containing_type = _PLUGINCAPABILITY +_PLUGINCAPABILITY_VOLUMEEXPANSION_TYPE.containing_type = _PLUGINCAPABILITY_VOLUMEEXPANSION +_PLUGINCAPABILITY.fields_by_name['service'].message_type = _PLUGINCAPABILITY_SERVICE +_PLUGINCAPABILITY.fields_by_name['volume_expansion'].message_type = _PLUGINCAPABILITY_VOLUMEEXPANSION +_PLUGINCAPABILITY.oneofs_by_name['type'].fields.append( + _PLUGINCAPABILITY.fields_by_name['service']) +_PLUGINCAPABILITY.fields_by_name['service'].containing_oneof = _PLUGINCAPABILITY.oneofs_by_name['type'] +_PLUGINCAPABILITY.oneofs_by_name['type'].fields.append( + _PLUGINCAPABILITY.fields_by_name['volume_expansion']) +_PLUGINCAPABILITY.fields_by_name['volume_expansion'].containing_oneof = 
_PLUGINCAPABILITY.oneofs_by_name['type'] +_PROBERESPONSE.fields_by_name['ready'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE +_CREATEVOLUMEREQUEST_PARAMETERSENTRY.containing_type = _CREATEVOLUMEREQUEST +_CREATEVOLUMEREQUEST_SECRETSENTRY.containing_type = _CREATEVOLUMEREQUEST +_CREATEVOLUMEREQUEST.fields_by_name['capacity_range'].message_type = _CAPACITYRANGE +_CREATEVOLUMEREQUEST.fields_by_name['volume_capabilities'].message_type = _VOLUMECAPABILITY +_CREATEVOLUMEREQUEST.fields_by_name['parameters'].message_type = _CREATEVOLUMEREQUEST_PARAMETERSENTRY +_CREATEVOLUMEREQUEST.fields_by_name['secrets'].message_type = _CREATEVOLUMEREQUEST_SECRETSENTRY +_CREATEVOLUMEREQUEST.fields_by_name['volume_content_source'].message_type = _VOLUMECONTENTSOURCE +_CREATEVOLUMEREQUEST.fields_by_name['accessibility_requirements'].message_type = _TOPOLOGYREQUIREMENT +_VOLUMECONTENTSOURCE_SNAPSHOTSOURCE.containing_type = _VOLUMECONTENTSOURCE +_VOLUMECONTENTSOURCE_VOLUMESOURCE.containing_type = _VOLUMECONTENTSOURCE +_VOLUMECONTENTSOURCE.fields_by_name['snapshot'].message_type = _VOLUMECONTENTSOURCE_SNAPSHOTSOURCE +_VOLUMECONTENTSOURCE.fields_by_name['volume'].message_type = _VOLUMECONTENTSOURCE_VOLUMESOURCE +_VOLUMECONTENTSOURCE.oneofs_by_name['type'].fields.append( + _VOLUMECONTENTSOURCE.fields_by_name['snapshot']) +_VOLUMECONTENTSOURCE.fields_by_name['snapshot'].containing_oneof = _VOLUMECONTENTSOURCE.oneofs_by_name['type'] +_VOLUMECONTENTSOURCE.oneofs_by_name['type'].fields.append( + _VOLUMECONTENTSOURCE.fields_by_name['volume']) +_VOLUMECONTENTSOURCE.fields_by_name['volume'].containing_oneof = _VOLUMECONTENTSOURCE.oneofs_by_name['type'] +_CREATEVOLUMERESPONSE.fields_by_name['volume'].message_type = _VOLUME +_VOLUMECAPABILITY_BLOCKVOLUME.containing_type = _VOLUMECAPABILITY +_VOLUMECAPABILITY_MOUNTVOLUME.containing_type = _VOLUMECAPABILITY +_VOLUMECAPABILITY_ACCESSMODE.fields_by_name['mode'].enum_type = _VOLUMECAPABILITY_ACCESSMODE_MODE +_VOLUMECAPABILITY_ACCESSMODE.containing_type = _VOLUMECAPABILITY +_VOLUMECAPABILITY_ACCESSMODE_MODE.containing_type = _VOLUMECAPABILITY_ACCESSMODE +_VOLUMECAPABILITY.fields_by_name['block'].message_type = _VOLUMECAPABILITY_BLOCKVOLUME +_VOLUMECAPABILITY.fields_by_name['mount'].message_type = _VOLUMECAPABILITY_MOUNTVOLUME +_VOLUMECAPABILITY.fields_by_name['access_mode'].message_type = _VOLUMECAPABILITY_ACCESSMODE +_VOLUMECAPABILITY.oneofs_by_name['access_type'].fields.append( + _VOLUMECAPABILITY.fields_by_name['block']) +_VOLUMECAPABILITY.fields_by_name['block'].containing_oneof = _VOLUMECAPABILITY.oneofs_by_name['access_type'] +_VOLUMECAPABILITY.oneofs_by_name['access_type'].fields.append( + _VOLUMECAPABILITY.fields_by_name['mount']) +_VOLUMECAPABILITY.fields_by_name['mount'].containing_oneof = _VOLUMECAPABILITY.oneofs_by_name['access_type'] +_VOLUME_VOLUMECONTEXTENTRY.containing_type = _VOLUME +_VOLUME.fields_by_name['volume_context'].message_type = _VOLUME_VOLUMECONTEXTENTRY +_VOLUME.fields_by_name['content_source'].message_type = _VOLUMECONTENTSOURCE +_VOLUME.fields_by_name['accessible_topology'].message_type = _TOPOLOGY +_TOPOLOGYREQUIREMENT.fields_by_name['requisite'].message_type = _TOPOLOGY +_TOPOLOGYREQUIREMENT.fields_by_name['preferred'].message_type = _TOPOLOGY +_TOPOLOGY_SEGMENTSENTRY.containing_type = _TOPOLOGY +_TOPOLOGY.fields_by_name['segments'].message_type = _TOPOLOGY_SEGMENTSENTRY +_DELETEVOLUMEREQUEST_SECRETSENTRY.containing_type = _DELETEVOLUMEREQUEST +_DELETEVOLUMEREQUEST.fields_by_name['secrets'].message_type = 
_DELETEVOLUMEREQUEST_SECRETSENTRY +_CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY.containing_type = _CONTROLLERPUBLISHVOLUMEREQUEST +_CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY.containing_type = _CONTROLLERPUBLISHVOLUMEREQUEST +_CONTROLLERPUBLISHVOLUMEREQUEST.fields_by_name['volume_capability'].message_type = _VOLUMECAPABILITY +_CONTROLLERPUBLISHVOLUMEREQUEST.fields_by_name['secrets'].message_type = _CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY +_CONTROLLERPUBLISHVOLUMEREQUEST.fields_by_name['volume_context'].message_type = _CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY +_CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY.containing_type = _CONTROLLERPUBLISHVOLUMERESPONSE +_CONTROLLERPUBLISHVOLUMERESPONSE.fields_by_name['publish_context'].message_type = _CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY +_CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY.containing_type = _CONTROLLERUNPUBLISHVOLUMEREQUEST +_CONTROLLERUNPUBLISHVOLUMEREQUEST.fields_by_name['secrets'].message_type = _CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY +_VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY.containing_type = _VALIDATEVOLUMECAPABILITIESREQUEST +_VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY.containing_type = _VALIDATEVOLUMECAPABILITIESREQUEST +_VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY.containing_type = _VALIDATEVOLUMECAPABILITIESREQUEST +_VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['volume_context'].message_type = _VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY +_VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['volume_capabilities'].message_type = _VOLUMECAPABILITY +_VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['parameters'].message_type = _VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY +_VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['secrets'].message_type = _VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY.containing_type = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY.containing_type = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED.fields_by_name['volume_context'].message_type = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED.fields_by_name['volume_capabilities'].message_type = _VOLUMECAPABILITY +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED.fields_by_name['parameters'].message_type = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED.containing_type = _VALIDATEVOLUMECAPABILITIESRESPONSE +_VALIDATEVOLUMECAPABILITIESRESPONSE.fields_by_name['confirmed'].message_type = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED +_LISTVOLUMESRESPONSE_ENTRY.fields_by_name['volume'].message_type = _VOLUME +_LISTVOLUMESRESPONSE_ENTRY.containing_type = _LISTVOLUMESRESPONSE +_LISTVOLUMESRESPONSE.fields_by_name['entries'].message_type = _LISTVOLUMESRESPONSE_ENTRY +_GETCAPACITYREQUEST_PARAMETERSENTRY.containing_type = _GETCAPACITYREQUEST +_GETCAPACITYREQUEST.fields_by_name['volume_capabilities'].message_type = _VOLUMECAPABILITY +_GETCAPACITYREQUEST.fields_by_name['parameters'].message_type = _GETCAPACITYREQUEST_PARAMETERSENTRY +_GETCAPACITYREQUEST.fields_by_name['accessible_topology'].message_type = _TOPOLOGY +_CONTROLLERGETCAPABILITIESRESPONSE.fields_by_name['capabilities'].message_type = _CONTROLLERSERVICECAPABILITY +_CONTROLLERSERVICECAPABILITY_RPC.fields_by_name['type'].enum_type = 
_CONTROLLERSERVICECAPABILITY_RPC_TYPE +_CONTROLLERSERVICECAPABILITY_RPC.containing_type = _CONTROLLERSERVICECAPABILITY +_CONTROLLERSERVICECAPABILITY_RPC_TYPE.containing_type = _CONTROLLERSERVICECAPABILITY_RPC +_CONTROLLERSERVICECAPABILITY.fields_by_name['rpc'].message_type = _CONTROLLERSERVICECAPABILITY_RPC +_CONTROLLERSERVICECAPABILITY.oneofs_by_name['type'].fields.append( + _CONTROLLERSERVICECAPABILITY.fields_by_name['rpc']) +_CONTROLLERSERVICECAPABILITY.fields_by_name['rpc'].containing_oneof = _CONTROLLERSERVICECAPABILITY.oneofs_by_name['type'] +_CREATESNAPSHOTREQUEST_SECRETSENTRY.containing_type = _CREATESNAPSHOTREQUEST +_CREATESNAPSHOTREQUEST_PARAMETERSENTRY.containing_type = _CREATESNAPSHOTREQUEST +_CREATESNAPSHOTREQUEST.fields_by_name['secrets'].message_type = _CREATESNAPSHOTREQUEST_SECRETSENTRY +_CREATESNAPSHOTREQUEST.fields_by_name['parameters'].message_type = _CREATESNAPSHOTREQUEST_PARAMETERSENTRY +_CREATESNAPSHOTRESPONSE.fields_by_name['snapshot'].message_type = _SNAPSHOT +_SNAPSHOT.fields_by_name['creation_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_DELETESNAPSHOTREQUEST_SECRETSENTRY.containing_type = _DELETESNAPSHOTREQUEST +_DELETESNAPSHOTREQUEST.fields_by_name['secrets'].message_type = _DELETESNAPSHOTREQUEST_SECRETSENTRY +_LISTSNAPSHOTSRESPONSE_ENTRY.fields_by_name['snapshot'].message_type = _SNAPSHOT +_LISTSNAPSHOTSRESPONSE_ENTRY.containing_type = _LISTSNAPSHOTSRESPONSE +_LISTSNAPSHOTSRESPONSE.fields_by_name['entries'].message_type = _LISTSNAPSHOTSRESPONSE_ENTRY +_CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY.containing_type = _CONTROLLEREXPANDVOLUMEREQUEST +_CONTROLLEREXPANDVOLUMEREQUEST.fields_by_name['capacity_range'].message_type = _CAPACITYRANGE +_CONTROLLEREXPANDVOLUMEREQUEST.fields_by_name['secrets'].message_type = _CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY +_NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY.containing_type = _NODESTAGEVOLUMEREQUEST +_NODESTAGEVOLUMEREQUEST_SECRETSENTRY.containing_type = _NODESTAGEVOLUMEREQUEST +_NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY.containing_type = _NODESTAGEVOLUMEREQUEST +_NODESTAGEVOLUMEREQUEST.fields_by_name['publish_context'].message_type = _NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY +_NODESTAGEVOLUMEREQUEST.fields_by_name['volume_capability'].message_type = _VOLUMECAPABILITY +_NODESTAGEVOLUMEREQUEST.fields_by_name['secrets'].message_type = _NODESTAGEVOLUMEREQUEST_SECRETSENTRY +_NODESTAGEVOLUMEREQUEST.fields_by_name['volume_context'].message_type = _NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY +_NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY.containing_type = _NODEPUBLISHVOLUMEREQUEST +_NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY.containing_type = _NODEPUBLISHVOLUMEREQUEST +_NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY.containing_type = _NODEPUBLISHVOLUMEREQUEST +_NODEPUBLISHVOLUMEREQUEST.fields_by_name['publish_context'].message_type = _NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY +_NODEPUBLISHVOLUMEREQUEST.fields_by_name['volume_capability'].message_type = _VOLUMECAPABILITY +_NODEPUBLISHVOLUMEREQUEST.fields_by_name['secrets'].message_type = _NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY +_NODEPUBLISHVOLUMEREQUEST.fields_by_name['volume_context'].message_type = _NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY +_NODEGETVOLUMESTATSRESPONSE.fields_by_name['usage'].message_type = _VOLUMEUSAGE +_VOLUMEUSAGE.fields_by_name['unit'].enum_type = _VOLUMEUSAGE_UNIT +_VOLUMEUSAGE_UNIT.containing_type = _VOLUMEUSAGE +_NODEGETCAPABILITIESRESPONSE.fields_by_name['capabilities'].message_type = _NODESERVICECAPABILITY 
+_NODESERVICECAPABILITY_RPC.fields_by_name['type'].enum_type = _NODESERVICECAPABILITY_RPC_TYPE +_NODESERVICECAPABILITY_RPC.containing_type = _NODESERVICECAPABILITY +_NODESERVICECAPABILITY_RPC_TYPE.containing_type = _NODESERVICECAPABILITY_RPC +_NODESERVICECAPABILITY.fields_by_name['rpc'].message_type = _NODESERVICECAPABILITY_RPC +_NODESERVICECAPABILITY.oneofs_by_name['type'].fields.append( + _NODESERVICECAPABILITY.fields_by_name['rpc']) +_NODESERVICECAPABILITY.fields_by_name['rpc'].containing_oneof = _NODESERVICECAPABILITY.oneofs_by_name['type'] +_NODEGETINFORESPONSE.fields_by_name['accessible_topology'].message_type = _TOPOLOGY +_NODEEXPANDVOLUMEREQUEST.fields_by_name['capacity_range'].message_type = _CAPACITYRANGE +DESCRIPTOR.message_types_by_name['GetPluginInfoRequest'] = _GETPLUGININFOREQUEST +DESCRIPTOR.message_types_by_name['GetPluginInfoResponse'] = _GETPLUGININFORESPONSE +DESCRIPTOR.message_types_by_name['GetPluginCapabilitiesRequest'] = _GETPLUGINCAPABILITIESREQUEST +DESCRIPTOR.message_types_by_name['GetPluginCapabilitiesResponse'] = _GETPLUGINCAPABILITIESRESPONSE +DESCRIPTOR.message_types_by_name['PluginCapability'] = _PLUGINCAPABILITY +DESCRIPTOR.message_types_by_name['ProbeRequest'] = _PROBEREQUEST +DESCRIPTOR.message_types_by_name['ProbeResponse'] = _PROBERESPONSE +DESCRIPTOR.message_types_by_name['CreateVolumeRequest'] = _CREATEVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['VolumeContentSource'] = _VOLUMECONTENTSOURCE +DESCRIPTOR.message_types_by_name['CreateVolumeResponse'] = _CREATEVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['VolumeCapability'] = _VOLUMECAPABILITY +DESCRIPTOR.message_types_by_name['CapacityRange'] = _CAPACITYRANGE +DESCRIPTOR.message_types_by_name['Volume'] = _VOLUME +DESCRIPTOR.message_types_by_name['TopologyRequirement'] = _TOPOLOGYREQUIREMENT +DESCRIPTOR.message_types_by_name['Topology'] = _TOPOLOGY +DESCRIPTOR.message_types_by_name['DeleteVolumeRequest'] = _DELETEVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['DeleteVolumeResponse'] = _DELETEVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['ControllerPublishVolumeRequest'] = _CONTROLLERPUBLISHVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['ControllerPublishVolumeResponse'] = _CONTROLLERPUBLISHVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['ControllerUnpublishVolumeRequest'] = _CONTROLLERUNPUBLISHVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['ControllerUnpublishVolumeResponse'] = _CONTROLLERUNPUBLISHVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['ValidateVolumeCapabilitiesRequest'] = _VALIDATEVOLUMECAPABILITIESREQUEST +DESCRIPTOR.message_types_by_name['ValidateVolumeCapabilitiesResponse'] = _VALIDATEVOLUMECAPABILITIESRESPONSE +DESCRIPTOR.message_types_by_name['ListVolumesRequest'] = _LISTVOLUMESREQUEST +DESCRIPTOR.message_types_by_name['ListVolumesResponse'] = _LISTVOLUMESRESPONSE +DESCRIPTOR.message_types_by_name['GetCapacityRequest'] = _GETCAPACITYREQUEST +DESCRIPTOR.message_types_by_name['GetCapacityResponse'] = _GETCAPACITYRESPONSE +DESCRIPTOR.message_types_by_name['ControllerGetCapabilitiesRequest'] = _CONTROLLERGETCAPABILITIESREQUEST +DESCRIPTOR.message_types_by_name['ControllerGetCapabilitiesResponse'] = _CONTROLLERGETCAPABILITIESRESPONSE +DESCRIPTOR.message_types_by_name['ControllerServiceCapability'] = _CONTROLLERSERVICECAPABILITY +DESCRIPTOR.message_types_by_name['CreateSnapshotRequest'] = _CREATESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name['CreateSnapshotResponse'] = _CREATESNAPSHOTRESPONSE +DESCRIPTOR.message_types_by_name['Snapshot'] = _SNAPSHOT 
+DESCRIPTOR.message_types_by_name['DeleteSnapshotRequest'] = _DELETESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name['DeleteSnapshotResponse'] = _DELETESNAPSHOTRESPONSE +DESCRIPTOR.message_types_by_name['ListSnapshotsRequest'] = _LISTSNAPSHOTSREQUEST +DESCRIPTOR.message_types_by_name['ListSnapshotsResponse'] = _LISTSNAPSHOTSRESPONSE +DESCRIPTOR.message_types_by_name['ControllerExpandVolumeRequest'] = _CONTROLLEREXPANDVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['ControllerExpandVolumeResponse'] = _CONTROLLEREXPANDVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['NodeStageVolumeRequest'] = _NODESTAGEVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['NodeStageVolumeResponse'] = _NODESTAGEVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['NodeUnstageVolumeRequest'] = _NODEUNSTAGEVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['NodeUnstageVolumeResponse'] = _NODEUNSTAGEVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['NodePublishVolumeRequest'] = _NODEPUBLISHVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['NodePublishVolumeResponse'] = _NODEPUBLISHVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['NodeUnpublishVolumeRequest'] = _NODEUNPUBLISHVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['NodeUnpublishVolumeResponse'] = _NODEUNPUBLISHVOLUMERESPONSE +DESCRIPTOR.message_types_by_name['NodeGetVolumeStatsRequest'] = _NODEGETVOLUMESTATSREQUEST +DESCRIPTOR.message_types_by_name['NodeGetVolumeStatsResponse'] = _NODEGETVOLUMESTATSRESPONSE +DESCRIPTOR.message_types_by_name['VolumeUsage'] = _VOLUMEUSAGE +DESCRIPTOR.message_types_by_name['NodeGetCapabilitiesRequest'] = _NODEGETCAPABILITIESREQUEST +DESCRIPTOR.message_types_by_name['NodeGetCapabilitiesResponse'] = _NODEGETCAPABILITIESRESPONSE +DESCRIPTOR.message_types_by_name['NodeServiceCapability'] = _NODESERVICECAPABILITY +DESCRIPTOR.message_types_by_name['NodeGetInfoRequest'] = _NODEGETINFOREQUEST +DESCRIPTOR.message_types_by_name['NodeGetInfoResponse'] = _NODEGETINFORESPONSE +DESCRIPTOR.message_types_by_name['NodeExpandVolumeRequest'] = _NODEEXPANDVOLUMEREQUEST +DESCRIPTOR.message_types_by_name['NodeExpandVolumeResponse'] = _NODEEXPANDVOLUMERESPONSE +DESCRIPTOR.extensions_by_name['csi_secret'] = csi_secret +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +GetPluginInfoRequest = _reflection.GeneratedProtocolMessageType('GetPluginInfoRequest', (_message.Message,), dict( + DESCRIPTOR = _GETPLUGININFOREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetPluginInfoRequest) + )) +_sym_db.RegisterMessage(GetPluginInfoRequest) + +GetPluginInfoResponse = _reflection.GeneratedProtocolMessageType('GetPluginInfoResponse', (_message.Message,), dict( + + ManifestEntry = _reflection.GeneratedProtocolMessageType('ManifestEntry', (_message.Message,), dict( + DESCRIPTOR = _GETPLUGININFORESPONSE_MANIFESTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetPluginInfoResponse.ManifestEntry) + )) + , + DESCRIPTOR = _GETPLUGININFORESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetPluginInfoResponse) + )) +_sym_db.RegisterMessage(GetPluginInfoResponse) +_sym_db.RegisterMessage(GetPluginInfoResponse.ManifestEntry) + +GetPluginCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('GetPluginCapabilitiesRequest', (_message.Message,), dict( + DESCRIPTOR = _GETPLUGINCAPABILITIESREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetPluginCapabilitiesRequest) + )) +_sym_db.RegisterMessage(GetPluginCapabilitiesRequest) + +GetPluginCapabilitiesResponse 
= _reflection.GeneratedProtocolMessageType('GetPluginCapabilitiesResponse', (_message.Message,), dict( + DESCRIPTOR = _GETPLUGINCAPABILITIESRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetPluginCapabilitiesResponse) + )) +_sym_db.RegisterMessage(GetPluginCapabilitiesResponse) + +PluginCapability = _reflection.GeneratedProtocolMessageType('PluginCapability', (_message.Message,), dict( + + Service = _reflection.GeneratedProtocolMessageType('Service', (_message.Message,), dict( + DESCRIPTOR = _PLUGINCAPABILITY_SERVICE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.PluginCapability.Service) + )) + , + + VolumeExpansion = _reflection.GeneratedProtocolMessageType('VolumeExpansion', (_message.Message,), dict( + DESCRIPTOR = _PLUGINCAPABILITY_VOLUMEEXPANSION, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.PluginCapability.VolumeExpansion) + )) + , + DESCRIPTOR = _PLUGINCAPABILITY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.PluginCapability) + )) +_sym_db.RegisterMessage(PluginCapability) +_sym_db.RegisterMessage(PluginCapability.Service) +_sym_db.RegisterMessage(PluginCapability.VolumeExpansion) + +ProbeRequest = _reflection.GeneratedProtocolMessageType('ProbeRequest', (_message.Message,), dict( + DESCRIPTOR = _PROBEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ProbeRequest) + )) +_sym_db.RegisterMessage(ProbeRequest) + +ProbeResponse = _reflection.GeneratedProtocolMessageType('ProbeResponse', (_message.Message,), dict( + DESCRIPTOR = _PROBERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ProbeResponse) + )) +_sym_db.RegisterMessage(ProbeResponse) + +CreateVolumeRequest = _reflection.GeneratedProtocolMessageType('CreateVolumeRequest', (_message.Message,), dict( + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATEVOLUMEREQUEST_PARAMETERSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CreateVolumeRequest.ParametersEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATEVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CreateVolumeRequest.SecretsEntry) + )) + , + DESCRIPTOR = _CREATEVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CreateVolumeRequest) + )) +_sym_db.RegisterMessage(CreateVolumeRequest) +_sym_db.RegisterMessage(CreateVolumeRequest.ParametersEntry) +_sym_db.RegisterMessage(CreateVolumeRequest.SecretsEntry) + +VolumeContentSource = _reflection.GeneratedProtocolMessageType('VolumeContentSource', (_message.Message,), dict( + + SnapshotSource = _reflection.GeneratedProtocolMessageType('SnapshotSource', (_message.Message,), dict( + DESCRIPTOR = _VOLUMECONTENTSOURCE_SNAPSHOTSOURCE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeContentSource.SnapshotSource) + )) + , + + VolumeSource = _reflection.GeneratedProtocolMessageType('VolumeSource', (_message.Message,), dict( + DESCRIPTOR = _VOLUMECONTENTSOURCE_VOLUMESOURCE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeContentSource.VolumeSource) + )) + , + DESCRIPTOR = _VOLUMECONTENTSOURCE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeContentSource) + )) 
+_sym_db.RegisterMessage(VolumeContentSource) +_sym_db.RegisterMessage(VolumeContentSource.SnapshotSource) +_sym_db.RegisterMessage(VolumeContentSource.VolumeSource) + +CreateVolumeResponse = _reflection.GeneratedProtocolMessageType('CreateVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _CREATEVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CreateVolumeResponse) + )) +_sym_db.RegisterMessage(CreateVolumeResponse) + +VolumeCapability = _reflection.GeneratedProtocolMessageType('VolumeCapability', (_message.Message,), dict( + + BlockVolume = _reflection.GeneratedProtocolMessageType('BlockVolume', (_message.Message,), dict( + DESCRIPTOR = _VOLUMECAPABILITY_BLOCKVOLUME, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeCapability.BlockVolume) + )) + , + + MountVolume = _reflection.GeneratedProtocolMessageType('MountVolume', (_message.Message,), dict( + DESCRIPTOR = _VOLUMECAPABILITY_MOUNTVOLUME, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeCapability.MountVolume) + )) + , + + AccessMode = _reflection.GeneratedProtocolMessageType('AccessMode', (_message.Message,), dict( + DESCRIPTOR = _VOLUMECAPABILITY_ACCESSMODE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeCapability.AccessMode) + )) + , + DESCRIPTOR = _VOLUMECAPABILITY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeCapability) + )) +_sym_db.RegisterMessage(VolumeCapability) +_sym_db.RegisterMessage(VolumeCapability.BlockVolume) +_sym_db.RegisterMessage(VolumeCapability.MountVolume) +_sym_db.RegisterMessage(VolumeCapability.AccessMode) + +CapacityRange = _reflection.GeneratedProtocolMessageType('CapacityRange', (_message.Message,), dict( + DESCRIPTOR = _CAPACITYRANGE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CapacityRange) + )) +_sym_db.RegisterMessage(CapacityRange) + +Volume = _reflection.GeneratedProtocolMessageType('Volume', (_message.Message,), dict( + + VolumeContextEntry = _reflection.GeneratedProtocolMessageType('VolumeContextEntry', (_message.Message,), dict( + DESCRIPTOR = _VOLUME_VOLUMECONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.Volume.VolumeContextEntry) + )) + , + DESCRIPTOR = _VOLUME, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.Volume) + )) +_sym_db.RegisterMessage(Volume) +_sym_db.RegisterMessage(Volume.VolumeContextEntry) + +TopologyRequirement = _reflection.GeneratedProtocolMessageType('TopologyRequirement', (_message.Message,), dict( + DESCRIPTOR = _TOPOLOGYREQUIREMENT, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.TopologyRequirement) + )) +_sym_db.RegisterMessage(TopologyRequirement) + +Topology = _reflection.GeneratedProtocolMessageType('Topology', (_message.Message,), dict( + + SegmentsEntry = _reflection.GeneratedProtocolMessageType('SegmentsEntry', (_message.Message,), dict( + DESCRIPTOR = _TOPOLOGY_SEGMENTSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.Topology.SegmentsEntry) + )) + , + DESCRIPTOR = _TOPOLOGY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.Topology) + )) +_sym_db.RegisterMessage(Topology) +_sym_db.RegisterMessage(Topology.SegmentsEntry) + +DeleteVolumeRequest = _reflection.GeneratedProtocolMessageType('DeleteVolumeRequest', (_message.Message,), dict( + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', 
(_message.Message,), dict( + DESCRIPTOR = _DELETEVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.DeleteVolumeRequest.SecretsEntry) + )) + , + DESCRIPTOR = _DELETEVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.DeleteVolumeRequest) + )) +_sym_db.RegisterMessage(DeleteVolumeRequest) +_sym_db.RegisterMessage(DeleteVolumeRequest.SecretsEntry) + +DeleteVolumeResponse = _reflection.GeneratedProtocolMessageType('DeleteVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _DELETEVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.DeleteVolumeResponse) + )) +_sym_db.RegisterMessage(DeleteVolumeResponse) + +ControllerPublishVolumeRequest = _reflection.GeneratedProtocolMessageType('ControllerPublishVolumeRequest', (_message.Message,), dict( + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerPublishVolumeRequest.SecretsEntry) + )) + , + + VolumeContextEntry = _reflection.GeneratedProtocolMessageType('VolumeContextEntry', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry) + )) + , + DESCRIPTOR = _CONTROLLERPUBLISHVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerPublishVolumeRequest) + )) +_sym_db.RegisterMessage(ControllerPublishVolumeRequest) +_sym_db.RegisterMessage(ControllerPublishVolumeRequest.SecretsEntry) +_sym_db.RegisterMessage(ControllerPublishVolumeRequest.VolumeContextEntry) + +ControllerPublishVolumeResponse = _reflection.GeneratedProtocolMessageType('ControllerPublishVolumeResponse', (_message.Message,), dict( + + PublishContextEntry = _reflection.GeneratedProtocolMessageType('PublishContextEntry', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerPublishVolumeResponse.PublishContextEntry) + )) + , + DESCRIPTOR = _CONTROLLERPUBLISHVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerPublishVolumeResponse) + )) +_sym_db.RegisterMessage(ControllerPublishVolumeResponse) +_sym_db.RegisterMessage(ControllerPublishVolumeResponse.PublishContextEntry) + +ControllerUnpublishVolumeRequest = _reflection.GeneratedProtocolMessageType('ControllerUnpublishVolumeRequest', (_message.Message,), dict( + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry) + )) + , + DESCRIPTOR = _CONTROLLERUNPUBLISHVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerUnpublishVolumeRequest) + )) +_sym_db.RegisterMessage(ControllerUnpublishVolumeRequest) +_sym_db.RegisterMessage(ControllerUnpublishVolumeRequest.SecretsEntry) + +ControllerUnpublishVolumeResponse = _reflection.GeneratedProtocolMessageType('ControllerUnpublishVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERUNPUBLISHVOLUMERESPONSE, + __module__ = 'csi_pb2' + # 
@@protoc_insertion_point(class_scope:csi.v1.ControllerUnpublishVolumeResponse) + )) +_sym_db.RegisterMessage(ControllerUnpublishVolumeResponse) + +ValidateVolumeCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('ValidateVolumeCapabilitiesRequest', (_message.Message,), dict( + + VolumeContextEntry = _reflection.GeneratedProtocolMessageType('VolumeContextEntry', (_message.Message,), dict( + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry) + )) + , + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry) + )) + , + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesRequest) + )) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesRequest) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesRequest.VolumeContextEntry) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesRequest.ParametersEntry) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesRequest.SecretsEntry) + +ValidateVolumeCapabilitiesResponse = _reflection.GeneratedProtocolMessageType('ValidateVolumeCapabilitiesResponse', (_message.Message,), dict( + + Confirmed = _reflection.GeneratedProtocolMessageType('Confirmed', (_message.Message,), dict( + + VolumeContextEntry = _reflection.GeneratedProtocolMessageType('VolumeContextEntry', (_message.Message,), dict( + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry) + )) + , + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry) + )) + , + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed) + )) + , + DESCRIPTOR = _VALIDATEVOLUMECAPABILITIESRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ValidateVolumeCapabilitiesResponse) + )) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesResponse) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesResponse.Confirmed) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry) +_sym_db.RegisterMessage(ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry) + +ListVolumesRequest = _reflection.GeneratedProtocolMessageType('ListVolumesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTVOLUMESREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ListVolumesRequest) + )) +_sym_db.RegisterMessage(ListVolumesRequest) + 
+ListVolumesResponse = _reflection.GeneratedProtocolMessageType('ListVolumesResponse', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _LISTVOLUMESRESPONSE_ENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ListVolumesResponse.Entry) + )) + , + DESCRIPTOR = _LISTVOLUMESRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ListVolumesResponse) + )) +_sym_db.RegisterMessage(ListVolumesResponse) +_sym_db.RegisterMessage(ListVolumesResponse.Entry) + +GetCapacityRequest = _reflection.GeneratedProtocolMessageType('GetCapacityRequest', (_message.Message,), dict( + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _GETCAPACITYREQUEST_PARAMETERSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetCapacityRequest.ParametersEntry) + )) + , + DESCRIPTOR = _GETCAPACITYREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetCapacityRequest) + )) +_sym_db.RegisterMessage(GetCapacityRequest) +_sym_db.RegisterMessage(GetCapacityRequest.ParametersEntry) + +GetCapacityResponse = _reflection.GeneratedProtocolMessageType('GetCapacityResponse', (_message.Message,), dict( + DESCRIPTOR = _GETCAPACITYRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.GetCapacityResponse) + )) +_sym_db.RegisterMessage(GetCapacityResponse) + +ControllerGetCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('ControllerGetCapabilitiesRequest', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERGETCAPABILITIESREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerGetCapabilitiesRequest) + )) +_sym_db.RegisterMessage(ControllerGetCapabilitiesRequest) + +ControllerGetCapabilitiesResponse = _reflection.GeneratedProtocolMessageType('ControllerGetCapabilitiesResponse', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERGETCAPABILITIESRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerGetCapabilitiesResponse) + )) +_sym_db.RegisterMessage(ControllerGetCapabilitiesResponse) + +ControllerServiceCapability = _reflection.GeneratedProtocolMessageType('ControllerServiceCapability', (_message.Message,), dict( + + RPC = _reflection.GeneratedProtocolMessageType('RPC', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLERSERVICECAPABILITY_RPC, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerServiceCapability.RPC) + )) + , + DESCRIPTOR = _CONTROLLERSERVICECAPABILITY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerServiceCapability) + )) +_sym_db.RegisterMessage(ControllerServiceCapability) +_sym_db.RegisterMessage(ControllerServiceCapability.RPC) + +CreateSnapshotRequest = _reflection.GeneratedProtocolMessageType('CreateSnapshotRequest', (_message.Message,), dict( + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATESNAPSHOTREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CreateSnapshotRequest.SecretsEntry) + )) + , + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATESNAPSHOTREQUEST_PARAMETERSENTRY, + __module__ = 'csi_pb2' + # 
@@protoc_insertion_point(class_scope:csi.v1.CreateSnapshotRequest.ParametersEntry) + )) + , + DESCRIPTOR = _CREATESNAPSHOTREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CreateSnapshotRequest) + )) +_sym_db.RegisterMessage(CreateSnapshotRequest) +_sym_db.RegisterMessage(CreateSnapshotRequest.SecretsEntry) +_sym_db.RegisterMessage(CreateSnapshotRequest.ParametersEntry) + +CreateSnapshotResponse = _reflection.GeneratedProtocolMessageType('CreateSnapshotResponse', (_message.Message,), dict( + DESCRIPTOR = _CREATESNAPSHOTRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.CreateSnapshotResponse) + )) +_sym_db.RegisterMessage(CreateSnapshotResponse) + +Snapshot = _reflection.GeneratedProtocolMessageType('Snapshot', (_message.Message,), dict( + DESCRIPTOR = _SNAPSHOT, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.Snapshot) + )) +_sym_db.RegisterMessage(Snapshot) + +DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType('DeleteSnapshotRequest', (_message.Message,), dict( + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _DELETESNAPSHOTREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.DeleteSnapshotRequest.SecretsEntry) + )) + , + DESCRIPTOR = _DELETESNAPSHOTREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.DeleteSnapshotRequest) + )) +_sym_db.RegisterMessage(DeleteSnapshotRequest) +_sym_db.RegisterMessage(DeleteSnapshotRequest.SecretsEntry) + +DeleteSnapshotResponse = _reflection.GeneratedProtocolMessageType('DeleteSnapshotResponse', (_message.Message,), dict( + DESCRIPTOR = _DELETESNAPSHOTRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.DeleteSnapshotResponse) + )) +_sym_db.RegisterMessage(DeleteSnapshotResponse) + +ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType('ListSnapshotsRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTSNAPSHOTSREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ListSnapshotsRequest) + )) +_sym_db.RegisterMessage(ListSnapshotsRequest) + +ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType('ListSnapshotsResponse', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _LISTSNAPSHOTSRESPONSE_ENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ListSnapshotsResponse.Entry) + )) + , + DESCRIPTOR = _LISTSNAPSHOTSRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ListSnapshotsResponse) + )) +_sym_db.RegisterMessage(ListSnapshotsResponse) +_sym_db.RegisterMessage(ListSnapshotsResponse.Entry) + +ControllerExpandVolumeRequest = _reflection.GeneratedProtocolMessageType('ControllerExpandVolumeRequest', (_message.Message,), dict( + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerExpandVolumeRequest.SecretsEntry) + )) + , + DESCRIPTOR = _CONTROLLEREXPANDVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerExpandVolumeRequest) + )) +_sym_db.RegisterMessage(ControllerExpandVolumeRequest) +_sym_db.RegisterMessage(ControllerExpandVolumeRequest.SecretsEntry) + 
+ControllerExpandVolumeResponse = _reflection.GeneratedProtocolMessageType('ControllerExpandVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _CONTROLLEREXPANDVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.ControllerExpandVolumeResponse) + )) +_sym_db.RegisterMessage(ControllerExpandVolumeResponse) + +NodeStageVolumeRequest = _reflection.GeneratedProtocolMessageType('NodeStageVolumeRequest', (_message.Message,), dict( + + PublishContextEntry = _reflection.GeneratedProtocolMessageType('PublishContextEntry', (_message.Message,), dict( + DESCRIPTOR = _NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeStageVolumeRequest.PublishContextEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _NODESTAGEVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeStageVolumeRequest.SecretsEntry) + )) + , + + VolumeContextEntry = _reflection.GeneratedProtocolMessageType('VolumeContextEntry', (_message.Message,), dict( + DESCRIPTOR = _NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeStageVolumeRequest.VolumeContextEntry) + )) + , + DESCRIPTOR = _NODESTAGEVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeStageVolumeRequest) + )) +_sym_db.RegisterMessage(NodeStageVolumeRequest) +_sym_db.RegisterMessage(NodeStageVolumeRequest.PublishContextEntry) +_sym_db.RegisterMessage(NodeStageVolumeRequest.SecretsEntry) +_sym_db.RegisterMessage(NodeStageVolumeRequest.VolumeContextEntry) + +NodeStageVolumeResponse = _reflection.GeneratedProtocolMessageType('NodeStageVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _NODESTAGEVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeStageVolumeResponse) + )) +_sym_db.RegisterMessage(NodeStageVolumeResponse) + +NodeUnstageVolumeRequest = _reflection.GeneratedProtocolMessageType('NodeUnstageVolumeRequest', (_message.Message,), dict( + DESCRIPTOR = _NODEUNSTAGEVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeUnstageVolumeRequest) + )) +_sym_db.RegisterMessage(NodeUnstageVolumeRequest) + +NodeUnstageVolumeResponse = _reflection.GeneratedProtocolMessageType('NodeUnstageVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _NODEUNSTAGEVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeUnstageVolumeResponse) + )) +_sym_db.RegisterMessage(NodeUnstageVolumeResponse) + +NodePublishVolumeRequest = _reflection.GeneratedProtocolMessageType('NodePublishVolumeRequest', (_message.Message,), dict( + + PublishContextEntry = _reflection.GeneratedProtocolMessageType('PublishContextEntry', (_message.Message,), dict( + DESCRIPTOR = _NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodePublishVolumeRequest.PublishContextEntry) + )) + , + + SecretsEntry = _reflection.GeneratedProtocolMessageType('SecretsEntry', (_message.Message,), dict( + DESCRIPTOR = _NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodePublishVolumeRequest.SecretsEntry) + )) + , + + VolumeContextEntry = _reflection.GeneratedProtocolMessageType('VolumeContextEntry', (_message.Message,), dict( + DESCRIPTOR = 
_NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodePublishVolumeRequest.VolumeContextEntry) + )) + , + DESCRIPTOR = _NODEPUBLISHVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodePublishVolumeRequest) + )) +_sym_db.RegisterMessage(NodePublishVolumeRequest) +_sym_db.RegisterMessage(NodePublishVolumeRequest.PublishContextEntry) +_sym_db.RegisterMessage(NodePublishVolumeRequest.SecretsEntry) +_sym_db.RegisterMessage(NodePublishVolumeRequest.VolumeContextEntry) + +NodePublishVolumeResponse = _reflection.GeneratedProtocolMessageType('NodePublishVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _NODEPUBLISHVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodePublishVolumeResponse) + )) +_sym_db.RegisterMessage(NodePublishVolumeResponse) + +NodeUnpublishVolumeRequest = _reflection.GeneratedProtocolMessageType('NodeUnpublishVolumeRequest', (_message.Message,), dict( + DESCRIPTOR = _NODEUNPUBLISHVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeUnpublishVolumeRequest) + )) +_sym_db.RegisterMessage(NodeUnpublishVolumeRequest) + +NodeUnpublishVolumeResponse = _reflection.GeneratedProtocolMessageType('NodeUnpublishVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _NODEUNPUBLISHVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeUnpublishVolumeResponse) + )) +_sym_db.RegisterMessage(NodeUnpublishVolumeResponse) + +NodeGetVolumeStatsRequest = _reflection.GeneratedProtocolMessageType('NodeGetVolumeStatsRequest', (_message.Message,), dict( + DESCRIPTOR = _NODEGETVOLUMESTATSREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeGetVolumeStatsRequest) + )) +_sym_db.RegisterMessage(NodeGetVolumeStatsRequest) + +NodeGetVolumeStatsResponse = _reflection.GeneratedProtocolMessageType('NodeGetVolumeStatsResponse', (_message.Message,), dict( + DESCRIPTOR = _NODEGETVOLUMESTATSRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeGetVolumeStatsResponse) + )) +_sym_db.RegisterMessage(NodeGetVolumeStatsResponse) + +VolumeUsage = _reflection.GeneratedProtocolMessageType('VolumeUsage', (_message.Message,), dict( + DESCRIPTOR = _VOLUMEUSAGE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.VolumeUsage) + )) +_sym_db.RegisterMessage(VolumeUsage) + +NodeGetCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('NodeGetCapabilitiesRequest', (_message.Message,), dict( + DESCRIPTOR = _NODEGETCAPABILITIESREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeGetCapabilitiesRequest) + )) +_sym_db.RegisterMessage(NodeGetCapabilitiesRequest) + +NodeGetCapabilitiesResponse = _reflection.GeneratedProtocolMessageType('NodeGetCapabilitiesResponse', (_message.Message,), dict( + DESCRIPTOR = _NODEGETCAPABILITIESRESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeGetCapabilitiesResponse) + )) +_sym_db.RegisterMessage(NodeGetCapabilitiesResponse) + +NodeServiceCapability = _reflection.GeneratedProtocolMessageType('NodeServiceCapability', (_message.Message,), dict( + + RPC = _reflection.GeneratedProtocolMessageType('RPC', (_message.Message,), dict( + DESCRIPTOR = _NODESERVICECAPABILITY_RPC, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeServiceCapability.RPC) + )) + , + DESCRIPTOR = 
_NODESERVICECAPABILITY, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeServiceCapability) + )) +_sym_db.RegisterMessage(NodeServiceCapability) +_sym_db.RegisterMessage(NodeServiceCapability.RPC) + +NodeGetInfoRequest = _reflection.GeneratedProtocolMessageType('NodeGetInfoRequest', (_message.Message,), dict( + DESCRIPTOR = _NODEGETINFOREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeGetInfoRequest) + )) +_sym_db.RegisterMessage(NodeGetInfoRequest) + +NodeGetInfoResponse = _reflection.GeneratedProtocolMessageType('NodeGetInfoResponse', (_message.Message,), dict( + DESCRIPTOR = _NODEGETINFORESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeGetInfoResponse) + )) +_sym_db.RegisterMessage(NodeGetInfoResponse) + +NodeExpandVolumeRequest = _reflection.GeneratedProtocolMessageType('NodeExpandVolumeRequest', (_message.Message,), dict( + DESCRIPTOR = _NODEEXPANDVOLUMEREQUEST, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeExpandVolumeRequest) + )) +_sym_db.RegisterMessage(NodeExpandVolumeRequest) + +NodeExpandVolumeResponse = _reflection.GeneratedProtocolMessageType('NodeExpandVolumeResponse', (_message.Message,), dict( + DESCRIPTOR = _NODEEXPANDVOLUMERESPONSE, + __module__ = 'csi_pb2' + # @@protoc_insertion_point(class_scope:csi.v1.NodeExpandVolumeResponse) + )) +_sym_db.RegisterMessage(NodeExpandVolumeResponse) + +google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(csi_secret) + +DESCRIPTOR._options = None +_GETPLUGININFORESPONSE_MANIFESTENTRY._options = None +_CREATEVOLUMEREQUEST_PARAMETERSENTRY._options = None +_CREATEVOLUMEREQUEST_SECRETSENTRY._options = None +_CREATEVOLUMEREQUEST.fields_by_name['secrets']._options = None +_VOLUME_VOLUMECONTEXTENTRY._options = None +_TOPOLOGY_SEGMENTSENTRY._options = None +_DELETEVOLUMEREQUEST_SECRETSENTRY._options = None +_DELETEVOLUMEREQUEST.fields_by_name['secrets']._options = None +_CONTROLLERPUBLISHVOLUMEREQUEST_SECRETSENTRY._options = None +_CONTROLLERPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._options = None +_CONTROLLERPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._options = None +_CONTROLLERPUBLISHVOLUMERESPONSE_PUBLISHCONTEXTENTRY._options = None +_CONTROLLERUNPUBLISHVOLUMEREQUEST_SECRETSENTRY._options = None +_CONTROLLERUNPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._options = None +_VALIDATEVOLUMECAPABILITIESREQUEST_VOLUMECONTEXTENTRY._options = None +_VALIDATEVOLUMECAPABILITIESREQUEST_PARAMETERSENTRY._options = None +_VALIDATEVOLUMECAPABILITIESREQUEST_SECRETSENTRY._options = None +_VALIDATEVOLUMECAPABILITIESREQUEST.fields_by_name['secrets']._options = None +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_VOLUMECONTEXTENTRY._options = None +_VALIDATEVOLUMECAPABILITIESRESPONSE_CONFIRMED_PARAMETERSENTRY._options = None +_GETCAPACITYREQUEST_PARAMETERSENTRY._options = None +_CREATESNAPSHOTREQUEST_SECRETSENTRY._options = None +_CREATESNAPSHOTREQUEST_PARAMETERSENTRY._options = None +_CREATESNAPSHOTREQUEST.fields_by_name['secrets']._options = None +_DELETESNAPSHOTREQUEST_SECRETSENTRY._options = None +_DELETESNAPSHOTREQUEST.fields_by_name['secrets']._options = None +_CONTROLLEREXPANDVOLUMEREQUEST_SECRETSENTRY._options = None +_CONTROLLEREXPANDVOLUMEREQUEST.fields_by_name['secrets']._options = None +_NODESTAGEVOLUMEREQUEST_PUBLISHCONTEXTENTRY._options = None +_NODESTAGEVOLUMEREQUEST_SECRETSENTRY._options = None +_NODESTAGEVOLUMEREQUEST_VOLUMECONTEXTENTRY._options = None 
+_NODESTAGEVOLUMEREQUEST.fields_by_name['secrets']._options = None +_NODEPUBLISHVOLUMEREQUEST_PUBLISHCONTEXTENTRY._options = None +_NODEPUBLISHVOLUMEREQUEST_SECRETSENTRY._options = None +_NODEPUBLISHVOLUMEREQUEST_VOLUMECONTEXTENTRY._options = None +_NODEPUBLISHVOLUMEREQUEST.fields_by_name['secrets']._options = None + +_IDENTITY = _descriptor.ServiceDescriptor( + name='Identity', + full_name='csi.v1.Identity', + file=DESCRIPTOR, + index=0, + serialized_options=None, + serialized_start=9287, + serialized_end=9537, + methods=[ + _descriptor.MethodDescriptor( + name='GetPluginInfo', + full_name='csi.v1.Identity.GetPluginInfo', + index=0, + containing_service=None, + input_type=_GETPLUGININFOREQUEST, + output_type=_GETPLUGININFORESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='GetPluginCapabilities', + full_name='csi.v1.Identity.GetPluginCapabilities', + index=1, + containing_service=None, + input_type=_GETPLUGINCAPABILITIESREQUEST, + output_type=_GETPLUGINCAPABILITIESRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='Probe', + full_name='csi.v1.Identity.Probe', + index=2, + containing_service=None, + input_type=_PROBEREQUEST, + output_type=_PROBERESPONSE, + serialized_options=None, + ), +]) +_sym_db.RegisterServiceDescriptor(_IDENTITY) + +DESCRIPTOR.services_by_name['Identity'] = _IDENTITY + + +_CONTROLLER = _descriptor.ServiceDescriptor( + name='Controller', + full_name='csi.v1.Controller', + file=DESCRIPTOR, + index=1, + serialized_options=None, + serialized_start=9540, + serialized_end=10668, + methods=[ + _descriptor.MethodDescriptor( + name='CreateVolume', + full_name='csi.v1.Controller.CreateVolume', + index=0, + containing_service=None, + input_type=_CREATEVOLUMEREQUEST, + output_type=_CREATEVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='DeleteVolume', + full_name='csi.v1.Controller.DeleteVolume', + index=1, + containing_service=None, + input_type=_DELETEVOLUMEREQUEST, + output_type=_DELETEVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ControllerPublishVolume', + full_name='csi.v1.Controller.ControllerPublishVolume', + index=2, + containing_service=None, + input_type=_CONTROLLERPUBLISHVOLUMEREQUEST, + output_type=_CONTROLLERPUBLISHVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ControllerUnpublishVolume', + full_name='csi.v1.Controller.ControllerUnpublishVolume', + index=3, + containing_service=None, + input_type=_CONTROLLERUNPUBLISHVOLUMEREQUEST, + output_type=_CONTROLLERUNPUBLISHVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ValidateVolumeCapabilities', + full_name='csi.v1.Controller.ValidateVolumeCapabilities', + index=4, + containing_service=None, + input_type=_VALIDATEVOLUMECAPABILITIESREQUEST, + output_type=_VALIDATEVOLUMECAPABILITIESRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ListVolumes', + full_name='csi.v1.Controller.ListVolumes', + index=5, + containing_service=None, + input_type=_LISTVOLUMESREQUEST, + output_type=_LISTVOLUMESRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='GetCapacity', + full_name='csi.v1.Controller.GetCapacity', + index=6, + containing_service=None, + input_type=_GETCAPACITYREQUEST, + output_type=_GETCAPACITYRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ControllerGetCapabilities', + 
full_name='csi.v1.Controller.ControllerGetCapabilities', + index=7, + containing_service=None, + input_type=_CONTROLLERGETCAPABILITIESREQUEST, + output_type=_CONTROLLERGETCAPABILITIESRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='CreateSnapshot', + full_name='csi.v1.Controller.CreateSnapshot', + index=8, + containing_service=None, + input_type=_CREATESNAPSHOTREQUEST, + output_type=_CREATESNAPSHOTRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='DeleteSnapshot', + full_name='csi.v1.Controller.DeleteSnapshot', + index=9, + containing_service=None, + input_type=_DELETESNAPSHOTREQUEST, + output_type=_DELETESNAPSHOTRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ListSnapshots', + full_name='csi.v1.Controller.ListSnapshots', + index=10, + containing_service=None, + input_type=_LISTSNAPSHOTSREQUEST, + output_type=_LISTSNAPSHOTSRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='ControllerExpandVolume', + full_name='csi.v1.Controller.ControllerExpandVolume', + index=11, + containing_service=None, + input_type=_CONTROLLEREXPANDVOLUMEREQUEST, + output_type=_CONTROLLEREXPANDVOLUMERESPONSE, + serialized_options=None, + ), +]) +_sym_db.RegisterServiceDescriptor(_CONTROLLER) + +DESCRIPTOR.services_by_name['Controller'] = _CONTROLLER + + +_NODE = _descriptor.ServiceDescriptor( + name='Node', + full_name='csi.v1.Node', + file=DESCRIPTOR, + index=2, + serialized_options=None, + serialized_start=10671, + serialized_end=11401, + methods=[ + _descriptor.MethodDescriptor( + name='NodeStageVolume', + full_name='csi.v1.Node.NodeStageVolume', + index=0, + containing_service=None, + input_type=_NODESTAGEVOLUMEREQUEST, + output_type=_NODESTAGEVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='NodeUnstageVolume', + full_name='csi.v1.Node.NodeUnstageVolume', + index=1, + containing_service=None, + input_type=_NODEUNSTAGEVOLUMEREQUEST, + output_type=_NODEUNSTAGEVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='NodePublishVolume', + full_name='csi.v1.Node.NodePublishVolume', + index=2, + containing_service=None, + input_type=_NODEPUBLISHVOLUMEREQUEST, + output_type=_NODEPUBLISHVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='NodeUnpublishVolume', + full_name='csi.v1.Node.NodeUnpublishVolume', + index=3, + containing_service=None, + input_type=_NODEUNPUBLISHVOLUMEREQUEST, + output_type=_NODEUNPUBLISHVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='NodeGetVolumeStats', + full_name='csi.v1.Node.NodeGetVolumeStats', + index=4, + containing_service=None, + input_type=_NODEGETVOLUMESTATSREQUEST, + output_type=_NODEGETVOLUMESTATSRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='NodeExpandVolume', + full_name='csi.v1.Node.NodeExpandVolume', + index=5, + containing_service=None, + input_type=_NODEEXPANDVOLUMEREQUEST, + output_type=_NODEEXPANDVOLUMERESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='NodeGetCapabilities', + full_name='csi.v1.Node.NodeGetCapabilities', + index=6, + containing_service=None, + input_type=_NODEGETCAPABILITIESREQUEST, + output_type=_NODEGETCAPABILITIESRESPONSE, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='NodeGetInfo', + full_name='csi.v1.Node.NodeGetInfo', + index=7, + containing_service=None, + input_type=_NODEGETINFOREQUEST, + 
output_type=_NODEGETINFORESPONSE, + serialized_options=None, + ), +]) +_sym_db.RegisterServiceDescriptor(_NODE) + +DESCRIPTOR.services_by_name['Node'] = _NODE + +# @@protoc_insertion_point(module_scope) diff --git a/controller/csi_general/csi_pb2_grpc.py b/controller/csi_general/csi_pb2_grpc.py new file mode 100644 index 000000000..ff2fd0d69 --- /dev/null +++ b/controller/csi_general/csi_pb2_grpc.py @@ -0,0 +1,470 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + +from controller.csi_general import csi_pb2 as csi__pb2 + + +class IdentityStub(object): + # missing associated documentation comment in .proto file + pass + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetPluginInfo = channel.unary_unary( + '/csi.v1.Identity/GetPluginInfo', + request_serializer=csi__pb2.GetPluginInfoRequest.SerializeToString, + response_deserializer=csi__pb2.GetPluginInfoResponse.FromString, + ) + self.GetPluginCapabilities = channel.unary_unary( + '/csi.v1.Identity/GetPluginCapabilities', + request_serializer=csi__pb2.GetPluginCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.GetPluginCapabilitiesResponse.FromString, + ) + self.Probe = channel.unary_unary( + '/csi.v1.Identity/Probe', + request_serializer=csi__pb2.ProbeRequest.SerializeToString, + response_deserializer=csi__pb2.ProbeResponse.FromString, + ) + + +class IdentityServicer(object): + # missing associated documentation comment in .proto file + pass + + def GetPluginInfo(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetPluginCapabilities(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Probe(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_IdentityServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetPluginInfo': grpc.unary_unary_rpc_method_handler( + servicer.GetPluginInfo, + request_deserializer=csi__pb2.GetPluginInfoRequest.FromString, + response_serializer=csi__pb2.GetPluginInfoResponse.SerializeToString, + ), + 'GetPluginCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.GetPluginCapabilities, + request_deserializer=csi__pb2.GetPluginCapabilitiesRequest.FromString, + response_serializer=csi__pb2.GetPluginCapabilitiesResponse.SerializeToString, + ), + 'Probe': grpc.unary_unary_rpc_method_handler( + servicer.Probe, + request_deserializer=csi__pb2.ProbeRequest.FromString, + response_serializer=csi__pb2.ProbeResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'csi.v1.Identity', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class ControllerStub(object): + # missing associated documentation comment in .proto file + pass + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateVolume = channel.unary_unary( + '/csi.v1.Controller/CreateVolume', + request_serializer=csi__pb2.CreateVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.CreateVolumeResponse.FromString, + ) + self.DeleteVolume = channel.unary_unary( + '/csi.v1.Controller/DeleteVolume', + request_serializer=csi__pb2.DeleteVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.DeleteVolumeResponse.FromString, + ) + self.ControllerPublishVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerPublishVolume', + request_serializer=csi__pb2.ControllerPublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerPublishVolumeResponse.FromString, + ) + self.ControllerUnpublishVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerUnpublishVolume', + request_serializer=csi__pb2.ControllerUnpublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerUnpublishVolumeResponse.FromString, + ) + self.ValidateVolumeCapabilities = channel.unary_unary( + '/csi.v1.Controller/ValidateVolumeCapabilities', + request_serializer=csi__pb2.ValidateVolumeCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.ValidateVolumeCapabilitiesResponse.FromString, + ) + self.ListVolumes = channel.unary_unary( + '/csi.v1.Controller/ListVolumes', + request_serializer=csi__pb2.ListVolumesRequest.SerializeToString, + response_deserializer=csi__pb2.ListVolumesResponse.FromString, + ) + self.GetCapacity = channel.unary_unary( + '/csi.v1.Controller/GetCapacity', + request_serializer=csi__pb2.GetCapacityRequest.SerializeToString, + response_deserializer=csi__pb2.GetCapacityResponse.FromString, + ) + self.ControllerGetCapabilities = channel.unary_unary( + '/csi.v1.Controller/ControllerGetCapabilities', + request_serializer=csi__pb2.ControllerGetCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerGetCapabilitiesResponse.FromString, + ) + self.CreateSnapshot = channel.unary_unary( + '/csi.v1.Controller/CreateSnapshot', + request_serializer=csi__pb2.CreateSnapshotRequest.SerializeToString, + response_deserializer=csi__pb2.CreateSnapshotResponse.FromString, + ) + self.DeleteSnapshot = channel.unary_unary( + '/csi.v1.Controller/DeleteSnapshot', + request_serializer=csi__pb2.DeleteSnapshotRequest.SerializeToString, + response_deserializer=csi__pb2.DeleteSnapshotResponse.FromString, + ) + self.ListSnapshots = channel.unary_unary( + '/csi.v1.Controller/ListSnapshots', + request_serializer=csi__pb2.ListSnapshotsRequest.SerializeToString, + response_deserializer=csi__pb2.ListSnapshotsResponse.FromString, + ) + self.ControllerExpandVolume = channel.unary_unary( + '/csi.v1.Controller/ControllerExpandVolume', + request_serializer=csi__pb2.ControllerExpandVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.ControllerExpandVolumeResponse.FromString, + ) + + +class ControllerServicer(object): + # missing associated documentation comment in .proto file + pass + + def CreateVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def 
ControllerPublishVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerUnpublishVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ValidateVolumeCapabilities(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListVolumes(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetCapacity(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerGetCapabilities(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateSnapshot(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteSnapshot(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListSnapshots(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ControllerExpandVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ControllerServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateVolume': grpc.unary_unary_rpc_method_handler( + servicer.CreateVolume, + request_deserializer=csi__pb2.CreateVolumeRequest.FromString, + response_serializer=csi__pb2.CreateVolumeResponse.SerializeToString, + ), + 'DeleteVolume': grpc.unary_unary_rpc_method_handler( + servicer.DeleteVolume, + request_deserializer=csi__pb2.DeleteVolumeRequest.FromString, + response_serializer=csi__pb2.DeleteVolumeResponse.SerializeToString, + ), + 'ControllerPublishVolume': grpc.unary_unary_rpc_method_handler( + servicer.ControllerPublishVolume, + request_deserializer=csi__pb2.ControllerPublishVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerPublishVolumeResponse.SerializeToString, + ), + 'ControllerUnpublishVolume': 
grpc.unary_unary_rpc_method_handler( + servicer.ControllerUnpublishVolume, + request_deserializer=csi__pb2.ControllerUnpublishVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerUnpublishVolumeResponse.SerializeToString, + ), + 'ValidateVolumeCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.ValidateVolumeCapabilities, + request_deserializer=csi__pb2.ValidateVolumeCapabilitiesRequest.FromString, + response_serializer=csi__pb2.ValidateVolumeCapabilitiesResponse.SerializeToString, + ), + 'ListVolumes': grpc.unary_unary_rpc_method_handler( + servicer.ListVolumes, + request_deserializer=csi__pb2.ListVolumesRequest.FromString, + response_serializer=csi__pb2.ListVolumesResponse.SerializeToString, + ), + 'GetCapacity': grpc.unary_unary_rpc_method_handler( + servicer.GetCapacity, + request_deserializer=csi__pb2.GetCapacityRequest.FromString, + response_serializer=csi__pb2.GetCapacityResponse.SerializeToString, + ), + 'ControllerGetCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.ControllerGetCapabilities, + request_deserializer=csi__pb2.ControllerGetCapabilitiesRequest.FromString, + response_serializer=csi__pb2.ControllerGetCapabilitiesResponse.SerializeToString, + ), + 'CreateSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.CreateSnapshot, + request_deserializer=csi__pb2.CreateSnapshotRequest.FromString, + response_serializer=csi__pb2.CreateSnapshotResponse.SerializeToString, + ), + 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.DeleteSnapshot, + request_deserializer=csi__pb2.DeleteSnapshotRequest.FromString, + response_serializer=csi__pb2.DeleteSnapshotResponse.SerializeToString, + ), + 'ListSnapshots': grpc.unary_unary_rpc_method_handler( + servicer.ListSnapshots, + request_deserializer=csi__pb2.ListSnapshotsRequest.FromString, + response_serializer=csi__pb2.ListSnapshotsResponse.SerializeToString, + ), + 'ControllerExpandVolume': grpc.unary_unary_rpc_method_handler( + servicer.ControllerExpandVolume, + request_deserializer=csi__pb2.ControllerExpandVolumeRequest.FromString, + response_serializer=csi__pb2.ControllerExpandVolumeResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'csi.v1.Controller', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class NodeStub(object): + # missing associated documentation comment in .proto file + pass + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.NodeStageVolume = channel.unary_unary( + '/csi.v1.Node/NodeStageVolume', + request_serializer=csi__pb2.NodeStageVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeStageVolumeResponse.FromString, + ) + self.NodeUnstageVolume = channel.unary_unary( + '/csi.v1.Node/NodeUnstageVolume', + request_serializer=csi__pb2.NodeUnstageVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeUnstageVolumeResponse.FromString, + ) + self.NodePublishVolume = channel.unary_unary( + '/csi.v1.Node/NodePublishVolume', + request_serializer=csi__pb2.NodePublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodePublishVolumeResponse.FromString, + ) + self.NodeUnpublishVolume = channel.unary_unary( + '/csi.v1.Node/NodeUnpublishVolume', + request_serializer=csi__pb2.NodeUnpublishVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeUnpublishVolumeResponse.FromString, + ) + self.NodeGetVolumeStats = channel.unary_unary( + '/csi.v1.Node/NodeGetVolumeStats', + request_serializer=csi__pb2.NodeGetVolumeStatsRequest.SerializeToString, + response_deserializer=csi__pb2.NodeGetVolumeStatsResponse.FromString, + ) + self.NodeExpandVolume = channel.unary_unary( + '/csi.v1.Node/NodeExpandVolume', + request_serializer=csi__pb2.NodeExpandVolumeRequest.SerializeToString, + response_deserializer=csi__pb2.NodeExpandVolumeResponse.FromString, + ) + self.NodeGetCapabilities = channel.unary_unary( + '/csi.v1.Node/NodeGetCapabilities', + request_serializer=csi__pb2.NodeGetCapabilitiesRequest.SerializeToString, + response_deserializer=csi__pb2.NodeGetCapabilitiesResponse.FromString, + ) + self.NodeGetInfo = channel.unary_unary( + '/csi.v1.Node/NodeGetInfo', + request_serializer=csi__pb2.NodeGetInfoRequest.SerializeToString, + response_deserializer=csi__pb2.NodeGetInfoResponse.FromString, + ) + + +class NodeServicer(object): + # missing associated documentation comment in .proto file + pass + + def NodeStageVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeUnstageVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodePublishVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeUnpublishVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeGetVolumeStats(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeExpandVolume(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise 
NotImplementedError('Method not implemented!') + + def NodeGetCapabilities(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NodeGetInfo(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_NodeServicer_to_server(servicer, server): + rpc_method_handlers = { + 'NodeStageVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodeStageVolume, + request_deserializer=csi__pb2.NodeStageVolumeRequest.FromString, + response_serializer=csi__pb2.NodeStageVolumeResponse.SerializeToString, + ), + 'NodeUnstageVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodeUnstageVolume, + request_deserializer=csi__pb2.NodeUnstageVolumeRequest.FromString, + response_serializer=csi__pb2.NodeUnstageVolumeResponse.SerializeToString, + ), + 'NodePublishVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodePublishVolume, + request_deserializer=csi__pb2.NodePublishVolumeRequest.FromString, + response_serializer=csi__pb2.NodePublishVolumeResponse.SerializeToString, + ), + 'NodeUnpublishVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodeUnpublishVolume, + request_deserializer=csi__pb2.NodeUnpublishVolumeRequest.FromString, + response_serializer=csi__pb2.NodeUnpublishVolumeResponse.SerializeToString, + ), + 'NodeGetVolumeStats': grpc.unary_unary_rpc_method_handler( + servicer.NodeGetVolumeStats, + request_deserializer=csi__pb2.NodeGetVolumeStatsRequest.FromString, + response_serializer=csi__pb2.NodeGetVolumeStatsResponse.SerializeToString, + ), + 'NodeExpandVolume': grpc.unary_unary_rpc_method_handler( + servicer.NodeExpandVolume, + request_deserializer=csi__pb2.NodeExpandVolumeRequest.FromString, + response_serializer=csi__pb2.NodeExpandVolumeResponse.SerializeToString, + ), + 'NodeGetCapabilities': grpc.unary_unary_rpc_method_handler( + servicer.NodeGetCapabilities, + request_deserializer=csi__pb2.NodeGetCapabilitiesRequest.FromString, + response_serializer=csi__pb2.NodeGetCapabilitiesResponse.SerializeToString, + ), + 'NodeGetInfo': grpc.unary_unary_rpc_method_handler( + servicer.NodeGetInfo, + request_deserializer=csi__pb2.NodeGetInfoRequest.FromString, + response_serializer=csi__pb2.NodeGetInfoResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'csi.v1.Node', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/controller/requirements-tests.txt b/controller/requirements-tests.txt new file mode 100644 index 000000000..60b04c3d4 --- /dev/null +++ b/controller/requirements-tests.txt @@ -0,0 +1,3 @@ +mock +nose +coverage diff --git a/controller/requirements.txt b/controller/requirements.txt new file mode 100644 index 000000000..2a342f645 --- /dev/null +++ b/controller/requirements.txt @@ -0,0 +1,10 @@ +grpcio==1.20.1 +grpcio-tools==1.20.1 +protobuf==3.7.1 +pyyaml==5.1 +munch==2.3.2 + +# A9000 python client +pyxcli==1.2.1 +# SVC python client +pysvc==1.1.1 diff --git a/controller/scripts/entrypoint-test.sh b/controller/scripts/entrypoint-test.sh new file mode 100755 index 000000000..48d930f66 --- /dev/null +++ b/controller/scripts/entrypoint-test.sh @@ -0,0 +1,4 @@ +#!/bin/bash -x +coveragedir=/driver/coverage/ +[ ! 
-d $coveragedir ] && mkdir -p $coveragedir +exec nosetests --with-coverage --cover-xml --cover-xml-file=$coveragedir/.coverage.xml --cover-package=common --cover-package=controller --with-xunit --xunit-file=$coveragedir/.unitests.xml $@ diff --git a/controller/scripts/entrypoint.sh b/controller/scripts/entrypoint.sh new file mode 100755 index 000000000..62c9a0ee5 --- /dev/null +++ b/controller/scripts/entrypoint.sh @@ -0,0 +1,2 @@ +#!/bin/bash +exec python3.6 /driver/controller/controller_server/csi_controller_server.py $@ diff --git a/controller/tests/.coverage b/controller/tests/.coverage new file mode 100644 index 000000000..8ea30fae3 Binary files /dev/null and b/controller/tests/.coverage differ diff --git a/controller/tests/__init__.py b/controller/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/tests/array_action/__init__.py b/controller/tests/array_action/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/tests/array_action/array_connection_manager_test.py b/controller/tests/array_action/array_connection_manager_test.py new file mode 100644 index 000000000..ed95d431f --- /dev/null +++ b/controller/tests/array_action/array_connection_manager_test.py @@ -0,0 +1,122 @@ +import unittest +import controller.array_action.array_connection_manager as array_connection_manager +from controller.array_action.array_connection_manager import ArrayConnectionManager, NoConnectionAvailableException +from mock import patch +from controller.array_action.errors import FailedToFindStorageSystemType +from controller.array_action.array_mediator_xiv import XIVArrayMediator +from controller.array_action.array_mediator_svc import SVCArrayMediator + + +class TestWithFunctionality(unittest.TestCase): + + def setUp(self): + self.fqdn = "fqdn" + self.array_connection = ArrayConnectionManager( + "user", "password", [self.fqdn, self.fqdn], XIVArrayMediator.array_type) + + @patch("controller.array_action.array_connection_manager.XIVArrayMediator._connect") + @patch("controller.array_action.array_connection_manager.XIVArrayMediator.disconnect") + def test_with_opens_and_closes_the_connection(self, close, connect): + with self.array_connection as array_mediator: + self.assertEqual(self.array_connection.connected, True) + self.assertEqual(array_mediator.endpoint, [self.fqdn, self.fqdn]) + connect.assert_called_with() + close.assert_called_with() + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.get_array_connection") + def test_with_throws_error_if_other_error_occures(self, get_connection): + error_message = "this is a dummy error " + get_connection.side_effect = [Exception(error_message)] + with self.assertRaises(Exception) as ex: + with self.array_connection as array_mediator: + pass + + self.assertTrue(error_message in str(ex.exception)) + self.assertEqual(get_connection.call_count, 1) + + +class TestGetconnection(unittest.TestCase): + + def setUp(self): + self.fqdn = "fqdn" + self.connections = [self.fqdn, self.fqdn] + self.connection_key = ",".join(self.connections) + self.array_connection = ArrayConnectionManager( + "user", "password", self.connections, XIVArrayMediator.array_type) + array_connection_manager.array_connections_dict = {} + + def tearDown(self): + array_connection_manager.array_connections_dict = {} + + @patch("controller.array_action.array_connection_manager.XIVArrayMediator._connect") + def test_connection_adds_the_new_endpoint_for_the_first_time(self, connect): + 
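        # Illustrative note (not part of the patch): the tests below pin down the
        # module-level bookkeeping in array_connection_manager. The behaviour
        # expected by the assertions, roughly sketched, is:
        #   key = ",".join(endpoints)               # one counter per endpoint set
        #   array_connections_dict[key] += 1        # on get_array_connection()
        #   array_connections_dict[key] -= 1        # on __exit__()
        # with NoConnectionAvailableException raised once the counter for a key
        # reaches the mediator's max_connections.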
self.assertEqual(array_connection_manager.array_connections_dict, {}) + self.array_connection.get_array_connection() + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 1}) + + new_fqdn = "new-fqdn" + array_connection2 = ArrayConnectionManager("user", "password", [new_fqdn], XIVArrayMediator.array_type) + + array_connection2.get_array_connection() + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 1, new_fqdn: 1}) + + @patch("controller.array_action.array_connection_manager.XIVArrayMediator._connect") + def test_connection_adds_connections_to_connection_dict(self, connect): + self.assertEqual(array_connection_manager.array_connections_dict, {}) + self.array_connection.get_array_connection() + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 1}) + + self.array_connection.get_array_connection() + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 2}) + + @patch("controller.array_action.array_connection_manager.XIVArrayMediator._connect") + def test_connection_returns_error_on_too_many_connection(self, connect): + array_connection_manager.array_connections_dict = { + self.connection_key: array_connection_manager.XIVArrayMediator.max_connections} + with self.assertRaises(NoConnectionAvailableException): + self.array_connection.get_array_connection() + + @patch("controller.array_action.array_connection_manager.XIVArrayMediator._connect") + def test_connection_returns_error_from_connect_function(self, connect): + error_msg = "some error" + connect.side_effect = [Exception(error_msg)] + + with self.assertRaises(Exception) as ex: + self.array_connection.get_array_connection() + + self.assertTrue(error_msg in str(ex.exception)) + + @patch("controller.array_action.array_connection_manager._socket_connect_test") + def test_detect_array_type(self, socket_connet): + socket_connet.side_effect = [0, 1] + + res = self.array_connection.detect_array_type() + self.assertEqual(res, XIVArrayMediator.array_type) + + socket_connet.side_effect = [1, 1, 0, 0] + + res = self.array_connection.detect_array_type() + self.assertEqual(res, SVCArrayMediator.array_type) + + socket_connet.side_effect = [1, 1, 1, 1] + with self.assertRaises(FailedToFindStorageSystemType): + self.array_connection.detect_array_type() + + @patch("controller.array_action.array_connection_manager.XIVArrayMediator._connect") + def test_exit_reduces_connection_to_zero(self, connect): + self.array_connection.get_array_connection() + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 1}) + + self.array_connection.__exit__("", "", None) + self.assertEqual(array_connection_manager.array_connections_dict, {}) + + @patch("controller.array_action.array_connection_manager.XIVArrayMediator._connect") + def test_exit_reduces_connection(self, connect): + self.array_connection.get_array_connection() + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 1}) + + self.array_connection.get_array_connection() + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 2}) + + self.array_connection.__exit__("", "", None) + self.assertEqual(array_connection_manager.array_connections_dict, {self.connection_key: 1}) diff --git a/controller/tests/array_action/svc/__init__.py b/controller/tests/array_action/svc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/controller/tests/array_action/svc/array_mediator_svc_test.py b/controller/tests/array_action/svc/array_mediator_svc_test.py new file mode 100644 index 000000000..c6f36cfb2 --- /dev/null +++ b/controller/tests/array_action/svc/array_mediator_svc_test.py @@ -0,0 +1,557 @@ +import unittest +from munch import Munch +from mock import patch, Mock +from controller.array_action.array_mediator_svc import SVCArrayMediator +from controller.array_action.array_mediator_svc import \ + build_kwargs_from_capabilities +import controller.array_action.errors as array_errors +from pysvc.unified.response import CLIFailureError +from pysvc import errors as svc_errors +import controller.array_action.config as config +from controller.common.node_info import Initiators + +class TestArrayMediatorSVC(unittest.TestCase): + + @patch( + "controller.array_action.array_mediator_svc.SVCArrayMediator._connect") + def setUp(self, connect): + self.endpoint = ["IP_1"] + self.svc = SVCArrayMediator("user", "password", self.endpoint) + self.svc.client = Mock() + + @patch( + "controller.array_action.array_mediator_svc.SVCArrayMediator._connect") + def test_raise_ManagementIPsNotSupportError_in_init(self, connect): + self.endpoint = ["IP_1", "IP_2"] + with self.assertRaises( + array_errors.StorageManagementIPsNotSupportError): + SVCArrayMediator("user", "password", self.endpoint) + + self.endpoint = [] + with self.assertRaises( + array_errors.StorageManagementIPsNotSupportError): + SVCArrayMediator("user", "password", self.endpoint) + + @patch("pysvc.unified.client.connect") + def test_connect_errors(self, mock_connect): + mock_connect.side_effect = [ + svc_errors.IncorrectCredentials('Failed_a')] + with self.assertRaises(array_errors.CredentialsError): + self.svc._connect() + + def test_close(self): + self.svc.disconnect() + self.svc.client.close.assert_called_with() + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_get_volume_return_CLI_Failure_errors(self, mock_warning): + mock_warning.return_value = False + self.svc.client.svcinfo.lsvdisk.side_effect = [ + CLIFailureError('CMMVC5753E')] + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.get_volume("vol_name") + + def test_get_volume_return_correct_value(self): + vol_ret = Mock(as_single_element=Munch({'vdisk_UID': 'vol_id', + 'name': 'test_vol', + 'capacity': '1024', + 'mdisk_grp_name': 'pool_name' + })) + self.svc.client.svcinfo.lsvdisk.return_value = vol_ret + vol = self.svc.get_volume("test_vol") + self.assertTrue(vol.capacity_bytes == 1024) + self.assertTrue(vol.pool_name == 'pool_name') + self.assertTrue(vol.array_type == 'SVC') + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_get_volume_returns_Exception(self, mock_warning): + mock_warning.return_value = False + self.svc.client.svcinfo.lsvdisk.side_effect = [Exception] + with self.assertRaises(Exception): + self.svc.get_volume("vol") + + def test_get_volume_returns_nothing(self): + vol_ret = Mock(as_single_element=Munch({})) + self.svc.client.svcinfo.lsvdisk.return_value = vol_ret + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.get_volume("vol") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_create_volume_return_exception(self, mock_warning): + mock_warning.return_value = False + self.svc.client.svctask.mkvolume.side_effect = [Exception] + with self.assertRaises(Exception): + self.svc.create_volume("vol", 10, {}, "pool") + 
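        # Illustrative summary (not part of the patch): the SVC mediator is expected
        # to translate raw CLI failures into the driver's typed errors. The CMMVC
        # codes exercised in this file map as follows:
        #   CMMVC6035E            -> array_errors.VolumeAlreadyExists
        #   CMMVC5754E            -> array_errors.PoolDoesNotExist (HostNotFoundError on mapping calls)
        #   CMMVC9292E/CMMVC9301E -> array_errors.PoolDoesNotMatchCapabilities
        #   CMMVC5753E            -> array_errors.VolumeNotFoundError
        # Anything unrecognised is re-raised as a plain CLIFailureError, as the
        # next assertion in this test shows.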
self.svc.client.svctask.mkvolume.side_effect = [ + CLIFailureError("Failed")] + with self.assertRaises(CLIFailureError): + self.svc.create_volume("vol", 10, {}, "pool") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_create_volume_return_volume_exists_error(self, mock_warning): + mock_warning.return_value = False + self.svc.client.svctask.mkvolume.side_effect = [ + CLIFailureError("CMMVC6035E")] + with self.assertRaises(array_errors.VolumeAlreadyExists): + self.svc.create_volume("vol", 10, {}, "pool") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_create_volume_return_pool_not_exists_error(self, mock_warning): + mock_warning.return_value = False + self.svc.client.svctask.mkvolume.side_effect = [ + CLIFailureError("CMMVC5754E")] + with self.assertRaises(array_errors.PoolDoesNotExist): + self.svc.create_volume("vol", 10, {}, "pool") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_create_volume_return_pool_not_match_capabilities_error( + self, mock_warning): + mock_warning.return_value = False + self.svc.client.svctask.mkvolume.side_effect = [ + CLIFailureError("CMMVC9292E")] + with self.assertRaises(array_errors.PoolDoesNotMatchCapabilities): + self.svc.create_volume("vol", 10, {}, "pool") + + self.svc.client.svctask.mkvolume.side_effect = [ + CLIFailureError("CMMVC9301E")] + with self.assertRaises(array_errors.PoolDoesNotMatchCapabilities): + self.svc.create_volume("vol", 10, {}, "pool") + + def test_create_volume_success(self): + self.svc.client.svctask.mkvolume.return_value = Mock() + vol_ret = Mock(as_single_element=Munch({'vdisk_UID': 'vol_id', + 'name': 'test_vol', + 'capacity': '1024', + 'mdisk_grp_name': 'pool_name' + })) + self.svc.client.svcinfo.lsvdisk.return_value = vol_ret + volume = self.svc.create_volume("test_vol", 10, {}, "pool_name") + self.assertEqual(volume.capacity_bytes, 1024) + self.assertEqual(volume.array_type, 'SVC') + self.assertEqual(volume.id, 'vol_id') + + def test_get_vol_by_wwn_return_error(self): + vol_ret = Mock(as_single_element=Munch({})) + self.svc.client.svcinfo.lsvdisk.return_value = vol_ret + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc._get_vol_by_wwn("vol") + + def test_get_vol_by_wwn_return_success(self): + vol_ret = Mock(as_single_element=Munch({'vdisk_UID': 'vol_id', + 'name': 'test_vol', + 'capacity': '1024', + 'mdisk_grp_name': 'pool_name' + })) + self.svc.client.svcinfo.lsvdisk.return_value = vol_ret + ret = self.svc._get_vol_by_wwn("vol_id") + self.assertEqual(ret, 'test_vol') + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_delete_volume_return_volume_not_found(self, mock_warning): + mock_warning.return_value = False + self.svc.client.svctask.rmvolume.side_effect = [ + CLIFailureError("CMMVC5753E")] + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.delete_volume("vol") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_delete_volume_return_volume_delete_errors(self, mock_warning): + mock_warning.return_value = False + self.svc.client.svctask.rmvolume.side_effect = [ + CLIFailureError("CMMVC5753E")] + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.delete_volume("vol") + self.svc.client.svctask.rmvolume.side_effect = [ + CLIFailureError("CMMVC8957E")] + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.delete_volume("vol") + self.svc.client.svctask.rmvolume.side_effect = 
[ + CLIFailureError("Failed")] + with self.assertRaises(CLIFailureError): + self.svc.delete_volume("vol") + + def test_delete_volume_success(self): + self.svc.client.svctask.rmvolume = Mock() + self.svc.delete_volume("vol") + + def test_validate_supported_capabilities_raise_error(self): + capabilities_a = {"SpaceEfficiency": "Test"} + with self.assertRaises( + array_errors.StorageClassCapabilityNotSupported): + self.svc.validate_supported_capabilities(capabilities_a) + capabilities_b = {"SpaceEfficiency": ""} + with self.assertRaises( + array_errors.StorageClassCapabilityNotSupported): + self.svc.validate_supported_capabilities(capabilities_b) + capabilities_c = {} + self.svc.validate_supported_capabilities(capabilities_c) + + def test_validate_supported_capabilities_success(self): + capabilities = {"SpaceEfficiency": "thin"} + self.svc.validate_supported_capabilities(capabilities) + capabilities = {"SpaceEfficiency": "thick"} + self.svc.validate_supported_capabilities(capabilities) + capabilities = {"SpaceEfficiency": "compressed"} + self.svc.validate_supported_capabilities(capabilities) + capabilities = {"SpaceEfficiency": "deduplicated"} + self.svc.validate_supported_capabilities(capabilities) + + def test_build_kwargs_from_capabilities(self): + size = self.svc._convert_size_bytes(1000) + result_a = build_kwargs_from_capabilities({'SpaceEfficiency': 'Thin'}, + 'P1', 'V1', size) + self.assertDictEqual(result_a, {'name': 'V1', 'unit': 'b', + 'size': 1024, 'pool': 'P1', + 'thin': True}) + result_b = build_kwargs_from_capabilities( + {'SpaceEfficiency': 'compressed'}, 'P2', 'V2', size) + self.assertDictEqual(result_b, {'name': 'V2', 'unit': 'b', + 'size': 1024, 'pool': 'P2', + 'compressed': True}) + result_c = build_kwargs_from_capabilities({'SpaceEfficiency': 'Deduplicated'}, + 'P3', 'V3', + self.svc._convert_size_bytes( + 2048)) + self.assertDictEqual(result_c, {'name': 'V3', 'unit': 'b', + 'size': 2048, 'pool': 'P3', + 'compressed': True, + 'deduplicated': True}) + + def test_properties(self): + self.assertEqual(SVCArrayMediator.port, 22) + self.assertEqual(SVCArrayMediator.minimal_volume_size_in_bytes, 512) + self.assertEqual(SVCArrayMediator.array_type, 'SVC') + self.assertEqual(SVCArrayMediator.max_vol_name_length, 64) + self.assertEqual(SVCArrayMediator.max_connections, 2) + self.assertEqual(SVCArrayMediator.max_lun_retries, 10) + + def test_get_host_by_identifiers_returns_host_not_found(self): + host_munch_ret_1 = Munch({'id': 'host_id_1', 'name': 'test_host_1', + 'iscsi_name': 'iqn.test.1'}) + host_munch_ret_2 = Munch({'id': 'host_id_2', 'name': 'test_host_1', + 'iscsi_name': 'iqn.test.2'}) + host_munch_ret_3 = Munch({'id': 'host_id_3', 'name': 'test_host_3', + 'iscsi_name': 'iqn.test.3'}) + ret1 = [host_munch_ret_1, host_munch_ret_2] + ret2 = Munch(as_single_element=host_munch_ret_3) + + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret2] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.get_host_by_host_identifiers(Initiators('Test_iqn', ['Test_wwn'])) + + def test_get_host_by_identifier_return_host_not_found_when_no_hosts_exist( + self): + host_munch_ret_1 = Munch({}) + host_munch_ret_2 = Munch({}) + host_munch_ret_3 = Munch({}) + ret1 = [host_munch_ret_1, host_munch_ret_2] + ret2 = Munch(as_single_element=host_munch_ret_3) + + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret2] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.get_host_by_host_identifiers(Initiators('Test_iqn', ['Test_wwn'])) + + def 
test_get_host_by_identifiers_raise_multiplehostsfounderror(self): + host_munch_ret_1 = Munch({'id': 'host_id_1', 'name': 'test_host_1', + 'iscsi_name': 'iqn.test.1'}) + host_munch_ret_2 = Munch({'id': 'host_id_2', 'name': 'test_host_2', + 'iscsi_name': 'iqn.test.3'}) + host_munch_ret_3 = Munch({'id': 'host_id_3', 'name': 'test_host_3', + 'WWPN': ['Test_wwn']}) + ret1 = [host_munch_ret_1, host_munch_ret_2] + ret2 = Munch(as_single_element=host_munch_ret_2) + ret3 = Munch(as_single_element=host_munch_ret_3) + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret3] + with self.assertRaises(array_errors.MultipleHostsFoundError): + self.svc.get_host_by_host_identifiers(Initiators('iqn.test.3', ['Test_wwn'])) + + def test_get_host_by_identifiers_return_iscsi_host(self): + host_munch_ret_1 = Munch({'id': 'host_id_1', 'name': 'test_host_1', + 'WWPN': ['abc1']}) + host_munch_ret_2 = Munch({'id': 'host_id_2', 'name': 'test_host_3', + 'iscsi_name': 'iqn.test.2', + 'WWPN': ['abc3']}) + host_munch_ret_3 = Munch({'id': 'host_id_3', 'name': 'test_host_3', + 'WWPN': ['abc3'], + 'iscsi_name': 'iqn.test.3'}) + ret1 = [host_munch_ret_1, host_munch_ret_2] + ret2 = Munch(as_single_element=host_munch_ret_2) + ret3 = Munch(as_single_element=host_munch_ret_3) + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret3] + host, connectivity_type = self.svc.get_host_by_host_identifiers(Initiators( + 'iqn.test.2', ['abcd3'])) + self.assertEqual('test_host_3', host) + self.assertEqual([config.ISCSI_CONNECTIVITY_TYPE], connectivity_type) + + def test_get_host_by_identifiers_return_fc_host(self): + host_munch_ret_1 = Munch({'id': 'host_id_1', 'name': 'test_host_1', + 'WWPN': ['abc1']}) + host_munch_ret_2 = Munch({'id': 'host_id_2', 'name': 'test_host_3', + 'iscsi_name': 'iqn.test.2', + 'WWPN': 'abc3'}) + host_munch_ret_3 = Munch({'id': 'host_id_3', 'name': 'test_host_3', + 'WWPN': ['abc1', 'abc3'], + 'iscsi_name': 'iqn.test.3'}) + ret1 = [host_munch_ret_1, host_munch_ret_2] + ret2 = Munch(as_single_element=host_munch_ret_2) + ret3 = Munch(as_single_element=host_munch_ret_3) + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret3] + host, connectivity_type = self.svc.get_host_by_host_identifiers(Initiators( + 'iqn.test.6', ['abc3', 'ABC1'])) + self.assertEqual('test_host_3', host) + self.assertEqual([config.FC_CONNECTIVITY_TYPE], connectivity_type) + + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret3] + host, connectivity_type = self.svc.get_host_by_host_identifiers(Initiators( + 'iqn.test.6', ['abc3'])) + self.assertEqual('test_host_3', host) + self.assertEqual([config.FC_CONNECTIVITY_TYPE], connectivity_type) + + def test_get_host_by_identifiers_with_wrong_fc_iscsi_raise_not_found(self): + host_munch_ret_1 = Munch({'id': 'host_id_1', 'name': 'test_host_1', + 'WWPN': ['abc1']}) + host_munch_ret_2 = Munch({'id': 'host_id_2', 'name': 'test_host_3', + 'iscsi_name': 'iqn.test.2', + 'WWPN': ['abc3']}) + host_munch_ret_3 = Munch({'id': 'host_id_3', 'name': 'test_host_3', + 'WWPN': ['abc1', 'abc3'], + 'iscsi_name': 'iqn.test.3'}) + ret1 = [host_munch_ret_1, host_munch_ret_2] + ret2 = Munch(as_single_element=host_munch_ret_2) + ret3 = Munch(as_single_element=host_munch_ret_3) + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret3] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.get_host_by_host_identifiers(Initiators('', [])) + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret3] + with self.assertRaises(array_errors.HostNotFoundError): + 
self.svc.get_host_by_host_identifiers(Initiators('123', ['a', 'b'])) + + def test_get_host_by_identifiers_return_iscsi_and_fc_all_support(self): + host_munch_ret_1 = Munch({'id': 'host_id_1', 'name': 'test_host_1', + 'WWPN': ['abc1']}) + host_munch_ret_2 = Munch({'id': 'host_id_2', 'name': 'test_host_3', + 'iscsi_name': 'iqn.test.6', + 'WWPN': ['abcd3']}) + host_munch_ret_3 = Munch({'id': 'host_id_3', 'name': 'test_host_3', + 'WWPN': ['abc3'], + 'iscsi_name': 'iqn.test.2'}) + ret1 = [host_munch_ret_1, host_munch_ret_2] + ret2 = Munch(as_single_element=host_munch_ret_2) + ret3 = Munch(as_single_element=host_munch_ret_3) + self.svc.client.svcinfo.lshost.side_effect = [ret1, ret2, ret3] + host, connectivity_type = self.svc.get_host_by_host_identifiers(Initiators( + 'iqn.test.2', ['ABC3'])) + self.assertEqual('test_host_3', host) + self.assertEqual([config.ISCSI_CONNECTIVITY_TYPE, + config.FC_CONNECTIVITY_TYPE], connectivity_type) + + def test_get_volume_mappings_empty_mapping_list(self): + self.svc.client.svcinfo.lsvdiskhostmap.return_value = [] + mappings = self.svc.get_volume_mappings("vol") + self.assertEqual(mappings, {}) + + def test_get_volume_mappings_on_volume_not_found(self): + self.svc.client.svcinfo.lsvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('Failed')] + + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.get_volume_mappings('vol') + + def test_get_volume_mappings_success(self): + map1 = Munch({'id': '51', 'name': 'peng', 'SCSI_id': '0', + 'host_id': '12', 'host_name': 'Test_P'}) + map2 = Munch({'id': '52', 'name': 'peng', 'SCSI_id': '1', + 'host_id': '18', 'host_name': 'Test_W'}) + self.svc.client.svcinfo.lsvdiskhostmap.return_value = [map1, map2] + mappings = self.svc.get_volume_mappings("vol") + self.assertEqual(mappings, {'Test_P': '0', 'Test_W': '1'}) + + def test_get_first_free_lun_raises_host_not_found_error(self): + self.svc.client.svcinfo.lshostvdiskmap.side_effect = [ + svc_errors.CommandExecutionError('Failed')] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.get_first_free_lun('host') + + def test_get_first_free_lun_with_no_host_mappings(self): + self.svc.client.svcinfo.lshostvdiskmap.return_value = [] + lun = self.svc.get_first_free_lun('host') + self.assertEqual(lun, '0') + + @patch.object(SVCArrayMediator, "MAX_LUN_NUMBER", 3) + @patch.object(SVCArrayMediator, "MIN_LUN_NUMBER", 1) + def test_get_first_free_lun_success(self): + map1 = Munch({'id': '51', 'name': 'peng', 'SCSI_id': '0', + 'host_id': '12', 'host_name': 'Test_P'}) + map2 = Munch({'id': '56', 'name': 'peng', 'SCSI_id': '1', + 'host_id': '16', 'host_name': 'Test_W'}) + self.svc.client.svcinfo.lshostvdiskmap.return_value = [map1, map2] + lun = self.svc.get_first_free_lun('Test_P') + self.assertEqual(lun, '2') + + @patch.object(SVCArrayMediator, "MAX_LUN_NUMBER", 3) + @patch.object(SVCArrayMediator, "MIN_LUN_NUMBER", 1) + def test_first_free_lun_no_available_lun(self): + map1 = Munch({'id': '51', 'name': 'peng', 'SCSI_id': '1', + 'host_id': '12', 'host_name': 'Test_P'}) + map2 = Munch({'id': '56', 'name': 'peng', 'SCSI_id': '2', + 'host_id': '16', 'host_name': 'Test_W'}) + map3 = Munch({'id': '58', 'name': 'Host', 'SCSI_id': '3', + 'host_id': '18', 'host_name': 'Test_H'}) + self.svc.client.svcinfo.lshostvdiskmap.return_value = [map1, map2, + map3] + with self.assertRaises(array_errors.NoAvailableLunError): + self.svc.get_first_free_lun('Test_P') + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + 
@patch("controller.array_action.array_mediator_svc.SVCArrayMediator.get_first_free_lun") + def test_map_volume_vol_not_found(self, mock_get_first_free_lun, + mock_is_warning_message): + mock_is_warning_message.return_value = False + mock_get_first_free_lun.return_value = '1' + self.svc.client.svctask.mkvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('CMMVC5804E')] + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.map_volume("vol", "host") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + @patch("controller.array_action.array_mediator_svc.SVCArrayMediator.get_first_free_lun") + def test_map_volume_host_not_found(self, mock_get_first_free_lun, + mock_is_warning_message): + mock_is_warning_message.return_value = False + mock_get_first_free_lun.return_value = '2' + self.svc.client.svctask.mkvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('CMMVC5754E')] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.map_volume("vol", "host") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + @patch("controller.array_action.array_mediator_svc.SVCArrayMediator.get_first_free_lun") + def test_map_volume_vol_already_in_use(self, mock_get_first_free_lun, + mock_is_warning_message): + mock_is_warning_message.return_value = False + mock_get_first_free_lun.return_value = '3' + self.svc.client.svctask.mkvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('CMMVC5878E')] + with self.assertRaises(array_errors.LunAlreadyInUseError): + self.svc.map_volume("vol", "host") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + @patch("controller.array_action.array_mediator_svc.SVCArrayMediator.get_first_free_lun") + def test_map_volume_raise_mapping_error( + self, mock_get_first_free_lun, mock_is_warning_message): + mock_is_warning_message.return_value = False + mock_get_first_free_lun.return_value = '4' + self.svc.client.svctask.mkvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('Failed')] + with self.assertRaises(array_errors.MappingError): + self.svc.map_volume("vol", "host") + + def test_map_volume_raise_exception(self): + self.svc.client.svctask.mkvdiskhostmap.side_effect = [Exception] + with self.assertRaises(Exception): + self.svc.map_volume("vol", "host") + + @patch("controller.array_action.array_mediator_svc.SVCArrayMediator.get_first_free_lun") + def test_map_volume_success(self, mock_get_first_free_lun): + mock_get_first_free_lun.return_value = '5' + self.svc.client.svctask.mkvdiskhostmap.return_value = None + lun = self.svc.map_volume("vol", "host") + self.assertEqual(lun, '5') + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_unmap_volume_vol_not_found(self, mock_is_warning_message): + mock_is_warning_message.return_value = False + self.svc.client.svctask.rmvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('CMMVC5753E')] + with self.assertRaises(array_errors.VolumeNotFoundError): + self.svc.unmap_volume("vol", "host") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_unmap_volume_host_not_found(self, mock_is_warning_message): + mock_is_warning_message.return_value = False + self.svc.client.svctask.rmvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('CMMVC5754E')] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.unmap_volume("vol", "host") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def 
test_unmap_volume_vol_already_unmapped(self, mock_is_warning_message): + mock_is_warning_message.return_value = False + self.svc.client.svctask.rmvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('CMMVC5842E')] + with self.assertRaises(array_errors.VolumeAlreadyUnmappedError): + self.svc.unmap_volume("vol", "host") + + @patch("controller.array_action.array_mediator_svc.is_warning_message") + def test_unmap_volume_raise_unmapped_error(self, mock_is_warning_message): + mock_is_warning_message.return_value = False + self.svc.client.svctask.rmvdiskhostmap.side_effect = [ + svc_errors.CommandExecutionError('Failed')] + with self.assertRaises(array_errors.UnMappingError): + self.svc.unmap_volume("vol", "host") + + def test_unmap_volume_raise_exception(self): + self.svc.client.svctask.rmvdiskhostmap.side_effect = [Exception] + with self.assertRaises(Exception): + self.svc.unmap_volume("vol", "host") + + def test_unmap_volume_success(self): + self.svc.client.svctask.rmvdiskhostmap.return_value = None + lun = self.svc.unmap_volume("vol", "host") + + def test_get_array_iqns_with_exception(self): + self.svc.client.svcinfo.lsnode.side_effect = [Exception] + with self.assertRaises(Exception): + self.svc.get_array_iqns() + + def test_get_array_iqns_without_node(self): + self.svc.client.svcinfo.lsnode.return_value = [] + iqns = self.svc.get_array_iqns() + self.assertEqual(iqns, []) + + def test_get_array_iqns_with_no_online_node(self): + node = Munch({'id': '1', + 'name': 'node1', + 'iscsi_name': 'iqn.1986-03.com.ibm:2145.v7k1.node1', + 'status': 'offline'}) + self.svc.client.svcinfo.lsnode.return_value = [node] + iqns = self.svc.get_array_iqns() + self.assertEqual(iqns, []) + + def test_get_array_iqns_with_multi_nodes(self): + node1 = Munch({'id': '1', + 'name': 'node1', + 'iscsi_name': 'iqn.1986-03.com.ibm:2145.v7k1.node1', + 'status': 'online'}) + node2 = Munch({'id': '2', + 'name': 'node2', + 'iscsi_name': 'iqn.1986-03.com.ibm:2145.v7k1.node2', + 'status': 'online'}) + self.svc.client.svcinfo.lsnode.return_value = [node1, node2] + iqns = self.svc.get_array_iqns() + self.assertEqual(iqns, + ["iqn.1986-03.com.ibm:2145.v7k1.node1", + "iqn.1986-03.com.ibm:2145.v7k1.node2"]) + + def test_get_array_fc_wwns_failed(self): + self.svc.client.svcinfo.lsfabric.side_effect = [ + svc_errors.CommandExecutionError('Failed')] + with self.assertRaises(svc_errors.CommandExecutionError): + self.svc.get_array_fc_wwns('host') + + def test_get_array_fc_wwns_success(self): + port_1 = Munch({'remote_wwpn': '21000024FF3A42E5', + 'remote_nportid': '012F00', 'id': '1', + 'node_name': 'node1', 'local_wwpn': '5005076810282CD8', + 'local_port': '8', 'local_nportid': '010601', + 'state': 'active', 'name': 'csi_host', + 'cluster_name': '', 'type': 'host'}) + port_2 = Munch({'remote_wwpn': '21000024FF3A42E6', + 'remote_nportid': '012F10', 'id': '2', + 'node_name': 'node2', 'local_wwpn': '5005076810262CD8', + 'local_port': '9', 'local_nportid': '010611', + 'state': 'inactive', 'name': 'csi_host', + 'cluster_name': '', 'type': 'host'}) + self.svc.client.svcinfo.lsfabric.return_value = [port_1, port_2] + wwns = self.svc.get_array_fc_wwns('host') + self.assertEqual(wwns, ['5005076810282CD8', '5005076810262CD8']) diff --git a/controller/tests/array_action/xiv/__init__.py b/controller/tests/array_action/xiv/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/tests/array_action/xiv/array_mediator_xiv_tests.py b/controller/tests/array_action/xiv/array_mediator_xiv_tests.py new file mode 100644 
index 000000000..d0d113f4c --- /dev/null +++ b/controller/tests/array_action/xiv/array_mediator_xiv_tests.py @@ -0,0 +1,337 @@ +import unittest +from pyxcli import errors as xcli_errors +from controller.array_action.array_mediator_xiv import XIVArrayMediator +from mock import patch, Mock +import controller.array_action.errors as array_errors +from controller.tests.array_action.xiv import utils +from controller.array_action.config import ISCSI_CONNECTIVITY_TYPE +from controller.array_action.config import FC_CONNECTIVITY_TYPE +from controller.common.node_info import Initiators + + +class TestArrayMediatorXIV(unittest.TestCase): + + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator._connect") + def setUp(self, connect): + self.fqdn = "fqdn" + self.mediator = XIVArrayMediator("user", "password", self.fqdn) + self.mediator.client = Mock() + + def test_get_volume_return_correct_errors(self): + error_msg = "ex" + self.mediator.client.cmd.vol_list.side_effect = [Exception("ex")] + with self.assertRaises(Exception) as ex: + self.mediator.get_volume("some name") + + self.assertTrue(error_msg in str(ex.exception)) + + def test_get_volume_return_correct_value(self): + vol = utils.get_mock_xiv_volume(10, "vol_name", "wwn") + ret = Mock() + ret.as_single_element = vol + self.mediator.client.cmd.vol_list.return_value = ret + res = self.mediator.get_volume("some name") + + self.assertTrue(res.capacity_bytes == vol.capacity * 512) + self.assertTrue(res.capacity_bytes == vol.capacity * 512) + + def test_get_volume_returns_illegal_object_name(self): + self.mediator.client.cmd.vol_list.side_effect = [xcli_errors.IllegalNameForObjectError("", "vol", "")] + with self.assertRaises(array_errors.IllegalObjectName): + res = self.mediator.get_volume("vol") + + def test_get_volume_returns_nothing(self): + ret = Mock() + ret.as_single_element = None + self.mediator.client.cmd.vol_list.return_value = ret + with self.assertRaises(array_errors.VolumeNotFoundError): + res = self.mediator.get_volume("vol") + + @patch("controller.array_action.array_mediator_xiv.XCLIClient") + def test_connect_errors(self, client): + client.connect_multiendpoint_ssl.return_value = Mock() + client.connect_multiendpoint_ssl.side_effect = [xcli_errors.CredentialsError("a", "b", "c")] + with self.assertRaises(array_errors.CredentialsError): + self.mediator._connect() + + client.connect_multiendpoint_ssl.side_effect = [xcli_errors.XCLIError()] + with self.assertRaises(array_errors.CredentialsError) as ex: + self.mediator._connect() + + @patch("controller.array_action.array_mediator_xiv.XCLIClient") + def test_close(self, client): + self.mediator.client.is_connected = lambda: True + self.mediator.disconnect() + self.mediator.client.close.assert_called_once_with() + + self.mediator.client.is_connected = lambda: False + self.mediator.disconnect() + self.mediator.client.close.assert_called_once_with() + + def test_create_volume_return_illegal_name_for_object(self): + self.mediator.client.cmd.vol_create.side_effect = [xcli_errors.IllegalNameForObjectError("", "vol", "")] + with self.assertRaises(array_errors.IllegalObjectName): + self.mediator.create_volume("vol", 10, [], "pool1") + + def test_create_volume_return_volume_exists_error(self): + self.mediator.client.cmd.vol_create.side_effect = [xcli_errors.VolumeExistsError("", "vol", "")] + with self.assertRaises(array_errors.VolumeAlreadyExists): + self.mediator.create_volume("vol", 10, [], "pool1") + + def test_create_volume_return_pool_does_not_exists_error(self): + 
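        # Illustrative summary (not part of the patch): the XIV mediator is expected
        # to translate pyxcli exceptions into the driver's own error types. The
        # mappings exercised in this file are:
        #   xcli_errors.IllegalNameForObjectError             -> array_errors.IllegalObjectName
        #   xcli_errors.VolumeExistsError                     -> array_errors.VolumeAlreadyExists
        #   xcli_errors.PoolDoesNotExistError                 -> array_errors.PoolDoesNotExist
        #   xcli_errors.VolumeBadNameError                    -> array_errors.VolumeNotFoundError
        #   xcli_errors.HostBadNameError                      -> array_errors.HostNotFoundError
        #   xcli_errors.OperationForbiddenForUserCategoryError -> array_errors.PermissionDeniedError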
self.mediator.client.cmd.vol_create.side_effect = [xcli_errors.PoolDoesNotExistError("", "pool", "")] + with self.assertRaises(array_errors.PoolDoesNotExist): + self.mediator.create_volume("vol", 10, [], "pool1") + + @patch.object(XIVArrayMediator, "_generate_volume_response") + def test_create_volume__generate_volume_response_return_exception(self, response): + response.side_effect = Exception("err") + with self.assertRaises(Exception): + self.mediator.create_volume("vol", 10, [], "pool1") + + def test_delete_volume_return_volume_not_found(self): + ret = Mock() + ret.as_single_element = None + self.mediator.client.cmd.vol_list.return_value = ret + with self.assertRaises(array_errors.VolumeNotFoundError): + self.mediator.delete_volume("vol-wwn") + + def test_delete_volume_return_bad_name_error(self): + self.mediator.client.cmd.vol_delete.side_effect = [xcli_errors.VolumeBadNameError("", "vol", "")] + with self.assertRaises(array_errors.VolumeNotFoundError): + self.mediator.delete_volume("vol-wwn") + + def test_delete_volume_fails_on_permissions(self): + self.mediator.client.cmd.vol_delete.side_effect = [ + xcli_errors.OperationForbiddenForUserCategoryError("", "vol", "")] + with self.assertRaises(array_errors.PermissionDeniedError): + self.mediator.delete_volume("vol-wwn") + + def test_delete_volume_succeeds(self): + self.mediator.client.cmd.vol_delete = Mock() + self.mediator.delete_volume("vol-wwn") + + def test_property(self): + self.assertEqual(XIVArrayMediator.port, 7778) + + def test_get_host_by_identifiers_returns_host_not_found(self): + iqn = "iqn" + wwns = ['wwn1', 'wwn2'] + host1 = utils.get_mock_xiv_host("host1", "iqn1", "") + host2 = utils.get_mock_xiv_host("host2", "iqn1", "") + host3 = utils.get_mock_xiv_host("host3", "iqn2", "") + ret = Mock() + ret.as_list = [host1, host2, host3] + + self.mediator.client.cmd.host_list.return_value = ret + with self.assertRaises(array_errors.HostNotFoundError): + self.mediator.get_host_by_host_identifiers(Initiators(iqn, wwns)) + + def test_get_host_by_identifiers_returns_host_not_found_when_no_hosts_exist(self): + iqn = "iqn" + ret = Mock() + ret.as_list = [] + + self.mediator.client.cmd.host_list.return_value = ret + with self.assertRaises(array_errors.HostNotFoundError): + self.mediator.get_host_by_host_identifiers(Initiators(iqn,[])) + + def test_get_host_by_iscsi_identifiers_succeeds(self): + iqn = "iqn1" + wwns = [] + right_host = "host1" + + host1 = utils.get_mock_xiv_host(right_host, "iqn1,iqn4", "") + host2 = utils.get_mock_xiv_host("host2", "iqn2", "") + host3 = utils.get_mock_xiv_host("host3", "iqn2", "") + host4 = utils.get_mock_xiv_host("host4", "iqn3", "") + ret = Mock() + ret.as_list = [host1, host2, host3, host4] + + self.mediator.client.cmd.host_list.return_value = ret + host, connectivity_type = self.mediator.get_host_by_host_identifiers(Initiators(iqn, wwns)) + self.assertEqual(host, right_host) + self.assertEqual(connectivity_type, [ISCSI_CONNECTIVITY_TYPE]) + + def test_get_host_by_fc_identifiers_succeeds(self): + iqn = "iqn5" + wwns = ["wwn2", "wwn5"] + right_host = "host2" + + host1 = utils.get_mock_xiv_host("host1", "iqn1", "wwn1") + host2 = utils.get_mock_xiv_host(right_host, "iqn2", "wwn2") + host3 = utils.get_mock_xiv_host("host3", "iqn2", "wwn3") + host4 = utils.get_mock_xiv_host("host4", "iqn3", "wwn4") + ret = Mock() + ret.as_list = [host1, host2, host3, host4] + + self.mediator.client.cmd.host_list.return_value = ret + host, connectivity_type = self.mediator.get_host_by_host_identifiers(Initiators(iqn, wwns)) + 
self.assertEqual(host, right_host) + self.assertEqual(connectivity_type, [FC_CONNECTIVITY_TYPE]) + + def test_get_host_by_iscsi_and_fc_identifiers_succeeds(self): + iqn = "iqn2" + wwns = ["wwn2", "wwn5"] + right_host = "host2" + + host1 = utils.get_mock_xiv_host("host1", "iqn1", "wwn1") + host2 = utils.get_mock_xiv_host(right_host, "iqn2", "wwn2") + host3 = utils.get_mock_xiv_host("host3", "iqn3", "wwn3") + host4 = utils.get_mock_xiv_host("host4", "iqn4", "wwn4") + ret = Mock() + ret.as_list = [host1, host2, host3, host4] + + self.mediator.client.cmd.host_list.return_value = ret + host, connectivity_type = self.mediator.get_host_by_host_identifiers(Initiators(iqn, wwns)) + self.assertEqual(host, right_host) + self.assertEqual(connectivity_type, [FC_CONNECTIVITY_TYPE, ISCSI_CONNECTIVITY_TYPE]) + + def test_get_volume_mappings_empty_mapping_list(self): + # host3 = utils.get_mock_xiv_mapping(2, "host1") + ret = Mock() + ret.as_list = [] + self.mediator.client.cmd.vol_mapping_list.return_value = ret + mappings = self.mediator.get_volume_mappings("vol") + self.assertEqual(mappings, {}) + + def test_get_volume_mappings_success(self): + host1 = "host1" + host2 = "host2" + map1 = utils.get_mock_xiv_vol_mapping(2, host1) + map2 = utils.get_mock_xiv_vol_mapping(3, host2) + ret = Mock() + ret.as_list = [map1, map2] + self.mediator.client.cmd.vol_mapping_list.return_value = ret + mappings = self.mediator.get_volume_mappings("vol") + self.assertEqual(mappings, {host1: 2, host2: 3}) + + def test_get_volume_mappings_on_volume_not_found(self): + vol = Mock() + vol.as_single_element = None + self.mediator.client.cmd.vol_list.return_value = vol + + with self.assertRaises(array_errors.VolumeNotFoundError): + self.mediator.get_volume_mappings("vol") + + def test_get_next_available_lun_raises_host_bad_name(self): + # mapping = get_mock_xiv_host_mapping(1) + self.mediator.client.cmd.mapping_list.side_effect = [xcli_errors.HostBadNameError("", "host", "")] + with self.assertRaises(array_errors.HostNotFoundError): + self.mediator._get_next_available_lun("host") + + def test_get_next_available_with_no_host_mappings(self): + res = Mock() + res.as_list = [] + self.mediator.client.cmd.mapping_list.return_value = res + lun = self.mediator._get_next_available_lun("host") + self.assertTrue(lun <= self.mediator.MAX_LUN_NUMBER) + self.assertTrue(lun >= self.mediator.MIN_LUN_NUMBER) + + @patch.object(XIVArrayMediator, "MAX_LUN_NUMBER", 3) + @patch.object(XIVArrayMediator, "MIN_LUN_NUMBER", 1) + def test_get_next_available_lun_success(self): + mapping1 = utils.get_mock_xiv_host_mapping(1) + mapping2 = utils.get_mock_xiv_host_mapping(3) + res = Mock() + res.as_list = [mapping1, mapping2] + self.mediator.client.cmd.mapping_list.return_value = res + lun = self.mediator._get_next_available_lun("host") + self.assertEqual(lun, 2) + + @patch.object(XIVArrayMediator, "MAX_LUN_NUMBER", 3) + @patch.object(XIVArrayMediator, "MIN_LUN_NUMBER", 1) + def test_get_next_available_lun_no_available_lun(self): + mapping1 = utils.get_mock_xiv_host_mapping(1) + mapping2 = utils.get_mock_xiv_host_mapping(3) + mapping3 = utils.get_mock_xiv_host_mapping(2) + res = Mock() + res.as_list = [mapping1, mapping2, mapping3] + self.mediator.client.cmd.mapping_list.return_value = res + with self.assertRaises(array_errors.NoAvailableLunError): + self.mediator._get_next_available_lun("host") + + def test_map_volume_vol_bot_found(self): + vol = Mock() + vol.as_single_element = None + self.mediator.client.cmd.vol_list.return_value = vol + with 
self.assertRaises(array_errors.VolumeNotFoundError): + self.mediator.map_volume("vol", "host") + + @patch.object(XIVArrayMediator, "MAX_LUN_NUMBER", 3) + @patch.object(XIVArrayMediator, "MIN_LUN_NUMBER", 1) + def test_map_volume_no_availabe_lun(self): + mapping1 = utils.get_mock_xiv_host_mapping(1) + mapping2 = utils.get_mock_xiv_host_mapping(3) + mapping3 = utils.get_mock_xiv_host_mapping(2) + res = Mock() + res.as_list = [mapping1, mapping2, mapping3] + self.mediator.client.cmd.mapping_list.return_value = res + with self.assertRaises(array_errors.NoAvailableLunError): + self.mediator.map_volume("vol", "host") + + @patch.object(XIVArrayMediator, "_get_next_available_lun") + def map_volume_with_error(self, xcli_err, status, returned_err, get_next_lun): + self.mediator.client.cmd.map_vol.side_effect = [xcli_err("", status, "")] + with self.assertRaises(returned_err): + self.mediator.map_volume("vol", "host") + + def test_map_volume_operation_forbidden(self): + self.map_volume_with_error(xcli_errors.OperationForbiddenForUserCategoryError, "", + array_errors.PermissionDeniedError) + + def test_map_volume_volume_bad_name(self): + self.map_volume_with_error(xcli_errors.VolumeBadNameError, "", + array_errors.VolumeNotFoundError) + + def test_map_volume_host_bad_name(self): + self.map_volume_with_error(xcli_errors.HostBadNameError, "", + array_errors.HostNotFoundError) + + def test_map_volume_command_runtime_lun_in_use_error(self): + self.map_volume_with_error(xcli_errors.CommandFailedRuntimeError, "LUN is already in use 3", + array_errors.LunAlreadyInUseError) + + def test_map_volume_other_command_runtime_error(self): + self.map_volume_with_error(xcli_errors.CommandFailedRuntimeError, "", + array_errors.MappingError) + + @patch.object(XIVArrayMediator, "_get_next_available_lun") + def test_map_volume_success(self, next_lun): + next_lun.return_value = 5 + self.mediator.client.cmd.map_vol.return_value = None + lun = self.mediator.map_volume("vol", "host") + self.assertEqual(lun, '5') + + def test_unmap_volume_volume_not_found(self): + vol = Mock() + vol.as_single_element = None + self.mediator.client.cmd.vol_list.return_value = vol + with self.assertRaises(array_errors.VolumeNotFoundError): + self.mediator.unmap_volume("vol", "host") + + def unmap_volume_with_error(self, xcli_err, status, returned_err): + self.mediator.client.cmd.unmap_vol.side_effect = [xcli_err("", status, "")] + with self.assertRaises(returned_err): + self.mediator.unmap_volume("vol", "host") + + def test_unmap_volume_vol_not_found(self): + self.unmap_volume_with_error(xcli_errors.VolumeBadNameError, "", array_errors.VolumeNotFoundError) + + def test_unmap_volume_host_not_found(self): + self.unmap_volume_with_error(xcli_errors.HostBadNameError, "", array_errors.HostNotFoundError) + + def test_unmap_volume_operation_forbidden(self): + self.unmap_volume_with_error(xcli_errors.OperationForbiddenForUserCategoryError, "", + array_errors.PermissionDeniedError) + + def test_unmap_volume_command_runtime_mapping_not_defined(self): + self.unmap_volume_with_error(xcli_errors.CommandFailedRuntimeError, "The requested mapping is not defined", + array_errors.VolumeAlreadyUnmappedError) + + def test_unmap_volume_command_runtime_other_error(self): + self.unmap_volume_with_error(xcli_errors.CommandFailedRuntimeError, "", array_errors.UnMappingError) + + def test_unmap_volume_success(self): + self.mediator.client.cmd.unmap_vol.return_value = None + self.mediator.unmap_volume("vol", "host") diff --git a/controller/tests/array_action/xiv/utils.py 
b/controller/tests/array_action/xiv/utils.py new file mode 100644 index 000000000..47ea1520b --- /dev/null +++ b/controller/tests/array_action/xiv/utils.py @@ -0,0 +1,31 @@ +from mock import Mock + + +def get_mock_xiv_volume(size, name, wwn): + vol = Mock() + vol.capacity = size + vol.wwn = wwn + vol.name = name + vol.pool_name = "vol-name" + return vol + + +def get_mock_xiv_host(name, iscsi_ports, fc_ports): + host = Mock() + host.iscsi_ports = iscsi_ports + host.fc_ports = fc_ports + host.name = name + return host + + +def get_mock_xiv_vol_mapping(lun, host): + mapping = Mock() + mapping.lun = lun + mapping.host = host + return mapping + + +def get_mock_xiv_host_mapping(lun): + mapping = Mock() + mapping.lun = lun + return mapping diff --git a/controller/tests/controller_server/__init__.py b/controller/tests/controller_server/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controller/tests/controller_server/csi_controller_server_test.py b/controller/tests/controller_server/csi_controller_server_test.py new file mode 100644 index 000000000..f583480f7 --- /dev/null +++ b/controller/tests/controller_server/csi_controller_server_test.py @@ -0,0 +1,816 @@ +import unittest +# from unittest import mock as umock +import grpc + +from mock import patch, Mock, call +from controller.tests import utils + +from controller.csi_general import csi_pb2 +from controller.array_action.array_mediator_xiv import XIVArrayMediator +from controller.controller_server.csi_controller_server import ControllerServicer +from controller.controller_server.test_settings import vol_name +import controller.array_action.errors as array_errors +import controller.controller_server.errors as controller_errors + +from controller.controller_server.config import PARAMETERS_PREFIX + + +class TestControllerServerCreateVolume(unittest.TestCase): + + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator._connect") + def setUp(self, connect): + self.fqdn = "fqdn" + self.mediator = XIVArrayMediator("user", "password", self.fqdn) + self.mediator.client = Mock() + + self.mediator.get_volume = Mock() + self.mediator.get_volume.side_effect = [array_errors.VolumeNotFoundError("vol")] + + self.servicer = ControllerServicer(self.fqdn) + + self.request = Mock() + caps = Mock() + caps.mount = Mock() + caps.mount.fs_type = "ext4" + access_types = csi_pb2.VolumeCapability.AccessMode + caps.access_mode.mode = access_types.SINGLE_NODE_WRITER + + self.request.volume_capabilities = [caps] + + self.pool = 'pool1' + self.request.secrets = {"username": "user", "password": "pass", "management_address": "mg"} + self.request.parameters = {"pool": self.pool} + self.capacity_bytes = 10 + self.request.capacity_range.required_bytes = self.capacity_bytes + self.request.name = vol_name + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_empty_name(self, a_enter, a_exit): + a_enter.return_value = self.mediator + self.request.name = "" + context = utils.FakeContext() + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertTrue("name" in context.details) + self.assertEqual(res, csi_pb2.CreateVolumeResponse()) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + 
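    # Illustrative note (not part of the patch): the controller-server tests in
    # this file all follow the same wiring. ArrayConnectionManager.__enter__ /
    # __exit__ and detect_array_type are patched so that the servicer's managed
    # connection hands back self.mediator (an XIVArrayMediator whose client is a
    # Mock), and the gRPC status code and details are asserted through the
    # context captured in utils.FakeContext.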
@patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_succeeds(self, a_exit, a_enter, array_type): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + self.mediator.create_volume = Mock() + self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, "vol", "wwn", "xiv") + array_type.return_value = "a9k" + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + self.mediator.get_volume.assert_called_once_with(vol_name) + self.mediator.create_volume.assert_called_once_with(vol_name, 10, {}, 'pool1') + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_wrong_secrets(self, a_enter, a_exit, array_type): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + self.request.secrets = {"password": "pass", "management_address": "mg"} + self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "username is missing in secrets") + self.assertTrue("secret" in context.details) + + self.request.secrets = {"username": "user", "management_address": "mg"} + self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "password is missing in secrets") + self.assertTrue("secret" in context.details) + + self.request.secrets = {"username": "user", "password": "pass"} + self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "mgmt address is missing in secrets") + self.assertTrue("secret" in context.details) + + self.request.secrets = [] + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_wrong_parameters(self, a_enter, a_exit, array_type): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + self.request.parameters = {"pool": "pool1"} + res = self.servicer.CreateVolume(self.request, context) + self.assertNotEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT) + + self.request.parameters = {"capabilities": ""} + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "capacity is missing in secrets") + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "pool parameter is missing") + self.assertTrue("parameter" in context.details) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_wrong_volume_capabilities(self, a_enter, a_exit): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + caps = Mock() + caps.mount = Mock() + caps.mount.fs_type = "ext42" + access_types = csi_pb2.VolumeCapability.AccessMode + caps.access_mode.mode = 
access_types.SINGLE_NODE_WRITER + + self.request.volume_capabilities = [caps] + + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "wrong fs_type") + self.assertTrue("fs_type" in context.details) + + caps.mount.fs_type = "ext4" + caps.access_mode.mode = access_types.MULTI_NODE_SINGLE_WRITER + self.request.volume_capabilities = [caps] + + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "wrong access_mode") + self.assertTrue("access mode" in context.details) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_array_connection_exception(self, a_enter, a_exit, array_type): + a_enter.side_effect = [Exception("error")] + context = utils.FakeContext() + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL, "connection error occured in array_connection") + self.assertTrue("error" in context.details) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_get_array_type_exception(self, a_enter, a_exit, array_type): + a_enter.return_value = self.mediator + context = utils.FakeContext() + array_type.side_effect = [array_errors.FailedToFindStorageSystemType("endpoint")] + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL, "failed to find storage system") + msg = array_errors.FailedToFindStorageSystemType("endpoint").message + self.assertTrue(msg in context.details) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.get_volume") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_create_volume_get_volume_exception(self, a_enter, get_volume, array_type): + a_enter.return_value = self.mediator + self.mediator.get_volume.side_effect = [Exception("error")] + context = utils.FakeContext() + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL) + self.assertTrue("error" in context.details) + self.mediator.get_volume.assert_called_once_with(vol_name) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.get_volume") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_create_volume_with_get_volume_illegal_object_name_exception(self, a_enter, get_volume, array_type): + a_enter.return_value = self.mediator + self.mediator.get_volume.side_effect = [array_errors.IllegalObjectName("vol")] + context = utils.FakeContext() + res = self.servicer.CreateVolume(self.request, context) + msg = array_errors.IllegalObjectName("vol").message + + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertTrue(msg 
in context.details) + self.mediator.get_volume.assert_called_once_with(vol_name) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.create_volume") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def create_volume_returns_error(self, a_enter, create_volume, array_type, return_code, err): + a_enter.return_value = self.mediator + create_volume.side_effect = [err] + + context = utils.FakeContext() + res = self.servicer.CreateVolume(self.request, context) + msg = str(err) + + self.assertEqual(context.code, return_code) + self.assertTrue(msg in context.details) + self.mediator.get_volume.assert_called_once_with(vol_name) + self.mediator.create_volume.assert_called_once_with(vol_name, self.capacity_bytes, {}, self.pool) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_cuts_name_if_its_too_long(self, a_exit, a_enter, array_type): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + self.request.name = "a" * 128 + self.mediator.create_volume = Mock() + self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, "vol", "wwn", "xiv") + array_type.return_value = "a9k" + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + self.mediator.get_volume.assert_called_once_with("a" * self.mediator.max_vol_name_length) + + def test_create_volume_with_illegal_object_name_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.IllegalObjectName("vol")) + + def test_create_volume_with_create_volume_with_volume_exsits_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.ALREADY_EXISTS, + err=array_errors.VolumeAlreadyExists("vol", "endpoint")) + + def test_create_volume_with_create_volume_with_pool_does_not_exist_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.PoolDoesNotExist("pool1", "endpoint")) + + def test_create_volume_with_create_volume_with_pool_does_not_match_capabilities_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.PoolDoesNotMatchCapabilities("pool1", "", "endpoint")) + + def test_create_volume_with_create_volume_with_capability_not_supported_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.StorageClassCapabilityNotSupported(["cap"])) + + def test_create_volume_with_create_volume_with_other_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, + err=Exception("error")) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_name_prefix(self, a_exit, a_enter, array_type): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + self.request.name = "some_name" + 
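        # Illustrative note (not part of the patch): per the assertion at the end
        # of this test, the prefix supplied under the PARAMETERS_PREFIX key is
        # expected to be prepended to the requested name with an underscore
        # separator ("prefix" + "some_name" -> "prefix_some_name").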
self.request.parameters[PARAMETERS_PREFIX] = "prefix" + self.mediator.create_volume = Mock() + self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, "vol", "wwn", "xiv") + array_type.return_value = "a9k" + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + self.mediator.create_volume.assert_called_once_with("prefix_some_name", 10, {}, "pool1") + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.detect_array_type") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_create_volume_with_zero_size(self, a_exit, a_enter, array_type): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + self.request.capacity_range.required_bytes = 0 + self.mediator.create_volume = Mock() + self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, "vol", "wwn", "xiv") + array_type.return_value = "a9k" + res = self.servicer.CreateVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + self.mediator.create_volume.assert_called_once_with(self.request.name, 1 * 1024 * 1024 * 1024, {}, "pool1") + + +class TestControllerServerDeleteVolume(unittest.TestCase): + + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator._connect") + def setUp(self, connect): + self.fqdn = "fqdn" + self.mediator = XIVArrayMediator("user", "password", self.fqdn) + self.mediator.client = Mock() + + self.mediator.get_volume = Mock() + + self.servicer = ControllerServicer(self.fqdn) + + self.request = Mock() + + self.pool = 'pool1' + self.request.secrets = {"username": "user", "password": "pass", "management_address": "mg"} + + self.request.volume_id = "xiv:vol-id" + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_delete_volume_with_wrong_secrets(self, a_enter, a_exit): + a_enter.return_value = self.mediator + context = utils.FakeContext() + + self.request.secrets = {"password": "pass", "management_address": "mg"} + res = self.servicer.DeleteVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "username is missing in secrets") + self.assertTrue("secret" in context.details) + + self.request.secrets = {"username": "user", "management_address": "mg"} + res = self.servicer.DeleteVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "password is missing in secrets") + self.assertTrue("secret" in context.details) + + self.request.secrets = {"username": "user", "password": "pass"} + res = self.servicer.DeleteVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT, "mgmt address is missing in secrets") + self.assertTrue("secret" in context.details) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_delete_volume_invalid_volume_id(self, a_enter, a_exit): + a_enter.return_value = self.mediator + context = utils.FakeContext() + self.request.volume_id = "wrong_id" + res = self.servicer.DeleteVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + 
@patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__exit__") + def test_delete_volume_with_array_connection_exception(self, a_enter, a_exit): + a_enter.side_effect = [Exception("a_enter error")] + context = utils.FakeContext() + res = self.servicer.DeleteVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL, "array connection internal error") + self.assertTrue("a_enter error" in context.details) + + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.delete_volume") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def delete_volume_returns_error(self, a_enter, delete_volume, error, return_code): + a_enter.return_value = self.mediator + delete_volume.side_effect = [error] + context = utils.FakeContext() + res = self.servicer.DeleteVolume(self.request, context) + self.assertEqual(context.code, return_code) + if return_code != grpc.StatusCode.OK: + msg = str(error) + self.assertTrue(msg in context.details, "msg : {0} is not in : {1}".format(msg, context.details)) + + def test_delete_volume_with_volume_not_found_error(self, ): + self.delete_volume_returns_error(error=array_errors.VolumeNotFoundError("vol"), return_code=grpc.StatusCode.OK) + + def test_delete_volume_with_delete_volume_other_exception(self): + self.delete_volume_returns_error(error=Exception("error"), return_code=grpc.StatusCode.INTERNAL) + + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator.delete_volume") + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_delete_volume_succeeds(self, a_enter, delete_volume): + a_enter.return_value = self.mediator + context = utils.FakeContext() + res = self.servicer.DeleteVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + +class TestControllerServerPublishVolume(unittest.TestCase): + + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator._connect") + def setUp(self, connect): + self.fqdn = "fqdn" + self.hostname = "hostname" + self.mediator = XIVArrayMediator("user", "password", self.fqdn) + self.mediator.client = Mock() + + self.mediator.get_host_by_host_identifiers = Mock() + self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["iscsi"] + + self.mediator.get_volume_mappings = Mock() + self.mediator.get_volume_mappings.return_value = {} + + self.mediator.map_volume = Mock() + self.mediator.map_volume.return_value = 1 + + self.mediator.get_array_iqns = Mock() + self.mediator.get_array_iqns.return_value = "array-iqn" + + self.servicer = ControllerServicer(self.fqdn) + + self.request = Mock() + arr_type = XIVArrayMediator.array_type + self.request.volume_id = "{}:wwn1".format(arr_type) + self.request.node_id = "hostname;iqn.1994-05.com.redhat:686358c930fe;500143802426baf4" + self.request.readonly = False + self.request.readonly = False + self.request.secrets = {"username": "user", "password": "pass", "management_address": "mg"} + self.request.volume_context = {} + + caps = Mock() + caps.mount = Mock() + caps.mount.fs_type = "ext4" + access_types = csi_pb2.VolumeCapability.AccessMode + caps.access_mode.mode = access_types.SINGLE_NODE_WRITER + self.request.volume_capability = caps + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_publish_volume_success(self, enter): + 
+        enter.return_value = self.mediator
+
+        context = utils.FakeContext()
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.OK)
+
+    @patch("controller.controller_server.utils.validate_publish_volume_request")
+    def test_publish_volume_validation_exception(self, publish_validation):
+        publish_validation.side_effect = [controller_errors.ValidationException("msg")]
+        context = utils.FakeContext()
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT)
+        self.assertTrue("msg" in context.details)
+
+    def test_publish_volume_wrong_volume_id(self):
+        self.request.volume_id = "some-wrong-id-format"
+
+        context = utils.FakeContext()
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND)
+
+    def test_publish_volume_wrong_node_id(self):
+        self.request.node_id = "some-wrong-id-format"
+
+        context = utils.FakeContext()
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND)
+
+    @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__")
+    def test_publish_volume_get_host_by_host_identifiers_exception(self, enter):
+        context = utils.FakeContext()
+
+        self.mediator.get_host_by_host_identifiers = Mock()
+        self.mediator.get_host_by_host_identifiers.side_effect = [array_errors.MultipleHostsFoundError("", "")]
+        enter.return_value = self.mediator
+
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertTrue("Multiple hosts" in context.details)
+        self.assertEqual(context.code, grpc.StatusCode.INTERNAL)
+
+        self.mediator.get_host_by_host_identifiers.side_effect = [array_errors.HostNotFoundError("")]
+        enter.return_value = self.mediator
+
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND)
+
+    @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__")
+    def test_publish_volume_get_volume_mappings_one_map_for_existing_host(self, enter):
+        context = utils.FakeContext()
+        self.mediator.get_volume_mappings = Mock()
+        self.mediator.get_volume_mappings.return_value = {self.hostname: 2}
+        enter.return_value = self.mediator
+
+        res = self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.OK)
+
+        self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '2')
+        self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], "iscsi")
+
+    @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__")
+    def test_publish_volume_with_connectivity_type_fc(self, enter):
+        context = utils.FakeContext()
+        self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["iscsi", "fc"]
+        self.mediator.get_array_fc_wwns = Mock()
+        self.mediator.get_array_fc_wwns.return_value = ["500143802426baf4"]
+        self.mediator.get_array_iqns = Mock()
+        self.mediator.get_array_iqns.return_value = [
+            "iqn.1994-05.com.redhat:686358c930fe"]
+        enter.return_value = self.mediator
+
+        res = self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.OK)
+
+        self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '1')
+        self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], "fc")
+        self.assertEqual(res.publish_context["PUBLISH_CONTEXT_ARRAY_FC_INITIATORS"],
"500143802426baf4") + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_publish_volume_with_connectivity_type_iscsi(self, enter): + context = utils.FakeContext() + self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["iscsi"] + self.mediator.get_array_iqns = Mock() + self.mediator.get_array_iqns.return_value = ["iqn.1994-05.com.redhat:686358c930fe"] + self.mediator.get_array_fc_wwns = Mock() + self.mediator.get_array_fc_wwns.return_value = ["500143802426baf4"] + enter.return_value = self.mediator + + res = self.servicer.ControllerPublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '1') + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], + "iscsi") + self.assertEqual( + res.publish_context["PUBLISH_CONTEXT_ARRAY_IQN"], + "iqn.1994-05.com.redhat:686358c930fe") + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_publish_volume_with_node_id_only_has_iqns(self, enter): + context = utils.FakeContext() + self.request.node_id = "hostname;iqn.1994-05.com.redhat:686358c930fe;" + self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["iscsi"] + self.mediator.get_array_iqns = Mock() + self.mediator.get_array_iqns.return_value = [ + "iqn.1994-05.com.redhat:686358c930fe"] + enter.return_value = self.mediator + + res = self.servicer.ControllerPublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '1') + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], + "iscsi") + self.assertEqual( + res.publish_context["PUBLISH_CONTEXT_ARRAY_IQN"], + "iqn.1994-05.com.redhat:686358c930fe") + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_publish_volume_with_node_id_only_has_wwns(self, enter): + context = utils.FakeContext() + self.request.node_id = "hostname;;500143802426baf4" + self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["fc"] + self.mediator.get_array_fc_wwns = Mock() + self.mediator.get_array_fc_wwns.return_value = ["500143802426baf4"] + enter.return_value = self.mediator + + res = self.servicer.ControllerPublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '1') + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], + "fc") + self.assertEqual( + res.publish_context["PUBLISH_CONTEXT_ARRAY_FC_INITIATORS"], + "500143802426baf4") + + self.request.node_id = "hostname;;500143802426baf4:500143806626bae2" + self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["fc"] + self.mediator.get_array_fc_wwns = Mock() + self.mediator.get_array_fc_wwns.return_value = ["500143802426baf4", + "500143806626bae2"] + enter.return_value = self.mediator + + res = self.servicer.ControllerPublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '1') + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], + "fc") + self.assertEqual( + res.publish_context["PUBLISH_CONTEXT_ARRAY_FC_INITIATORS"], + "500143802426baf4,500143806626bae2") + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") 
+    def test_publish_volume_get_volume_mappings_one_map_for_other_host(self, enter):
+        context = utils.FakeContext()
+        self.mediator.get_volume_mappings = Mock()
+        self.mediator.get_volume_mappings.return_value = {"other-hostname": 3}
+        enter.return_value = self.mediator
+
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.FAILED_PRECONDITION)
+        self.assertTrue("Volume is already mapped" in context.details)
+
+    @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__")
+    def test_publish_volume_get_volume_mappings_more_than_one_mapping(self, enter):
+        context = utils.FakeContext()
+        self.mediator.get_volume_mappings = Mock()
+        self.mediator.get_volume_mappings.return_value = {"other-hostname": 3, self.hostname: 4}
+        enter.return_value = self.mediator
+
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.FAILED_PRECONDITION)
+        self.assertTrue("Volume is already mapped" in context.details)
+
+    @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__")
+    def test_publish_volume_map_volume_exceptions(self, enter):
+        context = utils.FakeContext()
+
+        self.mediator.map_volume.side_effect = [array_errors.PermissionDeniedError("msg")]
+
+        enter.return_value = self.mediator
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.PERMISSION_DENIED)
+
+        self.mediator.map_volume.side_effect = [array_errors.VolumeNotFoundError("vol")]
+        enter.return_value = self.mediator
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND)
+
+        self.mediator.map_volume.side_effect = [array_errors.HostNotFoundError("host")]
+        enter.return_value = self.mediator
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND)
+
+        self.mediator.map_volume.side_effect = [array_errors.MappingError("", "", "")]
+        enter.return_value = self.mediator
+        self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.INTERNAL)
+
+    @patch.object(XIVArrayMediator, "MAX_LUN_NUMBER", 3)
+    @patch.object(XIVArrayMediator, "MIN_LUN_NUMBER", 1)
+    @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__")
+    def test_publish_volume_map_volume_lun_already_in_use(self, enter):
+        context = utils.FakeContext()
+
+        self.mediator.map_volume.side_effect = [array_errors.LunAlreadyInUseError("", ""), 2]
+        self.mediator.get_array_iqns.return_value = "array-iqn"
+        enter.return_value = self.mediator
+        res = self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.OK)
+        self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '2')
+        self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], "iscsi")
+
+        self.mediator.map_volume.side_effect = [
+            array_errors.LunAlreadyInUseError("", ""), 2]
+        self.mediator.get_host_by_host_identifiers = Mock()
+        self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["fc"]
+        self.mediator.get_array_fc_wwns = Mock()
+        self.mediator.get_array_fc_wwns.return_value = ["500143802426baf4"]
+        enter.return_value = self.mediator
+        res = self.servicer.ControllerPublishVolume(self.request, context)
+        self.assertEqual(context.code, grpc.StatusCode.OK)
+
self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '2') + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], + "fc") + + self.mediator.map_volume.side_effect = [array_errors.LunAlreadyInUseError("", ""), + array_errors.LunAlreadyInUseError("", ""), 2] + enter.return_value = self.mediator + self.servicer.ControllerPublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_LUN"], '2') + self.assertEqual(res.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], "fc") + + self.mediator.map_volume.side_effect = [ + array_errors.LunAlreadyInUseError("", "")] * (self.mediator.max_lun_retries + 1) + enter.return_value = self.mediator + self.servicer.ControllerPublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.RESOURCE_EXHAUSTED) + + +class TestControllerServerUnPublishVolume(unittest.TestCase): + + @patch("controller.array_action.array_mediator_xiv.XIVArrayMediator._connect") + def setUp(self, connect): + self.fqdn = "fqdn" + self.hostname = "hostname" + self.mediator = XIVArrayMediator("user", "password", self.fqdn) + self.mediator.client = Mock() + + self.mediator.get_host_by_host_identifiers = Mock() + self.mediator.get_host_by_host_identifiers.return_value = self.hostname, ["iscsi"] + + self.mediator.unmap_volume = Mock() + self.mediator.unmap_volume.return_value = None + + self.servicer = ControllerServicer(self.fqdn) + + self.request = Mock() + arr_type = XIVArrayMediator.array_type + self.request.volume_id = "{}:wwn1".format(arr_type) + self.request.node_id = "hostname;iqn1;500143802426baf4" + self.request.secrets = {"username": "user", "password": "pass", "management_address": "mg"} + self.request.volume_context = {} + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_unpublish_volume_success(self, enter): + enter.return_value = self.mediator + context = utils.FakeContext() + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + @patch("controller.controller_server.utils.validate_unpublish_volume_request") + def test_unpublish_volume_validation_exception(self, publish_validation): + publish_validation.side_effect = [controller_errors.ValidationException("msg")] + context = utils.FakeContext() + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertTrue("msg" in context.details) + + def test_unpublish_volume_wrong_volume_id(self): + self.request.volume_id = "some-wrong-id-format" + + context = utils.FakeContext() + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INVALID_ARGUMENT) + + def test_unpublish_volume_wrong_node_id(self): + self.request.node_id = "some-wrong-id-format" + + context = utils.FakeContext() + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_unpublish_volume_get_host_by_host_identifiers_exception(self, enter): + context = utils.FakeContext() + + self.mediator.get_host_by_host_identifiers = Mock() + self.mediator.get_host_by_host_identifiers.side_effect = [array_errors.MultipleHostsFoundError("", "")] + enter.return_value = self.mediator + + 
self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertTrue("Multiple hosts" in context.details) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL) + + self.mediator.get_host_by_host_identifiers.side_effect = [array_errors.HostNotFoundError("")] + enter.return_value = self.mediator + + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND) + + @patch("controller.array_action.array_connection_manager.ArrayConnectionManager.__enter__") + def test_unpublish_volume_unmap_volume_excpetions(self, enter): + context = utils.FakeContext() + + self.mediator.unmap_volume.side_effect = [array_errors.PermissionDeniedError("msg")] + enter.return_value = self.mediator + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.PERMISSION_DENIED) + + context = utils.FakeContext() + self.mediator.unmap_volume.side_effect = [array_errors.VolumeNotFoundError("vol")] + enter.return_value = self.mediator + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND) + + context = utils.FakeContext() + self.mediator.unmap_volume.side_effect = [array_errors.HostNotFoundError("host")] + enter.return_value = self.mediator + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.NOT_FOUND) + + context = utils.FakeContext() + self.mediator.unmap_volume.side_effect = [array_errors.UnMappingError("", "", "")] + enter.return_value = self.mediator + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.INTERNAL) + + context = utils.FakeContext() + self.mediator.unmap_volume.side_effect = [array_errors.VolumeAlreadyUnmappedError("")] + enter.return_value = self.mediator + self.servicer.ControllerUnpublishVolume(self.request, context) + self.assertEqual(context.code, grpc.StatusCode.OK) + + +class TestControllerServerGetCapabilities(unittest.TestCase): + + def setUp(self): + self.fqdn = "fqdn" + self.servicer = ControllerServicer(self.fqdn) + + def test_controller_get_capabilities(self): + request = Mock() + context = Mock() + self.servicer.ControllerGetCapabilities(request, context) + + +class TestIdentityServer(unittest.TestCase): + + def setUp(self): + self.fqdn = "fqdn" + self.servicer = ControllerServicer(self.fqdn) + + @patch.object(ControllerServicer, "_ControllerServicer__get_identity_config") + def test_identity_plugin_get_info_succeeds(self, identity_config): + plugin_name = "plugin-name" + version = "0.9.0" + identity_config.side_effect = [plugin_name, version] + request = Mock() + context = Mock() + request.volume_capabilities = [] + res = self.servicer.GetPluginInfo(request, context) + self.assertEqual(res, csi_pb2.GetPluginInfoResponse(name=plugin_name, vendor_version=version)) + + @patch.object(ControllerServicer, "_ControllerServicer__get_identity_config") + def test_identity_plugin_get_info_fails_when_attributes_from_config_are_missing(self, identity_config): + request = Mock() + context = Mock() + + identity_config.side_effect = ["name", Exception(), Exception(), "0.9.0"] + + res = self.servicer.GetPluginInfo(request, context) + context.set_code.assert_called_once_with(grpc.StatusCode.INTERNAL) + self.assertEqual(res, csi_pb2.GetPluginInfoResponse()) + + res = self.servicer.GetPluginInfo(request, context) + self.assertEqual(res, csi_pb2.GetPluginInfoResponse()) + 
context.set_code.assert_called_with(grpc.StatusCode.INTERNAL) + + @patch.object(ControllerServicer, "_ControllerServicer__get_identity_config") + def test_identity_plugin_get_info_fails_when_name_or_value_are_empty(self, identity_config): + request = Mock() + context = Mock() + + identity_config.side_effect = ["", "0.9.0", "name", ""] + + res = self.servicer.GetPluginInfo(request, context) + context.set_code.assert_called_once_with(grpc.StatusCode.INTERNAL) + self.assertEqual(res, csi_pb2.GetPluginInfoResponse()) + + res = self.servicer.GetPluginInfo(request, context) + self.assertEqual(res, csi_pb2.GetPluginInfoResponse()) + self.assertEqual(context.set_code.call_args_list, + [call(grpc.StatusCode.INTERNAL), call(grpc.StatusCode.INTERNAL)]) + + def test_identity_get_plugin_capabilities(self): + request = Mock() + context = Mock() + self.servicer.GetPluginCapabilities(request, context) + + def test_identity_probe(self): + request = Mock() + context = Mock() + self.servicer.Probe(request, context) diff --git a/controller/tests/controller_server/utils_test.py b/controller/tests/controller_server/utils_test.py new file mode 100644 index 000000000..f2cb3e5c9 --- /dev/null +++ b/controller/tests/controller_server/utils_test.py @@ -0,0 +1,277 @@ +import unittest +from mock import patch, Mock +from controller.csi_general import csi_pb2 +from controller.controller_server.csi_controller_server import ControllerServicer +import controller.controller_server.utils as utils +from controller.controller_server.errors import ValidationException +from controller.array_action.errors import VolumeNotFoundError, HostNotFoundError + + +class TestUtils(unittest.TestCase): + + def setUp(self): + self.fqdn = "fqdn" + self.servicer = ControllerServicer(self.fqdn) + + def test_validate_secrets(self): + username = "user" + password = "pass" + mgmt = "mg" + secrets = {"username": username, "password": password, "management_address": mgmt} + + utils.validate_secret(secrets) + + with self.assertRaises(ValidationException): + utils.validate_secret(None) + + secrets = {"username": username, "password": password} + with self.assertRaises(ValidationException): + utils.validate_secret(secrets) + + secrets = {"username": username, "management_address": mgmt} + with self.assertRaises(ValidationException): + utils.validate_secret(secrets) + + secrets = {"password": password, "management_address": mgmt} + with self.assertRaises(ValidationException): + utils.validate_secret(secrets) + + secrets = {} + with self.assertRaises(ValidationException): + utils.validate_secret(secrets) + + def test_validate_volume_capabilities(self): + caps = Mock() + caps.mount = Mock() + caps.mount.fs_type = "ext4" + access_types = csi_pb2.VolumeCapability.AccessMode + caps.access_mode.mode = access_types.SINGLE_NODE_WRITER + + utils.validate_csi_volume_capabilties([caps]) + + with self.assertRaises(ValidationException): + utils.validate_csi_volume_capabilties([]) + + caps.mount.fs_type = "ext41" + with self.assertRaises(ValidationException): + utils.validate_csi_volume_capabilties([caps]) + + caps.mount.fs_type = "ext4" + caps.access_mode.mode = access_types.SINGLE_NODE_READER_ONLY + with self.assertRaises(ValidationException): + utils.validate_csi_volume_capabilties([caps]) + + caps = Mock() + caps.mount = None + caps.access_mode.mode = access_types.SINGLE_NODE_READER_ONLY + with self.assertRaises(ValidationException): + utils.validate_csi_volume_capabilties([caps]) + + @patch('controller.controller_server.utils.validate_secret') + 
@patch('controller.controller_server.utils.validate_csi_volume_capabilties') + def test_validate_create_volume_request(self, valiate_capabilities, validate_secret): + request = Mock() + request.name = "" + + with self.assertRaises(ValidationException) as ex: + utils.validate_create_volume_request(request) + self.assertTrue("name" in ex.message) + + request.name = "name" + + request.capacity_range.required_bytes = -1 + + with self.assertRaises(ValidationException) as ex: + utils.validate_create_volume_request(request) + self.assertTrue("size" in ex.message) + + request.capacity_range.required_bytes = 10 + valiate_capabilities.side_effect = ValidationException("msg") + + with self.assertRaises(ValidationException) as ex: + utils.validate_create_volume_request(request) + self.assertTrue("msg" in ex.message) + + valiate_capabilities.side_effect = None + + validate_secret.side_effect = ValidationException(" other msg") + + with self.assertRaises(ValidationException) as ex: + utils.validate_create_volume_request(request) + self.assertTrue("other msg" in ex.message) + + validate_secret.side_effect = None + + request.parameters = {"capabilities": ""} + + with self.assertRaises(ValidationException) as ex: + utils.validate_create_volume_request(request) + self.assertTrue("parameters" in ex.message) + + request.parameters = {} + + with self.assertRaises(ValidationException) as ex: + utils.validate_create_volume_request(request) + self.assertTrue("parameters" in ex.message) + + request.parameters = None + + with self.assertRaises(ValidationException) as ex: + utils.validate_create_volume_request(request) + self.assertTrue("parameters" in ex.message) + + request.parameters = {"pool": "pool1", "SpaceEfficiency": "thin "} + + utils.validate_create_volume_request(request) + + request.parameters = {"pool": "pool1"} + utils.validate_create_volume_request(request) + + request.capacity_range.required_bytes = 0 + utils.validate_create_volume_request(request) + + @patch("controller.controller_server.utils.get_vol_id") + def test_get_create_volume_response(self, get_vol_id): + new_vol = Mock() + new_vol.volume_name = "name" + new_vol.array_address = ["fqdn1", "fqdn2"] + + new_vol.pool_name = "pool" + new_vol.array_type = "a9k" + new_vol.capacity_bytes = 10 + + get_vol_id.return_value = "a9k:name" + res = utils.generate_csi_create_volume_response(new_vol) + + self.assertEqual(10, res.volume.capacity_bytes) + + get_vol_id.side_effect = [Exception("err")] + + with self.assertRaises(Exception): + utils.generate_csi_create_volume_response(new_vol) + + @patch('controller.controller_server.utils.validate_secret') + @patch('controller.controller_server.utils.validate_csi_volume_capability') + def test_validate_publish_volume_request(self, validate_capabilities, validate_secrets): + request = Mock() + request.readonly = True + + with self.assertRaises(ValidationException) as ex: + utils.validate_publish_volume_request(request) + self.assertTrue("readonly" in ex.message) + + request.readonly = False + validate_capabilities.side_effect = [ValidationException("msg1")] + + with self.assertRaises(ValidationException) as ex: + utils.validate_publish_volume_request(request) + self.assertTrue("msg1" in ex.message) + + validate_capabilities.side_effect = None + request.secrets = [] + + with self.assertRaises(ValidationException) as ex: + utils.validate_publish_volume_request(request) + self.assertTrue("secrets" in ex.message) + + request.secrets = ["secret"] + validate_secrets.side_effect = [ValidationException("msg2")] + + with 
self.assertRaises(ValidationException) as ex: + utils.validate_publish_volume_request(request) + self.assertTrue("msg2" in ex.message) + + validate_secrets.side_effect = None + + utils.validate_publish_volume_request(request) + + @patch('controller.controller_server.utils.validate_secret') + def test_validate_unpublish_volume_request(self, validate_secret): + request = Mock() + request.volume_id = "somebadvolumename" + + with self.assertRaises(ValidationException) as ex: + utils.validate_unpublish_volume_request(request) + self.assertTrue("volume" in ex.message) + + request.volume_id = "xiv:volume" + + request.secrets = [] + with self.assertRaises(ValidationException) as ex: + utils.validate_unpublish_volume_request(request) + self.assertTrue("secret" in ex.message) + + request.secrets = ["secret"] + validate_secret.side_effect = [ValidationException("msg2")] + with self.assertRaises(ValidationException) as ex: + utils.validate_unpublish_volume_request(request) + self.assertTrue("msg2" in ex.message) + + validate_secret.side_effect = None + + utils.validate_unpublish_volume_request(request) + + def test_get_volume_id_info(self): + with self.assertRaises(VolumeNotFoundError) as ex: + utils.get_volume_id_info("badvolumeformat") + self.assertTrue("volume" in ex.message) + + arr_type, vol = utils.get_volume_id_info("xiv:vol") + self.assertEqual(arr_type, "xiv") + self.assertEqual(vol, "vol") + + def test_get_node_id_info(self): + with self.assertRaises(HostNotFoundError) as ex: + utils.get_node_id_info("badnodeformat") + self.assertTrue("node" in ex.message) + + hostname, iscsi_iqn, fc_wwns = utils.get_node_id_info("hostabc;iqn.ibm;") + self.assertEqual(hostname, "hostabc") + self.assertEqual(iscsi_iqn, "iqn.ibm") + self.assertEqual(fc_wwns, "") + + hostname, iscsi_iqn, fc_wwns = utils.get_node_id_info("hostabc;iqn.ibm;wwn1:wwn2") + self.assertEqual(hostname, "hostabc") + self.assertEqual(iscsi_iqn, "iqn.ibm") + self.assertEqual(fc_wwns, "wwn1:wwn2") + + hostname, iscsi_iqn, fc_wwns = utils.get_node_id_info("hostabc;;wwn1:wwn2") + self.assertEqual(hostname, "hostabc") + self.assertEqual(iscsi_iqn, "") + self.assertEqual(fc_wwns, "wwn1:wwn2") + + def test_choose_connectivity_types(self): + res = utils.choose_connectivity_type(["iscsi"]) + self.assertEqual(res, "iscsi") + + res = utils.choose_connectivity_type(["fc"]) + self.assertEqual(res, "fc") + + res = utils.choose_connectivity_type(["iscsi", "fc"]) + self.assertEqual(res, "fc") + + def test_generate_publish_volume_response(self): + config_a = {"controller": {"publish_context_lun_parameter": "lun", + "publish_context_connectivity_parameter": + "connectivity_type", + "publish_context_array_iqn": "array_iqn", + "publish_context_fc_initiators": "fc_wwns"} + } + res = utils.generate_csi_publish_volume_response(0, "iscsi", config_a, + ["1"]) + self.assertEqual(res.publish_context["lun"], '0') + self.assertEqual(res.publish_context["connectivity_type"], "iscsi") + self.assertEqual(res.publish_context["array_iqn"], '1') + + config_b = {"controller": {"publish_context_lun_parameter": "lun", + "publish_context_connectivity_parameter": "connectivity_type", + "publish_context_array_iqn": "array_iqn", + "publish_context_fc_initiators": "fc_wwns"} + } + res = utils.generate_csi_publish_volume_response(1, "fc", config_b, + ["wwn1", "wwn2"]) + self.assertEqual(res.publish_context["lun"], '1') + self.assertEqual(res.publish_context["connectivity_type"], "fc") + self.assertEqual(res.publish_context["fc_wwns"], "wwn1,wwn2") + + diff --git 
a/controller/tests/utils.py b/controller/tests/utils.py new file mode 100644 index 000000000..c4a429966 --- /dev/null +++ b/controller/tests/utils.py @@ -0,0 +1,27 @@ +from mock import Mock +import grpc + + +def get_mock_mediator_response_volume(size, name, wwn, array_type): + vol = Mock() + vol.capacity_bytes = size + vol.id = wwn + vol.volume_name = name + vol.array_address = "arr1" + vol.pool_name = "pool1" + vol.array_type = array_type + + return vol + + +class FakeContext: + + def __init__(self): + self.code = grpc.StatusCode.OK + self.details = "" + + def set_code(self, code): + self.code = code + + def set_details(self, details): + self.details = details diff --git a/deploy/kubernetes/examples/demo-pvc-gold.yaml b/deploy/kubernetes/examples/demo-pvc-gold.yaml new file mode 100644 index 000000000..7d3338237 --- /dev/null +++ b/deploy/kubernetes/examples/demo-pvc-gold.yaml @@ -0,0 +1,12 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: demo-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: gold + \ No newline at end of file diff --git a/deploy/kubernetes/examples/demo-statefulset-with-demo-pvc.yml b/deploy/kubernetes/examples/demo-statefulset-with-demo-pvc.yml new file mode 100644 index 000000000..c3152f402 --- /dev/null +++ b/deploy/kubernetes/examples/demo-statefulset-with-demo-pvc.yml @@ -0,0 +1,31 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: demo-statefulset +spec: + selector: + matchLabels: + app: demo-statefulset + serviceName: demo-statefulset + replicas: 1 + template: + metadata: + labels: + app: demo-statefulset + spec: + containers: + - name: container1 + image: registry.access.redhat.com/ubi8/ubi:latest + command: [ "/bin/sh", "-c", "--" ] + args: [ "while true; do sleep 30; done;" ] + volumeMounts: + - name: demo-pvc + mountPath: "/data" + volumes: + - name: demo-pvc + persistentVolumeClaim: + claimName: demo-pvc + +# nodeSelector: +# kubernetes.io/hostname: NODESELECTOR + \ No newline at end of file diff --git a/deploy/kubernetes/examples/demo-storageclass-gold-A9000R.yaml b/deploy/kubernetes/examples/demo-storageclass-gold-A9000R.yaml new file mode 100644 index 000000000..05537f168 --- /dev/null +++ b/deploy/kubernetes/examples/demo-storageclass-gold-A9000R.yaml @@ -0,0 +1,19 @@ +################################################## +# Storage class example for IBM Block CSI Driver using A9000. +################################################## + +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: gold +provisioner: block.csi.ibm.com +parameters: + pool: gold + + csi.storage.k8s.io/provisioner-secret-name: a9000-array1 + csi.storage.k8s.io/provisioner-secret-namespace: kube-system + csi.storage.k8s.io/controller-publish-secret-name: a9000-array1 + csi.storage.k8s.io/controller-publish-secret-namespace: kube-system + + csi.storage.k8s.io/fstype: xfs # Optional. values ext4\xfs. The default is ext4. + volume_name_prefix: demo1 # Optional. 
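For context on the volume_name_prefix parameter above: the CreateVolume unit tests earlier in this diff assert that the prefix is joined to the requested volume name with an underscore, and that over-long names are cut down to the mediator's maximum volume-name length. The sketch below is illustrative only; the helper name and the 63-character default limit are assumptions, not the driver's actual code, and the tests do not show how prefixing and truncation combine.

# Illustrative sketch, not code from the driver. The helper name and the example
# length limit are assumptions; the underscore join and the truncation mirror what
# the CreateVolume unit tests above assert.
def build_volume_name(requested_name, prefix=None, max_length=63):
    """Join an optional prefix to the requested name, then truncate to max_length."""
    final_name = "{}_{}".format(prefix, requested_name) if prefix else requested_name
    return final_name[:max_length]

# Example: a StorageClass with volume_name_prefix "demo1" and a PVC named "demo-pvc"
print(build_volume_name("demo-pvc", prefix="demo1"))  # demo1_demo-pvc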
diff --git a/deploy/kubernetes/examples/sanity-deployment.yml b/deploy/kubernetes/examples/sanity-deployment.yml new file mode 100644 index 000000000..b58e3cfdf --- /dev/null +++ b/deploy/kubernetes/examples/sanity-deployment.yml @@ -0,0 +1,29 @@ +apiVersion: "apps/v1" +kind: Deployment +metadata: + name: sanity-deployment +spec: + selector: + matchLabels: + app: sanity-deployment + replicas: 1 + template: + metadata: + labels: + app: sanity-deployment + spec: + containers: + - name: container1 + image: registry.access.redhat.com/ubi8/ubi:latest + command: [ "/bin/sh", "-c", "--" ] + args: [ "while true; do sleep 30; done;" ] + volumeMounts: + - name: sanity-pvc + mountPath: "/data" + volumes: + - name: sanity-pvc + persistentVolumeClaim: + claimName: sanity-pvc + +# nodeSelector: +# kubernetes.io/hostname: NODESELECTOR diff --git a/deploy/kubernetes/examples/sanity-pod.yml b/deploy/kubernetes/examples/sanity-pod.yml new file mode 100644 index 000000000..4a6260848 --- /dev/null +++ b/deploy/kubernetes/examples/sanity-pod.yml @@ -0,0 +1,18 @@ +kind: Pod +apiVersion: v1 +metadata: + name: pod1 +spec: + containers: + - name: container1 + image: registry.access.redhat.com/ubi8/ubi:latest + command: [ "/bin/sh", "-c", "--" ] + args: [ "while true; do date; sleep 5; date >&2; sleep 5; done;" ] + volumeMounts: + - name: sanity-pvc + mountPath: "/data" + volumes: + - name: sanity-pvc + persistentVolumeClaim: + claimName: sanity-pvc + diff --git a/deploy/kubernetes/examples/template-array-secret.yaml b/deploy/kubernetes/examples/template-array-secret.yaml new file mode 100644 index 000000000..d1f1252a1 --- /dev/null +++ b/deploy/kubernetes/examples/template-array-secret.yaml @@ -0,0 +1,15 @@ +################################################## +# Array secret template for IBM Block CSI Driver +# All the VALUE fields must be set. +################################################## + +kind: Secret +apiVersion: v1 +metadata: + name: + namespace: kube-system +type: Opaque +data: + username: # Array username. + password: # Array password. + management_address: # Array managment addresses diff --git a/deploy/kubernetes/examples/template-pvc.yaml b/deploy/kubernetes/examples/template-pvc.yaml new file mode 100644 index 000000000..521a45643 --- /dev/null +++ b/deploy/kubernetes/examples/template-pvc.yaml @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: +spec: + storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: diff --git a/deploy/kubernetes/examples/template-storageclass.yaml b/deploy/kubernetes/examples/template-storageclass.yaml new file mode 100644 index 000000000..d135cbc9f --- /dev/null +++ b/deploy/kubernetes/examples/template-storageclass.yaml @@ -0,0 +1,25 @@ +################################################## +# Storage class template for IBM Block CSI Driver +# All the VALUE fields must be set. +################################################## + +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: +provisioner: block.csi.ibm.com +parameters: + #capabilities: # Optional. + # SpaceEfficiency= + capacity: + pool= + + csi.storage.k8s.io/provisioner-secret-name: + csi.storage.k8s.io/provisioner-secret-namespace: + csi.storage.k8s.io/controller-publish-secret-name: + csi.storage.k8s.io/controller-publish-secret-namespace: + + + #csi.storage.k8s.io/fstype: # Optional. values ext4\xfs. The default is ext4. + #volume_name_prefix: # Optional. 
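The values filled into the data section of template-array-secret.yaml above must be base64 encoded, as Kubernetes requires for Secret data fields (plain values can instead be supplied under stringData). A minimal sketch for producing the encoded values with only the Python standard library; the username, password, and address shown are placeholders, not defaults used by the driver.

# Sketch: base64-encode placeholder values for the "data" section of
# template-array-secret.yaml. Replace the placeholders with real credentials.
import base64

def encode(value):
    return base64.b64encode(value.encode("utf-8")).decode("ascii")

for key, value in [("username", "admin"),
                   ("password", "change-me"),
                   ("management_address", "mgmt.example.com")]:
    print("{}: {}".format(key, encode(value)))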
+ diff --git a/deploy/kubernetes/v1.13/ibm-block-csi-driver.yaml b/deploy/kubernetes/v1.13/ibm-block-csi-driver.yaml new file mode 100644 index 000000000..69124783e --- /dev/null +++ b/deploy/kubernetes/v1.13/ibm-block-csi-driver.yaml @@ -0,0 +1,495 @@ +# ============================================================== +# IBM Block CSI driver - Kubernetes manifest. +# The manifest contains: +# CSI controller (StatefulSet) +# CSI node (DaemonSet) +# And all the relevant RBAC elements the CSI side car containers: +# CSI controller service account +# ibm-block-csi-external-provisioner-role +# ibm-block-csi-external-provisioner-binding +# ibm-block-csi-external-attacher-role +# ibm-block-csi-external-attacher-binding +# ibm-block-csi-cluster-driver-registrar-role +# ibm-block-csi-cluster-driver-registrar-binding +# ibm-block-csi-external-snapshotter-role +# ibm-block-csi-snapshotter-binding +# CSI node service account +# ============================================================== + + +# CSI Controller Service Account + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ibm-block-csi-controller-sa + namespace: kube-system + labels: + product: ibm-block-csi-driver + csi: ibm + +--- + +# RBAC for CSI Controller Service Account for all the CSI sidecar containers. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-provisioner-role + labels: + product: ibm-block-csi-driver + csi: ibm + +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + ## Needed only from k8s 1.14+ + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + ## Needed only from k8s 1.14+ + resources: ["nodes"] + verbs: ["get", "list", "watch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-provisioner-binding + labels: + product: ibm-block-csi-driver + csi: ibm + +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-attacher-role + labels: + product: ibm-block-csi-driver + csi: ibm +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] # For k8s 1.14+ + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-attacher-binding + labels: + product: 
ibm-block-csi-driver + csi: ibm +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-cluster-driver-registrar-role + labels: + product: ibm-block-csi-driver + csi: ibm +rules: + ## Only needed for k8s 1.13 since it was automatic install the CSIDriver. But for 1.14+ its not available. + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csidrivers"] + verbs: ["create", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-cluster-driver-registrar-binding + labels: + product: ibm-block-csi-driver + csi: ibm +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-cluster-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-snapshotter-role + labels: + product: ibm-block-csi-driver + csi: ibm +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-snapshotter-binding + labels: + product: ibm-block-csi-driver + csi: ibm +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- + +## CSI Controller Service statefulset +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: ibm-block-csi-controller + namespace: kube-system + labels: + product: ibm-block-csi-driver + csi: ibm +spec: + selector: + matchLabels: + app: ibm-block-csi-controller + serviceName: ibm-block-csi-controller + replicas: 1 + template: + metadata: + labels: + app: ibm-block-csi-controller + product: ibm-block-csi-driver + csi: ibm + spec: + serviceAccount: ibm-block-csi-controller-sa + containers: + - name: ibm-block-csi-controller + image: ibmcom/ibm-block-csi-driver-controller:0.9.0 + imagePullPolicy: "IfNotPresent" + args : + - --csi-endpoint=$(CSI_ENDPOINT) + - --loglevel=$(CSI_LOGLEVEL) + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: 
CSI_LOGLEVEL + value: DEBUG + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + ## Defining port which will be used to GET plugin health status + ## 9808 is default, but can be changed. + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + ## The probe actually used by the liveness-probe. TODO Not sure if ports\livenessProbe needed. + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + + + - name: cluster-driver-registrar + ## Note: Requires CRD CSIDriver + image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --v=5 + #- --pod-info-mount=true ## TODO to enable pod info feature. + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.3.0 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --v=5 + #- --feature-gates=Topology=true ## TODO add Topology feature + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v1.2.1 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + +## TODO add snapshotter when plugin will support ControllerCreateSnapshot capability. +# - name: csi-snapshotter +# image: quay.io/k8scsi/csi-snapshotter:v1.1.0 # TODO v1.1.0 for k8s1.13+(github release different then k8s CSI docs), but there is v1.0.1 for k8s 1.13 +# imagePullPolicy: "IfNotPresent" +# args: +# - --csi-address=$(ADDRESS) +# - --connection-timeout=15s +# env: +# - name: ADDRESS +# value: /var/lib/csi/sockets/pluginproxy/csi.sock +# volumeMounts: +# - name: socket-dir +# mountPath: /var/lib/csi/sockets/pluginproxy/ + + + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v1.1.0 # TODO v1.1.0 is for k8s 1.13+, but v1.0.2 was the first k8s 1.13 version. + args: + - --csi-address=/csi/csi.sock + - --connection-timeout=3s # TODO this line is deprecated from v1.1.0. So if exist its ignore it. 
+ volumeMounts: + - name: socket-dir + mountPath: /csi + + volumes: + - name: socket-dir + emptyDir: {} + +--- + +# CSI Node Service Deamonset +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ibm-block-csi-node + namespace: kube-system + labels: + product: ibm-block-csi-driver + csi: ibm +spec: + selector: + matchLabels: + app: ibm-block-csi-node + template: + metadata: + labels: + app: ibm-block-csi-node + product: ibm-block-csi-driver + csi: ibm + spec: + hostNetwork: true + containers: + - name: ibm-block-csi-node + securityContext: + privileged: true + image: ibmcom/ibm-block-csi-driver-node:0.9.0 + imagePullPolicy: "IfNotPresent" + args: + - --csi-endpoint=$(CSI_ENDPOINT) + - --hostname=$(KUBE_NODE_NAME) + - --config-file-path=./config.yaml + - --loglevel=$(CSI_LOGLEVEL) + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: CSI_LOGLEVEL + value: "trace" + ## KUBE_NODE_NAME needed for the GetNodeInfo API + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: mountpoint-dir + mountPath: /var/lib/kubelet/pods + mountPropagation: "Bidirectional" + - name: socket-dir + mountPath: /csi + + - name: device-dir + mountPath: /dev + - name: sys-dir + mountPath: /sys + - name: host-dir + mountPath: /host # Maps the host root into "/host", inside the container. + mountPropagation: "Bidirectional" + + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + + + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/block.csi.ibm.com-reg.sock /csi/csi.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/block.csi.ibm.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v1.1.0 # TODO v1.1.0 is for k8s 1.13+, but v1.0.2 was the first k8s 1.13 version. + args: + - --csi-address=/csi/csi.sock + - --connection-timeout=3s # NOTE: this line is deprecated from v1.1.0. So if exist its ignore it. 
+ volumeMounts: + - name: socket-dir + mountPath: /csi + + volumes: + ## This volume is where the driver mounts volumes + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + + ## This volume is where the socket for kubelet->driver communication is done + - name: socket-dir + hostPath: + path: /var/lib/kubelet/plugins/block.csi.ibm.com/ + type: DirectoryOrCreate + + ## This volume is where the node-driver-registrar registers the plugin with kubelet + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + + ## To discover the new devices + - name: device-dir + hostPath: + path: /dev + type: Directory + + - name: sys-dir + hostPath: + path: /sys + type: Directory + + - name: host-dir + hostPath: + path: / + type: Directory + diff --git a/deploy/kubernetes/v1.14/ibm-block-csi-driver.yaml b/deploy/kubernetes/v1.14/ibm-block-csi-driver.yaml new file mode 100644 index 000000000..ff952c79f --- /dev/null +++ b/deploy/kubernetes/v1.14/ibm-block-csi-driver.yaml @@ -0,0 +1,501 @@ +# ============================================================== +# IBM Block CSI driver - Kubernetes manifest. +# The manifest contains: +# CSI controller (StatefulSet) +# CSI node (DaemonSet) +# And all the relevant RBAC elements the CSI side car containers: +# CSI controller service account +# ibm-block-csi-external-provisioner-role +# ibm-block-csi-external-provisioner-binding +# ibm-block-csi-external-attacher-role +# ibm-block-csi-external-attacher-binding +# ibm-block-csi-cluster-driver-registrar-role +# ibm-block-csi-cluster-driver-registrar-binding +# ibm-block-csi-external-snapshotter-role +# ibm-block-csi-snapshotter-binding +# CSI node service account +# ============================================================== + + +# CSI Controller Service Account + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ibm-block-csi-controller-sa + namespace: kube-system + labels: + product: ibm-block-csi-driver + csi: ibm + +--- + +# RBAC for CSI Controller Service Account for all the CSI sidecar containers. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-provisioner-role + labels: + product: ibm-block-csi-driver + csi: ibm + +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + ## Needed only from k8s 1.14+ + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + ## Needed only from k8s 1.14+ + resources: ["nodes"] + verbs: ["get", "list", "watch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-provisioner-binding + labels: + product: ibm-block-csi-driver + csi: ibm + +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-external-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-attacher-role + labels: + product: ibm-block-csi-driver + csi: ibm +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinode"] # For k8s 1.14+ + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-attacher-binding + labels: + product: ibm-block-csi-driver + csi: ibm +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-external-attacher-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-cluster-driver-registrar-role + labels: + product: ibm-block-csi-driver + csi: ibm +rules: + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csidrivers"] + verbs: ["create", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-cluster-driver-registrar-binding + labels: + product: ibm-block-csi-driver + csi: ibm +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-cluster-driver-registrar-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-snapshotter-role + labels: + product: ibm-block-csi-driver + csi: ibm +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + 
verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ibm-block-csi-external-snapshotter-binding + labels: + product: ibm-block-csi-driver + csi: ibm +subjects: + - kind: ServiceAccount + name: ibm-block-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ibm-block-csi-external-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +--- + +## CSI Controller Service statefulset +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: ibm-block-csi-controller + namespace: kube-system + labels: + product: ibm-block-csi-driver + csi: ibm +spec: + selector: + matchLabels: + app: ibm-block-csi-controller + serviceName: ibm-block-csi-controller + replicas: 1 + template: + metadata: + labels: + app: ibm-block-csi-controller + product: ibm-block-csi-driver + csi: ibm + spec: + serviceAccount: ibm-block-csi-controller-sa + containers: + - name: ibm-block-csi-controller + image: ibmcom/ibm-block-csi-driver-controller:0.9.0 + imagePullPolicy: "IfNotPresent" + args : + - --csi-endpoint=$(CSI_ENDPOINT) + - --loglevel=$(CSI_LOGLEVEL) + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: CSI_LOGLEVEL + value: DEBUG + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + ## Defining port which will be used to GET plugin health status + ## 9808 is default, but can be changed. + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + ## The probe actually used by the liveness-probe. TODO Not sure if ports\livenessProbe needed. + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + + + - name: cluster-driver-registrar + ## Note: Requires CRD CSIDriver + image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --v=5 + #- --pod-info-mount=true ## TODO to enable pod info feature. 
+ env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.3.0 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --v=5 + #- --feature-gates=Topology=true ## TODO add Topology feature + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v1.2.1 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + + +## TODO add snapshotter when plugin will support ControllerCreateSnapshot capability. +# - name: csi-snapshotter +# image: quay.io/k8scsi/csi-snapshotter:v1.1.0 # TODO v1.1.0 for k8s1.13+(github release different then k8s CSI docs), but there is v1.0.1 for k8s 1.13 +# imagePullPolicy: "IfNotPresent" +# args: +# - --csi-address=$(ADDRESS) +# - --connection-timeout=15s +# env: +# - name: ADDRESS +# value: /var/lib/csi/sockets/pluginproxy/csi.sock +# volumeMounts: +# - name: socket-dir +# mountPath: /var/lib/csi/sockets/pluginproxy/ + + + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v1.1.0 # TODO v1.1.0 is for k8s 1.13+, but v1.0.2 was the first k8s 1.13 version. + args: + - --csi-address=/csi/csi.sock + - --connection-timeout=3s # TODO this line is deprecated from v1.1.0. So if exist its ignore it. + volumeMounts: + - name: socket-dir + mountPath: /csi + + ## TODO add external-resizer later on + volumes: + - name: socket-dir + emptyDir: {} + +--- + +# CSI Node Service Deamonset +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ibm-block-csi-node + namespace: kube-system + labels: + product: ibm-block-csi-driver + csi: ibm +spec: + selector: + matchLabels: + app: ibm-block-csi-node + template: + metadata: + labels: + app: ibm-block-csi-node + product: ibm-block-csi-driver + csi: ibm + spec: + hostNetwork: true + containers: + - name: ibm-block-csi-node + securityContext: + privileged: true + image: ibmcom/ibm-block-csi-driver-node:0.9.0 + imagePullPolicy: "IfNotPresent" + args: + - --csi-endpoint=$(CSI_ENDPOINT) + - --hostname=$(KUBE_NODE_NAME) + - --config-file-path=./config.yaml + - --loglevel=$(CSI_LOGLEVEL) + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: CSI_LOGLEVEL + value: "trace" + ## KUBE_NODE_NAME needed for the GetNodeInfo API + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: mountpoint-dir + mountPath: /var/lib/kubelet/pods + mountPropagation: "Bidirectional" + - name: socket-dir + mountPath: /csi + + - name: device-dir + mountPath: /dev + - name: sys-dir + mountPath: /sys + - name: host-dir + mountPath: /host # Maps the host root into "/host", inside the container. 
+ mountPropagation: "Bidirectional" + + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + + + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + imagePullPolicy: "IfNotPresent" + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/block.csi.ibm.com-reg.sock /csi/csi.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/block.csi.ibm.com/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v1.1.0 # TODO v1.1.0 is for k8s 1.13+, but v1.0.2 was the first k8s 1.13 version. + args: + - --csi-address=/csi/csi.sock + - --connection-timeout=3s # NOTE: this line is deprecated from v1.1.0. So if exist its ignore it. + volumeMounts: + - name: socket-dir + mountPath: /csi + + volumes: + ## This volume is where the driver mounts volumes + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + + ## This volume is where the socket for kubelet->driver communication is done + - name: socket-dir + hostPath: + path: /var/lib/kubelet/plugins/block.csi.ibm.com/ + type: DirectoryOrCreate + + ## This volume is where the node-driver-registrar registers the plugin with kubelet + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + + ## To discover the new devices + - name: device-dir + hostPath: + path: /dev + type: Directory + + - name: sys-dir + hostPath: + path: /sys + type: Directory + + - name: host-dir + hostPath: + path: / + type: Directory + + +## The below CSIDriver object is required to define (k8s 1.14 its still needed to be added due to redesign -> https://kubernetes-csi.github.io/docs/cluster-driver-registrar.html) +apiVersion: storage.k8s.io/v1beta1 ## for k8s 1.13 it should be csi.storage.k8s.io/v1alpha1 and requires to set feature gate --feature-gates=CSIDriverRegistry=true +kind: CSIDriver +metadata: + name: ibm-block-csi-driver +spec: + attachRequired: true + podInfoOnMount: false + diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..231827e96 --- /dev/null +++ b/go.mod @@ -0,0 +1,18 @@ +module github.com/ibm/ibm-block-csi-driver + +go 1.12 + +require ( + github.com/container-storage-interface/spec v1.1.0 + github.com/golang/mock v1.3.1 + github.com/pkg/errors v0.8.1 // indirect + github.com/sirupsen/logrus v1.4.2 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 // indirect + golang.org/x/sync v0.0.0-20190423024810-112230192c58 + google.golang.org/grpc v1.22.0 + gopkg.in/yaml.v2 v2.2.2 + k8s.io/apimachinery v0.0.0-20190727130956-f97a4e5b4abc + k8s.io/klog v0.3.3 // indirect + k8s.io/kubernetes v1.13.1 + k8s.io/utils v0.0.0-20190712204705-3dccf664f023 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..b36b968f4 --- /dev/null +++ b/go.sum @@ -0,0 +1,147 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs= +github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/gogo/protobuf v0.0.0-20190410021324-65acae22fc9/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod 
h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= 
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/apimachinery v0.0.0-20190727130956-f97a4e5b4abc h1:fi1vG9UrnqoGU/H2HP2rr7GH6vaQeFdLxfocg5uMQmA= +k8s.io/apimachinery v0.0.0-20190727130956-f97a4e5b4abc/go.mod h1:eXR4ljjmbwK6Ng0PKsXRySPXnTUy/qBUa6kPDeckhQ0= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= +k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/kubernetes v1.13.1 h1:IwCCcPOZwY9rKcQyBJYXAE4Wgma4oOW5NYR3HXKFfZ8= +k8s.io/kubernetes v1.13.1/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20190712204705-3dccf664f023 h1:1H4Jyzb0z2X0GfBMTwRjnt5ejffRHrGftUgJcV/ZfDc= +k8s.io/utils v0.0.0-20190712204705-3dccf664f023/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= 
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/node/cmd/main.go b/node/cmd/main.go new file mode 100644 index 000000000..e2f1eb1c8 --- /dev/null +++ b/node/cmd/main.go @@ -0,0 +1,55 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/ibm/ibm-block-csi-driver/node/logger" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver" +) + +func main() { + logger.Debugf("Starting CSI node...") // Note: this must be the first call, so that the logger registers the -loglevel flag before flag.Parse() below + var ( + endpoint = flag.String("csi-endpoint", "unix://csi/csi.sock", "CSI Endpoint") + version = flag.Bool("version", false, "Print the version and exit.") + configFile = flag.String("config-file-path", "./config.yaml", "Shared config file.") + hostname = flag.String("hostname", "host-dns-name", "The name of the host the node is running on.") + ) + + flag.Parse() + + if *version { + info, err := driver.GetVersionJSON(*configFile) + if err != nil { + logger.Panicln(err) + } + fmt.Println(info) + os.Exit(0) + } + + drv, err := driver.NewDriver(*endpoint, *configFile, *hostname) + if err != nil { + logger.Panicln(err) + } + if err := drv.Run(); err != nil { + logger.Panicln(err) + } +} diff --git a/node/goid_info/goid_info.go b/node/goid_info/goid_info.go new file mode 100644 index 000000000..b9f7c64e7 --- /dev/null +++ b/node/goid_info/goid_info.go @@ -0,0 +1,48 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// We can map a goid to a string which will appear in every log entry written from that goid. In most places it will be the volume id. +// Use "SetAdditionalIDInfo()" at the beginning of an API method to specify additional info for the current goid. +// Directly after, use "defer DeleteAdditionalIDInfo()" to remove the info so additionalIDInfoByGoID will not grow endlessly.
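+// Illustrative usage sketch (the handler name below is hypothetical and only shows the intended call pattern): +// func (d *NodeService) NodeStageVolume(...) { +// goid_info.SetAdditionalIDInfo(volumeId) +// defer goid_info.DeleteAdditionalIDInfo() +// ... // every logger call made from this goroutine now carries the volume id +// }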
+ +package goid_info + +import ( + "github.com/ibm/ibm-block-csi-driver/node/util" + "golang.org/x/sync/syncmap" +) + +var additionalIDInfoByGoID = new(syncmap.Map) + +func GetAdditionalIDInfo() (string, bool) { + goId := util.GetGoID() + additionalIDInfo, hasValue := additionalIDInfoByGoID.Load(goId) + if hasValue { + return additionalIDInfo.(string), hasValue + } else { + return "", hasValue + } +} + +func SetAdditionalIDInfo(additionalIDInfo string) { + goId := util.GetGoID() + additionalIDInfoByGoID.Store(goId, additionalIDInfo) +} + +func DeleteAdditionalIDInfo() { + goId := util.GetGoID() + additionalIDInfoByGoID.Delete(goId) +} diff --git a/node/logger/logger.go b/node/logger/logger.go new file mode 100644 index 000000000..135b8042d --- /dev/null +++ b/node/logger/logger.go @@ -0,0 +1,209 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This package is used for logging. +// It is implemented as a decorator for logrus which formats messages in a specific manner +// while adding additional data to each message, such as the goroutine id. +// E.g. 2019-08-20 17:57:01.821 INFO [1] [vol] (main.go:83) - my log message +// +// We can add additional info to a goid, which is then shown in the log, by mapping it to some string value +// using the goid_info package. E.g. to a volume id. +// +// To change the log level add the -loglevel argument (e.g. -loglevel debug). The default is trace.
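+// Illustrative example (the file name, line number and ids in the sample output are made up): +// logger.Infof("GetMpathDevice: Found multipath devices: [%s]", "dm-1") +// would print a line such as: +// 2019-08-20 17:57:02.103 INFO [34] [vol-id] (device_connectivity_helper_scsigeneric.go:120) - GetMpathDevice: Found multipath devices: [dm-1]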
+ +package logger + +import ( + "bytes" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/ibm/ibm-block-csi-driver/node/goid_info" + "github.com/ibm/ibm-block-csi-driver/node/util" + "github.com/sirupsen/logrus" +) + +const ( + callerField = "caller" + goIDField = "goid" + additionalGoIDInfoField = "addId" + unknownValue = "unknown" + noAdditionalIDValue = "-" +) + +type LogFormat struct { + TimestampFormat string +} + +// singleton logrus instance +var instance *logrus.Logger + +// Format the entry which contains log message info +func (f *LogFormat) Format(entry *logrus.Entry) ([]byte, error) { + goid := entry.Data[goIDField] + if goid == nil { + goid = unknownValue + } + additionalGoIDInfo := entry.Data[additionalGoIDInfoField] + if additionalGoIDInfo == nil || len(additionalGoIDInfo.(string)) == 0 { + additionalGoIDInfo = noAdditionalIDValue + } + caller := entry.Data[callerField] // file and line this log is caled from + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + b.WriteString(entry.Time.Format(f.TimestampFormat) + " ") + b.WriteString(strings.ToUpper(entry.Level.String()) + "\t") + b.WriteString(fmt.Sprintf("%v", "[" + goid.(string)) + "] ") + b.WriteString(fmt.Sprintf("%v", "[" + additionalGoIDInfo.(string)) + "] ") + b.WriteString("(" + caller.(string) + ") - ") + b.WriteString(entry.Message) + b.WriteString("\n") + return b.Bytes(), nil +} + +// Initialize logrus instance if it was not initialized yet +// It panics if -loglevel is specified but as illegal value +func getInstance() *logrus.Logger { + if instance == nil { + formatter := LogFormat{} + instance = logrus.New() + instance.SetReportCaller(true) + // in logrus timestamp format is specified using example + formatter.TimestampFormat = "2006-01-02 15:04:05,123" + instance.SetFormatter(&formatter) + // set log level + logLevel := flag.String("loglevel", "trace", "The level of logs (error, warning info, debug, trace etc...).") + level, err := logrus.ParseLevel(*logLevel) + if err != nil { + logEntry().Panic(err) + } + instance.SetLevel(level) + } + return instance +} + +// Create log entry with additional data +// 1) goroutine id +// 2) caller: file and line log was called from +func logEntry() *logrus.Entry { + goid := util.GetGoID() + additionalId, _ := goid_info.GetAdditionalIDInfo() + _, file, no, ok := runtime.Caller(2) + caller := unknownValue + if ok { + caller = filepath.Base(file) + ":" + strconv.Itoa(no) + } + logEntry := getInstance().WithFields(logrus.Fields{goIDField: strconv.FormatUint(goid, 10), + additionalGoIDInfoField: additionalId, + callerField: caller}) + return logEntry +} + +func Trace(args ...interface{}) { + logEntry().Trace(args...) +} + +func Traceln(args ...interface{}) { + logEntry().Traceln(args...) +} + +func Tracef(format string, args ...interface{}) { + logEntry().Tracef(format, args...) +} + +func Debug(args ...interface{}) { + logEntry().Debug(args...) +} + +func Debugln(args ...interface{}) { + logEntry().Debugln(args...) +} + +func Debugf(format string, args ...interface{}) { + logEntry().Debugf(format, args...) +} + +func Info(args ...interface{}) { + logEntry().Info(args...) +} + +func Infoln(args ...interface{}) { + logEntry().Infoln(args...) +} + +func Infof(format string, args ...interface{}) { + logEntry().Infof(format, args...) +} + +func Warning(args ...interface{}) { + logEntry().Warn(args...) +} + +func Warningln(args ...interface{}) { + logEntry().Warnln(args...) 
+} + +func Warningf(format string, args ...interface{}) { + logEntry().Warnf(format, args...) +} + +func Error(args ...interface{}) { + logEntry().Error(args...) +} + +func Errorln(args ...interface{}) { + logEntry().Errorln(args...) +} + +func Errorf(format string, args ...interface{}) { + logEntry().Errorf(format, args...) +} + +func Fatal(args ...interface{}) { + logEntry().Fatal(args...) +} + +func Fatalln(args ...interface{}) { + logEntry().Fatalln(args...) +} + +func Fatalf(format string, args ...interface{}) { + logEntry().Fatalf(format, args...) +} + +func Panic(args ...interface{}) { + logEntry().Panic(args...) +} + +func Panicln(args ...interface{}) { + logEntry().Panicln(args...) +} + +func Panicf(format string, args ...interface{}) { + logEntry().Panicf(format, args...) +} + +func GetLevel() string { + return getInstance().GetLevel().String() +} diff --git a/node/pkg/driver/device_connectivity/device_connectivity_fc.go b/node/pkg/driver/device_connectivity/device_connectivity_fc.go new file mode 100644 index 000000000..45f0f59f8 --- /dev/null +++ b/node/pkg/driver/device_connectivity/device_connectivity_fc.go @@ -0,0 +1,59 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package device_connectivity + +import ( + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" +) + +type OsDeviceConnectivityFc struct { + Executer executer.ExecuterInterface + HelperScsiGeneric OsDeviceConnectivityHelperScsiGenericInterface +} + +func NewOsDeviceConnectivityFc(executer executer.ExecuterInterface) OsDeviceConnectivityInterface { + return &OsDeviceConnectivityFc{ + Executer: executer, + HelperScsiGeneric: NewOsDeviceConnectivityHelperScsiGeneric(executer), + } +} + +func (r OsDeviceConnectivityFc) RescanDevices(lunId int, arrayIdentifiers []string) error { + return r.HelperScsiGeneric.RescanDevices(lunId, arrayIdentifiers) +} + +func (r OsDeviceConnectivityFc) GetMpathDevice(volumeId string, lunId int, arrayIdentifiers []string) (string, error) { + /* + Description: + 1. Find all the files "/dev/disk/by-path/pci-*-fc-<WWN>-lun-<LUN ID>" -> ../../sd<X> + Note: Instead of fixing the exact PCI address here we just search with * on that part. + 2. Get the sd devices. + 3. Search '/sys/block/dm-*\/slaves/*' and get the <dm device name>. For example dm-3 below: + /sys/block/dm-3/slaves/sdb -> ../../../../pci0000:00/0000:00:17.0/0000:13:00.0/host33/rport-33:0-3/target33:0:1/33:0:1:0/block/sdb + + Return Value: "dm-X" of the volumeID by using the LunID and the array wwn.
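+ Example (illustrative, using the sample WWN that already appears in this file): for lunId=0 and arrayIdentifiers=["500507680B25C0AA"], the glob built by the generic helper is /dev/disk/by-path/pci*-fc-0x500507680b25c0aa-lun-0, and the returned value is the dm-X whose slaves contain the matching sdX.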
+ */ + return r.HelperScsiGeneric.GetMpathDevice(volumeId, lunId, arrayIdentifiers, "fc") +} + +func (r OsDeviceConnectivityFc) FlushMultipathDevice(mpathDevice string) error { + return r.HelperScsiGeneric.FlushMultipathDevice(mpathDevice) +} + +func (r OsDeviceConnectivityFc) RemovePhysicalDevice(sysDevices []string) error { + return r.HelperScsiGeneric.RemovePhysicalDevice(sysDevices) +} diff --git a/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric.go b/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric.go new file mode 100644 index 000000000..6068e2436 --- /dev/null +++ b/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric.go @@ -0,0 +1,469 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package device_connectivity + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/ibm/ibm-block-csi-driver/node/logger" + executer "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" +) + +//go:generate mockgen -destination=../../../mocks/mock_OsDeviceConnectivityHelperScsiGenericInterface.go -package=mocks github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity OsDeviceConnectivityHelperScsiGenericInterface + +type OsDeviceConnectivityHelperScsiGenericInterface interface { + /* + This is helper interface for OsDeviceConnectivityHelperScsiGenericInterface. + Mainly for writing clean unit testing, so we can Mock this interface in order to unit test logic. 
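+ For example (illustrative), a unit test can inject the gomock-generated mock produced by the go:generate mockgen directive above (mock_OsDeviceConnectivityHelperScsiGenericInterface.go in node/mocks) instead of the real implementation.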
+ */ + RescanDevices(lunId int, arrayIdentifiers []string) error + GetMpathDevice(volumeId string, lunId int, arrayIdentifiers []string, connectivityType string) (string, error) + FlushMultipathDevice(mpathDevice string) error + RemovePhysicalDevice(sysDevices []string) error +} + +type OsDeviceConnectivityHelperScsiGeneric struct { + Executer executer.ExecuterInterface + Helper OsDeviceConnectivityHelperInterface + MutexMultipathF *sync.Mutex +} + +var ( + TimeOutMultipathFlashCmd = 4 * 1000 +) + +const ( + DevPath = "/dev" +) + +func NewOsDeviceConnectivityHelperScsiGeneric(executer executer.ExecuterInterface) OsDeviceConnectivityHelperScsiGenericInterface { + return &OsDeviceConnectivityHelperScsiGeneric{ + Executer: executer, + Helper: NewOsDeviceConnectivityHelperGeneric(executer), + MutexMultipathF: &sync.Mutex{}, + } +} + +func (r OsDeviceConnectivityHelperScsiGeneric) RescanDevices(lunId int, arrayIdentifiers []string) error { + logger.Debugf("Rescan : Start rescan on specific lun, on lun : {%v}, with array identifiers : {%v}", lunId, arrayIdentifiers) + var hostIDs []int + var errStrings []string + if len(arrayIdentifiers) == 0 { + e := &ErrorNotFoundArrayIdentifiers{lunId} + logger.Errorf(e.Error()) + return e + } + + for _, arrayIdentifier := range arrayIdentifiers { + hostsId, e := r.Helper.GetHostsIdByArrayIdentifier(arrayIdentifier) + if e != nil { + logger.Errorf(e.Error()) + errStrings = append(errStrings, e.Error()) + } + hostIDs = append(hostIDs, hostsId...) + } + if len(hostIDs) == 0 && len(errStrings) != 0 { + err := errors.New(strings.Join(errStrings, ",")) + return err + } + for _, hostNumber := range hostIDs { + + filename := fmt.Sprintf("/sys/class/scsi_host/host%d/scan", hostNumber) + f, err := r.Executer.OsOpenFile(filename, os.O_APPEND|os.O_WRONLY, 0200) + if err != nil { + logger.Errorf("Rescan Error: could not open filename : {%v}. 
err : {%v}", filename, err) + return err + } + + defer f.Close() + + scanCmd := fmt.Sprintf("- - %d", lunId) + logger.Debugf("Rescan host device : echo %s > %s", scanCmd, filename) + if written, err := r.Executer.FileWriteString(f, scanCmd); err != nil { + logger.Errorf("Rescan Error: could not write to rescan file :{%v}, error : {%v}", filename, err) + return err + } else if written == 0 { + e := &ErrorNothingWasWrittenToScanFileError{filename} + logger.Errorf(e.Error()) + return e + } + + } + + logger.Debugf("Rescan : finished rescan on lun id : {%v}, with array identifiers : {%v}", lunId, arrayIdentifiers) + return nil +} + +func (r OsDeviceConnectivityHelperScsiGeneric) GetMpathDevice(volumeId string, lunId int, arrayIdentifiers []string, connectivityType string) (string, error) { + logger.Infof("GetMpathDevice: Searching multipath devices for volume : [%s] that relates to lunId=%d and arrayIdentifiers=%s", volumeId, lunId, arrayIdentifiers) + + if len(arrayIdentifiers) == 0 { + e := &ErrorNotFoundArrayIdentifiers{lunId} + return "", e + } + var devicePaths []string + var errStrings []string + var targetPath string + lunIdStr := strconv.Itoa(lunId) + + if connectivityType == "fc" { + targetPath = "/dev/disk/by-path/pci*" + // On the host the path looks like this: /dev/disk/by-path/pci-0000:13:00.0-fc-0x500507680b25c0aa-lun-0 + // So add the prefix "0x" to the arrayIdentifiers + for index, wwn := range arrayIdentifiers { + arrayIdentifiers[index] = "0x" + strings.ToLower(wwn) + } + } + if connectivityType == "iscsi" { + targetPath = "/dev/disk/by-path/ip*" + } + + for _, arrayIdentifier := range arrayIdentifiers { + dp := strings.Join([]string{targetPath, connectivityType, arrayIdentifier, "lun", lunIdStr}, "-") + logger.Infof("GetMpathDevice: Get the mpath devices related to connectivityType=%s initiator=%s and lunID=%s : {%v}", connectivityType, arrayIdentifier, lunIdStr, dp) + dps, exists, e := r.Helper.WaitForPathToExist(dp, 5, 1) + if e != nil { + logger.Errorf("GetMpathDevice: No device found error : %v ", e.Error()) + errStrings = append(errStrings, e.Error()) + } else if !exists { + e := &MultipleDeviceNotFoundForLunError{volumeId, lunId, []string{arrayIdentifier}} + logger.Errorf(e.Error()) + errStrings = append(errStrings, e.Error()) + } + devicePaths = append(devicePaths, dps...) + } + + if len(devicePaths) == 0 && len(errStrings) != 0 { + err := errors.New(strings.Join(errStrings, ",")) + return "", err + } + + devicePathTosysfs := make(map[string]bool) + // Loop over the physical devices of the volume - /dev/sdX - and store all their dm devices inside the map.
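+ // (Two different by-path entries for the same LUN, e.g. one per path, are expected to resolve to the same dm device; the map below de-duplicates them.)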
+ for _, path := range devicePaths { + if path != "" { // since it may return empty items + mappedDevicePath, err := r.Helper.GetMultipathDisk(path) + if err != nil { + return "", err + } + + if mappedDevicePath != "" { + devicePathTosysfs[mappedDevicePath] = true // map it in order to keep unique dm devices + } + + } + } + + var mps string + for key := range devicePathTosysfs { + mps += ", " + key + } + logger.Infof("GetMpathDevice: Found multipath devices: [%s] that relates to lunId=%d and arrayIdentifiers=%s", mps, lunId, arrayIdentifiers) + + if len(devicePathTosysfs) > 1 { + return "", &MultipleDmDevicesError{volumeId, lunId, arrayIdentifiers, devicePathTosysfs} + } + + var md string + for md = range devicePathTosysfs { + break // because the map holds a single value (1 mpath device; otherwise it should have failed above), so just take the first + } + return md, nil +} + +func (r OsDeviceConnectivityHelperScsiGeneric) FlushMultipathDevice(mpathDevice string) error { + // mpathdevice is dm-4 for example + logger.Debugf("Flushing mpath device : {%v}", mpathDevice) + + fullDevice := filepath.Join(DevPath, mpathDevice) + + logger.Debugf("Try to acquire lock for running the command multipath -f {%v} (to avoid concurrent multipath commands)", mpathDevice) + r.MutexMultipathF.Lock() + logger.Debugf("Acquired lock for multipath -f command") + _, err := r.Executer.ExecuteWithTimeout(TimeOutMultipathFlashCmd, "multipath", []string{"-f", fullDevice}) + r.MutexMultipathF.Unlock() + + if err != nil { + if _, errOpen := os.Open(fullDevice); errOpen != nil { + if os.IsNotExist(errOpen) { + logger.Debugf("Mpath device {%v} was deleted", fullDevice) + } else { + logger.Errorf("Error while opening file : {%v}. error: {%v}. This means that multipath -f {%v} did not succeed in deleting the device.", fullDevice, errOpen.Error(), fullDevice) + return errOpen + } + } else { + logger.Errorf("multipath -f {%v} did not succeed in deleting the device. err={%v}", fullDevice, err.Error()) + return err + } + } + + logger.Debugf("Finished flushing mpath device : {%v}", mpathDevice) + return nil + +} + +func (r OsDeviceConnectivityHelperScsiGeneric) RemovePhysicalDevice(sysDevices []string) error { + // sysDevices = sdb, sda,... + logger.Debugf("Removing scsi device : {%v}", sysDevices) + // NOTE: this func could also be relevant for SCSI (not only for iSCSI) + var ( + f *os.File + err error + ) + + for _, deviceName := range sysDevices { + if deviceName == "" { + continue + } + + filename := fmt.Sprintf("/sys/block/%s/device/delete", deviceName) + logger.Debugf("Delete scsi device by opening the device delete file : {%v}", filename) + + if f, err = os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0200); err != nil { + if os.IsNotExist(err) { + logger.Warningf("Idempotency: Block device {%v} was not found on the system, so skip deleting it", deviceName) + continue + } else { + logger.Errorf("Error while opening file : {%v}. error: {%v}", filename, err.Error()) + return err + } + } + + defer f.Close() + + if _, err := f.WriteString("1"); err != nil { + logger.Errorf("Error while writing to file : {%v}. error: {%v}", filename, err.Error()) + return err // TODO: maybe we need to just swallow the error and continue??
+ } + } + logger.Debugf("Finished removing SCSI devices : {%v}", sysDevices) + return nil +} + +// ============== OsDeviceConnectivityHelperInterface ========================== + +//go:generate mockgen -destination=../../../mocks/mock_OsDeviceConnectivityHelperInterface.go -package=mocks github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity OsDeviceConnectivityHelperInterface + +type OsDeviceConnectivityHelperInterface interface { + /* + This is a helper interface for OsDeviceConnectivityHelperScsiGeneric. + Mainly for writing clean unit tests, so we can mock this interface in order to unit test the OsDeviceConnectivityHelperScsiGeneric logic. + */ + WaitForPathToExist(devicePath string, maxRetries int, intervalSeconds int) ([]string, bool, error) + GetMultipathDisk(path string) (string, error) + GetHostsIdByArrayIdentifier(arrayIdentifier string) ([]int, error) +} + +type OsDeviceConnectivityHelperGeneric struct { + executer executer.ExecuterInterface +} + +func NewOsDeviceConnectivityHelperGeneric(executer executer.ExecuterInterface) OsDeviceConnectivityHelperInterface { + return &OsDeviceConnectivityHelperGeneric{executer: executer} +} + +func (o OsDeviceConnectivityHelperGeneric) WaitForPathToExist(devicePath string, maxRetries int, intervalSeconds int) ([]string, bool, error) { + /* + Description: + Try to find all the files + iSCSI -> /dev/disk/by-path/ip*-iscsi-<IQN>-lun-<LUN ID> + FC -> /dev/disk/by-path/pci-*-fc-<WWN>-lun-<LUN ID> + If none are found then try again, up to maxRetries times. + */ + + var err error + for i := 0; i < maxRetries; i++ { + err = nil + fpaths, err := o.executer.FilepathGlob(devicePath) + if err != nil { + return nil, false, err + } + + logger.Debugf("fpaths : {%v}", fpaths) + + if fpaths == nil { + err = os.ErrNotExist + } else { + return fpaths, true, nil + } + + time.Sleep(time.Second * time.Duration(intervalSeconds)) + } + return nil, false, err +} + +func (o OsDeviceConnectivityHelperGeneric) GetMultipathDisk(path string) (string, error) { + /* + Description: + 1. Get the name of the device (e.g. sdX) by reading `path` as a symlink to the device. + e.g: Where path=/dev/disk/by-path/pci-*-fc-0x<wwn>-lun-<lun id>, which is a symlink to "../../sdX" + /dev/disk/by-path/pci-*-fc-0x<wwn>-lun-<lun id> -> ../../sdX + or + /dev/disk/by-path/ip-<target ip>-iscsi-<iqn>-lun-<lun id> -> ../../sdX + 2. After having sdX, the function loops over all the files in /sys/block/dm-*\/slaves/sd<X> and returns the relevant <dm device name>. + The <dm device name> is the directory that follows /sys/block in the path /sys/block/<dm device name>/slaves/sd<X>. + e.g: the function will return dm-1 for path=/dev/disk/by-path/pci-0000:13:00.0-fc-0x500507680b25c0aa-lun-0, + Because the /dev/disk/by-path/pci-0000:13:00.0-fc-0x500507680b25c0aa-lun-0 -> ../../sda + And listing all the /sys/block/dm-*\/slaves/sda matches only dm-1. So the function will return dm-1. + + Return Value: + dm-<X> + */ + + // Follow link to destination directory + logger.Debugf("Getting multipath device for given path %s", path) + + // Get the sdX which is the file that path links to. + devicePath, err := o.executer.OsReadlink(path) + if err != nil { + logger.Errorf("Error reading link for multipath disk: %s. 
error: {%s}\n", path, err.Error()) + return "", err + } + + // Get only the physical device from /dev/disk/by-path/pci-*-fc-0x<wwn>-lun-<lun id> -> ../../sdb + // or /dev/disk/by-path/ip-<target ip>-iscsi-<iqn>-lun-<lun id> -> ../../sdb + sdevice := filepath.Base(devicePath) + + // If destination directory is already identified as a multipath device, + // just return its path + if strings.HasPrefix(sdevice, "dm-") { + logger.Debugf("Already found multipath device: %s", sdevice) + return sdevice, nil + } + + // Fallback to iterating through all the entries under /sys/block/dm-* and + // check to see if any have an entry under /sys/block/dm-*/slaves matching + // the device the symlink was pointing at + dmPaths, err := o.executer.FilepathGlob("/sys/block/dm-*") + // TODO improve the lookup by just calling filepath.Glob("/sys/block/dm-*/slaves/" + sdevice); then no loops are needed below, since it would find the device directly. + + if err != nil { + logger.Errorf("Glob error: %s", err) + return "", err + } + for _, dmPath := range dmPaths { + sdevices, err := o.executer.FilepathGlob(filepath.Join(dmPath, "slaves", "*")) + if err != nil { + logger.Warningf("Glob error: %s", err) + } + for _, spath := range sdevices { + s := filepath.Base(spath) + if sdevice == s { + // We've found a matching entry, return the path for the + // dm-* device it was found under + // for example, return /dev/dm-3 + // ls -l /sys/block/dm-*/slaves/* + // /sys/block/dm-3/slaves/sdb -> ../../../../pci0000:00/0000:00:17.0/0000:13:00.0/host33/rport-33:0-3/target33:0:1/33:0:1:0/block/sdb + + p := filepath.Join(DevPath, filepath.Base(dmPath)) + logger.Debugf("Found matching multipath device: %s under dm-* device path %s", sdevice, dmPath) + return p, nil + } + } + } + + err = &MultipleDeviceNotFoundError{path, devicePath} + logger.Errorf(err.Error()) + return "", err +} + +const ( + FC_HOST_SYSFS_PATH = "/sys/class/fc_remote_ports/rport-*/port_name" + IscsiHostRexExPath = "/sys/class/iscsi_host/host*/device/session*/iscsi_session/session*/targetname" +) + +func (o OsDeviceConnectivityHelperGeneric) GetHostsIdByArrayIdentifier(arrayIdentifier string) ([]int, error) { + /* + Description: + This function finds all the host IDs under the directory /sys/class/fc_remote_ports/ (for FC) or /sys/class/iscsi_host/ (for iSCSI). + The function goes over all the above hosts and returns only the host numbers as a list. + */ + //arrayIdentifier is a wwn, e.g. 500507680b25c0aa + var targetFilePath string + var regexpValue string + + //IQN format is iqn.yyyy-mm.naming-authority:unique name + //For example: iqn.1986-03.com.ibm:2145.v7k194.node2 + iscsiMatchRex := `^iqn\.(\d{4}-\d{2})\.([^:]+)(:)([^,:\s']+)` + isIscsi, err := regexp.MatchString(iscsiMatchRex, arrayIdentifier) + if isIscsi { + targetFilePath = IscsiHostRexExPath + regexpValue = "host([0-9]+)" + } else { + targetFilePath = FC_HOST_SYSFS_PATH + regexpValue = "rport-([0-9]+)" + } + + var HostIDs []int + matches, err := o.executer.FilepathGlob(targetFilePath) + if err != nil { + logger.Errorf("Error while Glob targetFilePath : {%v}. 
err : {%v}", targetFilePath, err) + return nil, err + } + + logger.Debugf("targetname files matches were found : {%v}", matches) + + re := regexp.MustCompile(regexpValue) + for _, targetPath := range matches { + logger.Debugf("Check if targetname path (%s) is relevant for storage target (%s).", targetPath, arrayIdentifier) + targetName, err := o.executer.IoutilReadFile(targetPath) + if err != nil { + logger.Warningf("Could not read target name from file : {%v}, error : {%v}", targetPath, err) + continue + } + identifierFromHost := strings.TrimSpace(string(targetName)) + //For FC WWNs from the host, the value will like this: 0x500507680b26c0aa, but the arrayIdentifier doesn't has this prefix + if strings.HasPrefix(identifierFromHost, "0x") { + logger.Tracef("Remove the 0x prefix for: {%v}", identifierFromHost) + identifierFromHost = strings.TrimLeft(identifierFromHost, "0x") + } + if strings.EqualFold(identifierFromHost, arrayIdentifier) { + regexMatch := re.FindStringSubmatch(targetPath) + logger.Tracef("Found regex matches : {%v}", regexMatch) + hostNumber := -1 + + if len(regexMatch) < 2 { + logger.Warningf("Could not find host number for targetFilePath : {%v}", targetPath) + continue + } else { + hostNumber, err = strconv.Atoi(regexMatch[1]) + if err != nil { + logger.Warningf("Host number in for target file was not valid : {%v}", regexMatch[1]) + continue + } + } + + HostIDs = append(HostIDs, hostNumber) + logger.Debugf("portState path (%s) was found. Adding host ID {%v} to the id list.", targetPath, hostNumber) + } + } + + if len(HostIDs) == 0 { + return []int{}, &ConnectivityIdentifierStorageTargetNotFoundError{StorageTargetName: arrayIdentifier, DirectoryPath: targetFilePath} + } + + return HostIDs, nil + +} diff --git a/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric_test.go b/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric_test.go new file mode 100644 index 000000000..d223778c0 --- /dev/null +++ b/node/pkg/driver/device_connectivity/device_connectivity_helper_scsigeneric_test.go @@ -0,0 +1,1014 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package device_connectivity_test + +import ( + "fmt" + "os" + "reflect" + "strconv" + "strings" + "sync" + "testing" + + gomock "github.com/golang/mock/gomock" + mocks "github.com/ibm/ibm-block-csi-driver/node/mocks" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" + executer "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" +) + +type WaitForPathToExistReturn struct { + path string + devicePaths []string + exists bool + err error +} + +func NewOsDeviceConnectivityHelperScsiGenericForTest( + executer executer.ExecuterInterface, + helper device_connectivity.OsDeviceConnectivityHelperInterface, + mutexLock *sync.Mutex, +) device_connectivity.OsDeviceConnectivityHelperScsiGenericInterface { + return &device_connectivity.OsDeviceConnectivityHelperScsiGeneric{ + Executer: executer, + Helper: helper, + MutexMultipathF: mutexLock, + } +} + +type GetMultipathDiskReturn struct { + pathParam string + path string + err error +} + +func TestGetMpathDevice(t *testing.T) { + testCasesIscsi := []struct { + name string + arrayIdentifiers []string + + expErrType reflect.Type + expErr error + expDMdevice string + waitForPathToExistReturns []WaitForPathToExistReturn + getMultipathDiskReturns []GetMultipathDiskReturn + }{ + { + name: "Should fail when WaitForPathToExist not found any sd device", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: nil, + }, + }, + + expErr: fmt.Errorf("Couldn't find multipath device for volumeID [volIdNotRelevant] lunID [0] from array [[X]]. Please check the host connectivity to the storage."), + expDMdevice: "", + }, + + { + name: "Should fail when WaitForPathToExist fail for some reason", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: fmt.Errorf("error"), + }, + }, + + expErr: fmt.Errorf("error"), + expDMdevice: "", + }, + + { + name: "Should fail when GetMultipathDisk fail for some reason", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/ip1-iscsi-ID1-lun1"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/ip1-iscsi-ID1-lun1", + path: "", + err: fmt.Errorf("error"), + }, + }, + + expErr: fmt.Errorf("error"), + expDMdevice: "", + }, + + { + name: "Should fail when GetMultipathDisk provide 2 different dms that apply to the same lun (bas multipathing case)", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/ip1-iscsi-ID1-lun1", "/dev/disk/by-path/ip1-iscsi-ID1-lun1___2"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/ip1-iscsi-ID1-lun1", + path: "dm-1", + err: nil, + }, + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/ip1-iscsi-ID1-lun1___2", + path: "dm-2", // The main point, look like multipath crazy and give to the same vol but different path a different md device, which is wrong case - so we check it. 
+ err: nil, + }, + }, + + expErrType: reflect.TypeOf(&device_connectivity.MultipleDmDevicesError{}), + expDMdevice: "", + }, + + { + name: "Should succeed to GetMpathDevice - good path", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/ip1-iscsi-ID1-lun1", "/dev/disk/by-path/ip1-iscsi-ID1-lun1___2"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/ip1-iscsi-ID1-lun1", + path: "dm-1", + err: nil, + }, + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/ip1-iscsi-ID1-lun1___2", + path: "dm-1", // the same because there are 2 paths to the storage, so we should find 2 sd devices that point to the same dm device + err: nil, + }, + }, + + expErr: nil, + expDMdevice: "dm-1", + }, + + { + name: "Should succeed to GetMpathDevice with one more iqns", + arrayIdentifiers: []string{"X", "Y"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/ip1-iscsi-X-lun1"}, + exists: true, + err: nil, + }, + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/ip1-iscsi-Y-lun2"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/ip1-iscsi-X-lun1", + path: "dm-1", + err: nil, + }, + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/ip1-iscsi-Y-lun2", + path: "dm-1", + err: nil, + }, + }, + + expErr: nil, + expDMdevice: "dm-1", + }, + + { + name: "Should fail when WaitForPathToExist return error with the first array iqn, and found no sd device with the second array iqn", + arrayIdentifiers: []string{"X", "Y"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: fmt.Errorf("error"), + }, + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: nil, + }, + }, + + expErr: fmt.Errorf("error,Couldn't find multipath device for volumeID [volIdNotRelevant] lunID [0] from array [[Y]]. Please check the host connectivity to the storage."), + expDMdevice: "", + }, + } + + for _, tc := range testCasesIscsi { + + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + fake_executer := mocks.NewMockExecuterInterface(mockCtrl) + fake_helper := mocks.NewMockOsDeviceConnectivityHelperInterface(mockCtrl) + fake_mutex := &sync.Mutex{} + lunId := 0 + + var mcalls []*gomock.Call + for index, r := range tc.waitForPathToExistReturns { + path := strings.Join([]string{"/dev/disk/by-path/ip*", "iscsi", tc.arrayIdentifiers[index], "lun", strconv.Itoa(lunId)}, "-") + call := fake_helper.EXPECT().WaitForPathToExist(path, 5, 1).Return( + r.devicePaths, + r.exists, + r.err) + mcalls = append(mcalls, call) + } + + for _, r := range tc.getMultipathDiskReturns { + call := fake_helper.EXPECT().GetMultipathDisk(r.pathParam).Return(r.path, r.err) + mcalls = append(mcalls, call) + } + gomock.InOrder(mcalls...) 
+ + o := NewOsDeviceConnectivityHelperScsiGenericForTest(fake_executer, fake_helper, fake_mutex) + DMdevice, err := o.GetMpathDevice("volIdNotRelevant", lunId, tc.arrayIdentifiers, "iscsi") + if tc.expErr != nil || tc.expErrType != nil { + if err == nil { + t.Fatalf("Expected to fail with error, got success.") + } + if tc.expErrType != nil { + if reflect.TypeOf(err) != tc.expErrType { + t.Fatalf("Expected error type %v, got different error %v", tc.expErrType, reflect.TypeOf(err)) + } + } else { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expected error code %s, got %s", tc.expErr, err.Error()) + } + } + } + + if tc.expDMdevice != DMdevice { + t.Fatalf("Expected found mpath device %v, got %v", tc.expDMdevice, DMdevice) + } + + }) + } + + testCasesFc := []struct { + name string + arrayIdentifiers []string + + expErrType reflect.Type + expErr error + expDMdevice string + waitForPathToExistReturns []WaitForPathToExistReturn + getMultipathDiskReturns []GetMultipathDiskReturn + }{ + { + name: "Should fail when WaitForPathToExist not found any sd device", + arrayIdentifiers: []string{"x"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: nil, + }, + }, + + expErr: fmt.Errorf("Couldn't find multipath device for volumeID [volIdNotRelevant] lunID [0] from array [[0xx]]. Please check the host connectivity to the storage."), + expDMdevice: "", + }, + + { + name: "Should fail when WaitForPathToExist fail for some reason", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: fmt.Errorf("error"), + }, + }, + + expErr: fmt.Errorf("error"), + expDMdevice: "", + }, + + { + name: "Should fail when GetMultipathDisk fail for some reason", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/pci-fc-ID1-lun-1"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/pci-fc-ID1-lun-1", + path: "", + err: fmt.Errorf("error"), + }, + }, + + expErr: fmt.Errorf("error"), + expDMdevice: "", + }, + + { + name: "Should fail when GetMultipathDisk provide 2 different dms that apply to the same lun (bas multipathing case)", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/pci-fc-ID1-lun1", "/dev/disk/by-path/pci-fc-ID1-lun1___2"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/pci-fc-ID1-lun1", + path: "dm-1", + err: nil, + }, + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/pci-fc-ID1-lun1___2", + path: "dm-2", // The main point, look like multipath crazy and give to the same vol but different path a different md device, which is wrong case - so we check it. 
+ err: nil, + }, + }, + + expErrType: reflect.TypeOf(&device_connectivity.MultipleDmDevicesError{}), + expDMdevice: "", + }, + + { + name: "Should succeed to GetMpathDevice - good path", + arrayIdentifiers: []string{"X"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/pci-fc-ID1-lun1", "/dev/disk/by-path/pci-fc-ID1-lun1___2"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/pci-fc-ID1-lun1", + path: "dm-1", + err: nil, + }, + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/pci-fc-ID1-lun1___2", + path: "dm-1", // the same because there are 2 paths to the storage, so we should find 2 sd devices that point to the same dm device + err: nil, + }, + }, + + expErr: nil, + expDMdevice: "dm-1", + }, + + { + name: "Should succeed to GetMpathDevice with one more iqns", + arrayIdentifiers: []string{"X", "Y"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/pci-fc-0xX-lun1"}, + exists: true, + err: nil, + }, + WaitForPathToExistReturn{ + devicePaths: []string{"/dev/disk/by-path/pci-fc-0xY-lun2"}, + exists: true, + err: nil, + }, + }, + getMultipathDiskReturns: []GetMultipathDiskReturn{ + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/pci-fc-0xX-lun1", + path: "dm-1", + err: nil, + }, + GetMultipathDiskReturn{ + pathParam: "/dev/disk/by-path/pci-fc-0xY-lun2", + path: "dm-1", + err: nil, + }, + }, + + expErr: nil, + expDMdevice: "dm-1", + }, + + { + name: "Should fail when WaitForPathToExist return error with the first array wwn, and found no sd device with the second array wwn", + arrayIdentifiers: []string{"x", "y"}, + waitForPathToExistReturns: []WaitForPathToExistReturn{ + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: fmt.Errorf("error"), + }, + WaitForPathToExistReturn{ + devicePaths: nil, + exists: false, + err: nil, + }, + }, + + expErr: fmt.Errorf("error,Couldn't find multipath device for volumeID [volIdNotRelevant] lunID [0] from array [[0xy]]. Please check the host connectivity to the storage."), + expDMdevice: "", + }, + } + for _, tc := range testCasesFc { + + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + fake_executer := mocks.NewMockExecuterInterface(mockCtrl) + fake_helper := mocks.NewMockOsDeviceConnectivityHelperInterface(mockCtrl) + fake_mutex := &sync.Mutex{} + lunId := 0 + + var mcalls []*gomock.Call + for index, r := range tc.waitForPathToExistReturns { + array_inititor := "0x" + strings.ToLower(string(tc.arrayIdentifiers[index])) + path := strings.Join([]string{"/dev/disk/by-path/pci*", "fc", array_inititor, "lun", strconv.Itoa(lunId)}, "-") + call := fake_helper.EXPECT().WaitForPathToExist(path, 5, 1).Return( + r.devicePaths, + r.exists, + r.err) + mcalls = append(mcalls, call) + } + + for _, r := range tc.getMultipathDiskReturns { + call := fake_helper.EXPECT().GetMultipathDisk(r.pathParam).Return(r.path, r.err) + mcalls = append(mcalls, call) + } + gomock.InOrder(mcalls...) 
+ + o := NewOsDeviceConnectivityHelperScsiGenericForTest(fake_executer, fake_helper, fake_mutex) + DMdevice, err := o.GetMpathDevice("volIdNotRelevant", lunId, tc.arrayIdentifiers, "fc") + if tc.expErr != nil || tc.expErrType != nil { + if err == nil { + t.Fatalf("Expected to fail with error, got success.") + } + if tc.expErrType != nil { + if reflect.TypeOf(err) != tc.expErrType { + t.Fatalf("Expected error type %v, got different error %v", tc.expErrType, reflect.TypeOf(err)) + } + } else { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expected error code %s, got %s", tc.expErr, err.Error()) + } + } + } + + if tc.expDMdevice != DMdevice { + t.Fatalf("Expected found mpath device %v, got %v", tc.expDMdevice, DMdevice) + } + + }) + } +} + +func TestHelperWaitForPathToExist(t *testing.T) { + testCases := []struct { + name string + fpaths []string + expErr error + expFound bool + globReturnErr error + }{ + { + name: "Should fail when Glob return error", + fpaths: nil, + globReturnErr: fmt.Errorf("error"), + expErr: fmt.Errorf("error"), + expFound: false, + }, + { + name: "Should fail when Glob succeed but with no paths", + fpaths: nil, + globReturnErr: nil, + expErr: os.ErrNotExist, + expFound: false, + }, + { + name: "Should fail when Glob return error", + fpaths: []string{"/a/a", "/a/b"}, + globReturnErr: nil, + expErr: nil, + expFound: true, + }, + } + + for _, tc := range testCases { + + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + fake_executer := mocks.NewMockExecuterInterface(mockCtrl) + devicePath := []string{"/dev/disk/by-path/pci-fc-ARRAYWWN-lun-LUNID", "/dev/disk/by-path/ip-ARRAYIP-iscsi-ARRAYIQN-lun-LUNID"} + for _, dp := range devicePath { + fake_executer.EXPECT().FilepathGlob(dp).Return(tc.fpaths, tc.globReturnErr) + helperGeneric := device_connectivity.NewOsDeviceConnectivityHelperGeneric(fake_executer) + _, found, err := helperGeneric.WaitForPathToExist(dp, 1, 1) + if err != nil { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expected error code %s, got %s", tc.expErr, err.Error()) + } + } + if found != tc.expFound { + t.Fatalf("Expected found boolean code %t, got %t", tc.expFound, found) + } + } + }) + } +} + +type globReturn struct { + globReturnfpaths []string + globReturnErr error +} + +func TestHelperGetMultipathDisk(t *testing.T) { + testCases := []struct { + name string + osReadLinkReturnPath string + osReadLinkReturnExc error + globReturns []globReturn + + expErr error + expPath string + expErrType reflect.Type + }{ + { + name: "Should fail to if OsReadlink return error", + osReadLinkReturnPath: "", + osReadLinkReturnExc: fmt.Errorf("error"), + globReturns: nil, + + expErr: fmt.Errorf("error"), + expPath: "", + }, + { + name: "Succeed if OsReadlink return md-* device instead of sdX", + osReadLinkReturnPath: "../../dm-4", + osReadLinkReturnExc: nil, + globReturns: nil, + + expErr: nil, + expPath: "dm-4", + }, + + { + name: "Fail if OsReadlink return sdX but FilepathGlob failed", + osReadLinkReturnPath: "../../sdb", + osReadLinkReturnExc: nil, + globReturns: []globReturn{ + globReturn{ + globReturnfpaths: nil, + globReturnErr: fmt.Errorf("error"), + }, + }, + + expErr: fmt.Errorf("error"), + expPath: "", + }, + { + name: "Fail if OsReadlink return sdX but FilepathGlob not show any of the sdX", + osReadLinkReturnPath: "../../sdb", + osReadLinkReturnExc: nil, + + globReturns: []globReturn{ + globReturn{ + globReturnfpaths: []string{"/sys/block/dm-4"}, + globReturnErr: nil, + }, + globReturn{ + 
globReturnfpaths: nil, // so no dm devices found at all /sys/block/dm-*/slaves + globReturnErr: nil, + }, + }, + + expErrType: reflect.TypeOf(&device_connectivity.MultipleDeviceNotFoundError{}), + expPath: "", + }, + + { + name: "Should succeed to find the /dev/dm-4", + osReadLinkReturnPath: "../../sdb", + osReadLinkReturnExc: nil, + + globReturns: []globReturn{ + globReturn{ + globReturnfpaths: []string{"/sys/block/dm-4"}, + globReturnErr: nil, + }, + globReturn{ + globReturnfpaths: []string{"/sys/block/dm-4/slaves/sdb"}, + globReturnErr: nil, + }, + }, + + expErr: nil, + expPath: "/dev/dm-4", + }, + } + + for _, tc := range testCases { + + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + fake_executer := mocks.NewMockExecuterInterface(mockCtrl) + path := []string{"/dev/disk/by-path/pci-fc-wwn:5", "/dev/disk/by-path/ip-ARRAYIP-iscsi-ARRAYIQN-lun-LUNID"} + + for _, dp := range path { + fake_executer.EXPECT().OsReadlink(dp).Return(tc.osReadLinkReturnPath, tc.osReadLinkReturnExc) + + if len(tc.globReturns) == 1 { + fake_executer.EXPECT().FilepathGlob("/sys/block/dm-*").Return(tc.globReturns[0].globReturnfpaths, tc.globReturns[0].globReturnErr) + } else if len(tc.globReturns) == 2 { + first := fake_executer.EXPECT().FilepathGlob("/sys/block/dm-*").Return(tc.globReturns[0].globReturnfpaths, tc.globReturns[0].globReturnErr) + second := fake_executer.EXPECT().FilepathGlob("/sys/block/dm-4/slaves/*").Return(tc.globReturns[1].globReturnfpaths, tc.globReturns[1].globReturnErr) + + gomock.InOrder(first, second) + } + helperGeneric := device_connectivity.NewOsDeviceConnectivityHelperGeneric(fake_executer) + + returnPath, err := helperGeneric.GetMultipathDisk(dp) + if tc.expErr != nil || tc.expErrType != nil { + if err == nil { + t.Fatalf("Expected to fail with error, got success.") + } + if tc.expErrType != nil { + if reflect.TypeOf(err) != tc.expErrType { + t.Fatalf("Expected error type %v, got different error %v", tc.expErrType, reflect.TypeOf(err)) + } + } else { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expected error code %s, got %s", tc.expErr, err.Error()) + } + } + } + if returnPath != tc.expPath { + t.Fatalf("Expected found multipath device %s, got %s", tc.expPath, returnPath) + } + } + }) + } +} + +type ioutilReadFileReturn struct { + ReadFileParam string // The param that the IoutilReadDir recive on each call. 
+ data []byte + err error +} + +func TestGetHostsIdByArrayIdentifier(t *testing.T) { + testCasesIscsi := []struct { + name string + ioutilReadFileReturns []ioutilReadFileReturn + arrayIdentifier string + + expErrType reflect.Type + expErr error + expHostList []int + globReturnMatches []string + globReturnErr error + }{ + { + name: "Should fail when FilepathGlob return error", + arrayIdentifier: "iqn.1986-03.com.ibm:2145.v7k194.node2", + globReturnMatches: nil, + globReturnErr: fmt.Errorf("error"), + expErr: fmt.Errorf("error"), + expHostList: nil, + }, + { + name: "Should fail when FilepathGlob return without any hosts target files at all", + arrayIdentifier: "iqn.1986-03.com.ibm:2145.v7k194.node2", + globReturnMatches: nil, + globReturnErr: nil, + + expErrType: reflect.TypeOf(&device_connectivity.ConnectivityIdentifierStorageTargetNotFoundError{}), + expHostList: nil, + }, + { + name: "Should fail when array IQN was not found in target files at all", + ioutilReadFileReturns: []ioutilReadFileReturn{ + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/iscsi_host/host1/device/session1/iscsi_session/session1/targetname", + data: []byte("fakeIQN_OTHER"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/iscsi_host/host2/device/session2/iscsi_session/session2/targetname", + data: []byte("fakeIQN_OTHER"), + err: nil, + }, + }, + arrayIdentifier: "iqn.1986-03.com.ibm:2145.v7k194.node2", + globReturnMatches: []string{ + "/sys/class/iscsi_host/host1/device/session1/iscsi_session/session1/targetname", + "/sys/class/iscsi_host/host2/device/session2/iscsi_session/session2/targetname", + }, + globReturnErr: nil, + + expErrType: reflect.TypeOf(&device_connectivity.ConnectivityIdentifierStorageTargetNotFoundError{}), + expHostList: nil, + }, + { + name: "Should fail when array IQN found but hostX where X is not int", + ioutilReadFileReturns: []ioutilReadFileReturn{ + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/iscsi_host/hostX/device/session1/iscsi_session/session1/targetname", + data: []byte("fakeIQN"), + err: nil, + }, + }, + arrayIdentifier: "iqn.1986-03.com.ibm:2145.v7k194.node2", + + globReturnMatches: []string{ + "/sys/class/iscsi_host/hostX/device/session1/iscsi_session/session1/targetname", + }, + globReturnErr: nil, + + expErrType: reflect.TypeOf(&device_connectivity.ConnectivityIdentifierStorageTargetNotFoundError{}), + expHostList: nil, + }, + + { + name: "Should succeed to find host1 and host2 for the array IQN (while host3 is not from this IQN and also host666 fail ignore)", + ioutilReadFileReturns: []ioutilReadFileReturn{ + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/iscsi_host/host1/device/session1/iscsi_session/session1/targetname", + data: []byte("iqn.1986-03.com.ibm:2145.v7k194.node2"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/iscsi_host/host2/device/session1/iscsi_session/session1/targetname", + data: []byte("iqn.1986-03.com.ibm:2145.v7k194.node2"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/iscsi_host/host3/device/session1/iscsi_session/session1/targetname", + data: []byte("fakeIQN_OTHER"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/iscsi_host/host666/device/session1/iscsi_session/session1/targetname", + data: nil, + err: fmt.Errorf("error"), + }, + }, + arrayIdentifier: "iqn.1986-03.com.ibm:2145.v7k194.node2", + + globReturnMatches: []string{ + "/sys/class/iscsi_host/host1/device/session1/iscsi_session/session1/targetname", + 
"/sys/class/iscsi_host/host2/device/session1/iscsi_session/session1/targetname", + "/sys/class/iscsi_host/host3/device/session1/iscsi_session/session1/targetname", + "/sys/class/iscsi_host/host666/device/session1/iscsi_session/session1/targetname", + }, + globReturnErr: nil, + + expErrType: nil, + expHostList: []int{1, 2}, + }, + } + + for _, tc := range testCasesIscsi { + + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + fake_executer := mocks.NewMockExecuterInterface(mockCtrl) + + fake_executer.EXPECT().FilepathGlob(device_connectivity.IscsiHostRexExPath).Return(tc.globReturnMatches, tc.globReturnErr) + + var mcalls []*gomock.Call + for _, r := range tc.ioutilReadFileReturns { + call := fake_executer.EXPECT().IoutilReadFile(r.ReadFileParam).Return(r.data, r.err) + mcalls = append(mcalls, call) + } + gomock.InOrder(mcalls...) + + helperGeneric := device_connectivity.NewOsDeviceConnectivityHelperGeneric(fake_executer) + + returnHostList, err := helperGeneric.GetHostsIdByArrayIdentifier(tc.arrayIdentifier) + if tc.expErr != nil || tc.expErrType != nil { + if err == nil { + t.Fatalf("Expected to fail with error, got success.") + } + if tc.expErrType != nil { + if reflect.TypeOf(err) != tc.expErrType { + t.Fatalf("Expected error type %v, got different error %v", tc.expErrType, reflect.TypeOf(err)) + } + } else { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expected error code %s, got %s", tc.expErr, err.Error()) + } + } + } + + if len(tc.expHostList) == 0 && len(returnHostList) == 0 { + return + } else if !reflect.DeepEqual(returnHostList, tc.expHostList) { + t.Fatalf("Expected found hosts dirs %v, got %v", tc.expHostList, returnHostList) + } + + }) + } + + testCasesFc := []struct { + name string + ioutilReadFileReturns []ioutilReadFileReturn + arrayIdentifier string + + expErrType reflect.Type + expErr error + expHostList []int + globReturnMatches []string + globReturnErr error + }{ + { + name: "Should fail when FilepathGlob return error", + arrayIdentifier: "fakeWWN", + globReturnMatches: nil, + globReturnErr: fmt.Errorf("error"), + expErr: fmt.Errorf("error"), + expHostList: nil, + }, + { + name: "Should fail when FilepathGlob return without any hosts target files at all", + arrayIdentifier: "fakeWWN", + globReturnMatches: nil, + globReturnErr: nil, + + expErrType: reflect.TypeOf(&device_connectivity.ConnectivityIdentifierStorageTargetNotFoundError{}), + expHostList: nil, + }, + { + name: "Should fail when all values are not match", + ioutilReadFileReturns: []ioutilReadFileReturn{ + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/fc_remote_ports/rport-3:0-0/port_name", + data: []byte("fakeWWN_other"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/fc_remote_ports/rport-4:0-0/port_name", + data: []byte("fakeWWN_other"), + err: nil, + }, + }, + arrayIdentifier: "fakeWWN", + globReturnMatches: []string{ + "/sys/class/fc_remote_ports/rport-3:0-0/port_name", + "/sys/class/fc_remote_ports/rport-4:0-0/port_name", + }, + globReturnErr: nil, + + expErrType: reflect.TypeOf(&device_connectivity.ConnectivityIdentifierStorageTargetNotFoundError{}), + expHostList: nil, + }, + + { + name: "Should succeed to find host33 and host34(host 35 offline, hott36 return error)", + ioutilReadFileReturns: []ioutilReadFileReturn{ + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/fc_remote_ports/rport-33:0-0/port_name", + data: []byte("fakeWWN"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: 
"/sys/class/fc_remote_ports/rport-34:0-0/port_name", + data: []byte("0xfakeWWN"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/fc_remote_ports/rport-35:0-0/port_name", + data: []byte("fakeWWN_other"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/fc_remote_ports/rport-36:0-0/port_name", + data: nil, + err: fmt.Errorf("error"), + }, + }, + arrayIdentifier: "fakeWWN", + + globReturnMatches: []string{ + "/sys/class/fc_remote_ports/rport-33:0-0/port_name", + "/sys/class/fc_remote_ports/rport-34:0-0/port_name", + "/sys/class/fc_remote_ports/rport-35:0-0/port_name", + "/sys/class/fc_remote_ports/rport-36:0-0/port_name", + }, + globReturnErr: nil, + + expErrType: nil, + expHostList: []int{33, 34}, + }, + + { + name: "Should succeed to find host5 and host6", + ioutilReadFileReturns: []ioutilReadFileReturn{ + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/fc_remote_ports/rport-5:0-0/port_name", + data: []byte("0xfakeWWN"), + err: nil, + }, + ioutilReadFileReturn{ + ReadFileParam: "/sys/class/fc_remote_ports/rport-6:0-0/port_name", + data: []byte("fakeWWN"), + err: nil, + }, + }, + arrayIdentifier: "fakeWWN", + + globReturnMatches: []string{ + "/sys/class/fc_remote_ports/rport-5:0-0/port_name", + "/sys/class/fc_remote_ports/rport-6:0-0/port_name", + }, + globReturnErr: nil, + + expErrType: nil, + expHostList: []int{5, 6}, + }, + } + + for _, tc := range testCasesFc { + + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + fake_executer := mocks.NewMockExecuterInterface(mockCtrl) + + fake_executer.EXPECT().FilepathGlob(device_connectivity.FC_HOST_SYSFS_PATH).Return(tc.globReturnMatches, tc.globReturnErr) + + var mcalls []*gomock.Call + for _, r := range tc.ioutilReadFileReturns { + call := fake_executer.EXPECT().IoutilReadFile(r.ReadFileParam).Return(r.data, r.err) + mcalls = append(mcalls, call) + } + gomock.InOrder(mcalls...) + + helperGeneric := device_connectivity.NewOsDeviceConnectivityHelperGeneric(fake_executer) + + returnHostList, err := helperGeneric.GetHostsIdByArrayIdentifier(tc.arrayIdentifier) + if tc.expErr != nil || tc.expErrType != nil { + if err == nil { + t.Fatalf("Expected to fail with error, got success.") + } + if tc.expErrType != nil { + if reflect.TypeOf(err) != tc.expErrType { + t.Fatalf("Expected error type %v, got different error %v", tc.expErrType, reflect.TypeOf(err)) + } + } else { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expected error code %s, got %s", tc.expErr, err.Error()) + } + } + } + + if len(tc.expHostList) == 0 && len(returnHostList) == 0 { + return + } else if !reflect.DeepEqual(returnHostList, tc.expHostList) { + t.Fatalf("Expected found hosts dirs %v, got %v", tc.expHostList, returnHostList) + } + + }) + } +} diff --git a/node/pkg/driver/device_connectivity/device_connectivity_interface.go b/node/pkg/driver/device_connectivity/device_connectivity_interface.go new file mode 100644 index 000000000..49cd744ca --- /dev/null +++ b/node/pkg/driver/device_connectivity/device_connectivity_interface.go @@ -0,0 +1,26 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package device_connectivity + +//go:generate mockgen -destination=../../../mocks/mock_OsDeviceConnectivityInterface.go -package=mocks github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity OsDeviceConnectivityInterface + +type OsDeviceConnectivityInterface interface { + RescanDevices(lunId int, arrayIdentifier []string) error // For NVME lunID will be namespace ID. + GetMpathDevice(volumeId string, lunId int, arrayIdentifiers []string) (string, error) + FlushMultipathDevice(mpathDevice string) error + RemovePhysicalDevice(sysDevices []string) error +} diff --git a/node/pkg/driver/device_connectivity/device_connectivity_iscsi.go b/node/pkg/driver/device_connectivity/device_connectivity_iscsi.go new file mode 100644 index 000000000..0d6390984 --- /dev/null +++ b/node/pkg/driver/device_connectivity/device_connectivity_iscsi.go @@ -0,0 +1,59 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package device_connectivity + +import ( + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" +) + +type OsDeviceConnectivityIscsi struct { + Executer executer.ExecuterInterface + HelperScsiGeneric OsDeviceConnectivityHelperScsiGenericInterface +} + +func NewOsDeviceConnectivityIscsi(executer executer.ExecuterInterface) OsDeviceConnectivityInterface { + return &OsDeviceConnectivityIscsi{ + Executer: executer, + HelperScsiGeneric: NewOsDeviceConnectivityHelperScsiGeneric(executer), + } +} + +func (r OsDeviceConnectivityIscsi) RescanDevices(lunId int, arrayIdentifiers []string) error { + return r.HelperScsiGeneric.RescanDevices(lunId, arrayIdentifiers) +} + +func (r OsDeviceConnectivityIscsi) GetMpathDevice(volumeId string, lunId int, arrayIdentifiers []string) (string, error) { + /* + Description: + 1. Find all the files "/dev/disk/by-path/ip--iscsi--lun- -> ../../sd + Note: Instead of setting here the IP we just search for * on that. + 2. Get the sd devices. + 3. Search '/sys/block/dm-*\/slaves/*' and get the . For example dm-3 below: + /sys/block/dm-3/slaves/sdb -> ../../../../platform/host41/session9/target41:0:0/41:0:0:13/block/sdb + + Return Value: "dm-X" of the volumeID by using the LunID and the arrayIqn. 
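As an illustration of step 3, a minimal standalone sketch that maps an sd device to its dm device by scanning the slaves directories; the function name and the hard-coded "sdb" are hypothetical, and the driver's real lookup is the GetMultipathDisk helper exercised by the tests above:

package main

import (
	"fmt"
	"path/filepath"
)

// findDmForSd resolves an sd device (e.g. "sdb") to its dm-* device by scanning
// /sys/block/dm-*/slaves/*. Sketch only, for illustration of the lookup described above.
func findDmForSd(sd string) (string, error) {
	slaves, err := filepath.Glob("/sys/block/dm-*/slaves/*")
	if err != nil {
		return "", err
	}
	for _, slave := range slaves {
		if filepath.Base(slave) == sd {
			// e.g. /sys/block/dm-3/slaves/sdb -> "dm-3"
			return filepath.Base(filepath.Dir(filepath.Dir(slave))), nil
		}
	}
	return "", fmt.Errorf("no dm device found for %s", sd)
}

func main() {
	dm, err := findDmForSd("sdb")
	fmt.Println(dm, err)
}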
+ */ + return r.HelperScsiGeneric.GetMpathDevice(volumeId, lunId, arrayIdentifiers, "iscsi") +} + +func (r OsDeviceConnectivityIscsi) FlushMultipathDevice(mpathDevice string) error { + return r.HelperScsiGeneric.FlushMultipathDevice(mpathDevice) +} + +func (r OsDeviceConnectivityIscsi) RemovePhysicalDevice(sysDevices []string) error { + return r.HelperScsiGeneric.RemovePhysicalDevice(sysDevices) +} diff --git a/node/pkg/driver/device_connectivity/errors.go b/node/pkg/driver/device_connectivity/errors.go new file mode 100644 index 000000000..b8aadb3b2 --- /dev/null +++ b/node/pkg/driver/device_connectivity/errors.go @@ -0,0 +1,64 @@ +package device_connectivity + +import ( + "fmt" +) + +type MultipleDmDevicesError struct { + VolumeId string + LunId int + ArrayIqns []string + MultipathDevicesMap map[string]bool +} + +func (e *MultipleDmDevicesError) Error() string { + var mps string + for key := range e.MultipathDevicesMap { + mps += ", " + key + } + return fmt.Sprintf("Detected more then one multipath devices (%s) for single volume (%s) with lunID %d from array target iqn %v", mps, e.VolumeId, e.LunId, e.ArrayIqns) +} + +type MultipleDeviceNotFoundForLunError struct { + VolumeId string + LunId int + ArrayIqns []string +} + +func (e *MultipleDeviceNotFoundForLunError) Error() string { + return fmt.Sprintf("Couldn't find multipath device for volumeID [%s] lunID [%d] from array [%s]. Please check the host connectivity to the storage.", e.VolumeId, e.LunId, e.ArrayIqns) +} + +type ConnectivityIdentifierStorageTargetNotFoundError struct { + StorageTargetName string + DirectoryPath string +} + +func (e *ConnectivityIdentifierStorageTargetNotFoundError) Error() string { + return fmt.Sprintf("Connectivity Error: Storage target name [%s] was not found on the host, under directory %s. Please check the host connectivity to the storage.", e.StorageTargetName, e.DirectoryPath) +} + +type MultipleDeviceNotFoundError struct { + DiskByPathDevice string + LinkToPhysicalDevice string +} + +func (e *MultipleDeviceNotFoundError) Error() string { + return fmt.Sprintf("Couldn't find dm-* of the physical device path [%s -> %s]. Please check the host connectivity to the storage.", e.DiskByPathDevice, e.LinkToPhysicalDevice) +} + +type ErrorNothingWasWrittenToScanFileError struct { + path string +} + +func (e *ErrorNothingWasWrittenToScanFileError) Error() string { + return fmt.Sprintf("Rescan Error: Nothing was written to rescan file : {%s}", e.path) +} + +type ErrorNotFoundArrayIdentifiers struct { + lunId int +} + +func (e *ErrorNotFoundArrayIdentifiers) Error() string { + return fmt.Sprintf("Couldn't find arrayIdentifiers found for lunId: {%d}", e.lunId) +} diff --git a/node/pkg/driver/driver.go b/node/pkg/driver/driver.go new file mode 100644 index 000000000..a3b9e9158 --- /dev/null +++ b/node/pkg/driver/driver.go @@ -0,0 +1,160 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
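The typed errors defined in errors.go above let callers branch on the failure mode instead of parsing message strings. A hedged sketch of such a caller; the function name and the returned strings are illustrative and not part of the driver:

package main

import (
	"fmt"

	"github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity"
)

// classifyConnectivityError is illustrative only: it maps the error types above
// to short human-readable categories.
func classifyConnectivityError(err error) string {
	switch err.(type) {
	case *device_connectivity.ConnectivityIdentifierStorageTargetNotFoundError:
		return "storage target not visible on this host"
	case *device_connectivity.MultipleDmDevicesError:
		return "more than one dm device maps to the same LUN"
	case *device_connectivity.MultipleDeviceNotFoundForLunError:
		return "no multipath device found for the LUN"
	default:
		return "unexpected error"
	}
}

func main() {
	err := &device_connectivity.MultipleDeviceNotFoundForLunError{VolumeId: "vol1", LunId: 0}
	fmt.Println(classifyConnectivityError(err))
}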
+ */ + +package driver + +import ( + "context" + "io/ioutil" + "net" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/ibm/ibm-block-csi-driver/node/logger" + "github.com/ibm/ibm-block-csi-driver/node/util" + + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" + "google.golang.org/grpc" + "gopkg.in/yaml.v2" + "k8s.io/kubernetes/pkg/util/mount" +) + +type Driver struct { + NodeService + srv *grpc.Server + endpoint string + config ConfigFile +} + +func NewDriver(endpoint string, configFilePath string, hostname string) (*Driver, error) { + configFile, err := ReadConfigFile(configFilePath) + if err != nil { + return nil, err + } + logger.Infof("Driver: %v Version: %v", configFile.Identity.Name, configFile.Identity.Version) + + mounter := &mount.SafeFormatAndMount{ + Interface: mount.New(""), + Exec: mount.NewOsExec(), + } + + syncLock := NewSyncLock() + executer := &executer.Executer{} + osDeviceConnectivityMapping := map[string]device_connectivity.OsDeviceConnectivityInterface{ + "iscsi": device_connectivity.NewOsDeviceConnectivityIscsi(executer), + "fc": device_connectivity.NewOsDeviceConnectivityFc(executer), + // TODO nvme + } + return &Driver{ + endpoint: endpoint, + config: configFile, + NodeService: NewNodeService(configFile, hostname, *NewNodeUtils(executer), osDeviceConnectivityMapping, executer, mounter, syncLock), + }, nil +} + +func (d *Driver) Run() error { + scheme, addr, err := util.ParseEndpoint(d.endpoint) + if err != nil { + return err + } + + listener, err := net.Listen(scheme, addr) + if err != nil { + return err + } + + logErr := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + resp, err := handler(ctx, req) + if err != nil { + logger.Errorf("GRPC error: %v", err) + } + return resp, err + } + opts := []grpc.ServerOption{ + grpc.UnaryInterceptor(logErr), + } + d.srv = grpc.NewServer(opts...) + + csi.RegisterIdentityServer(d.srv, d) + csi.RegisterNodeServer(d.srv, d) + + logger.Infof("Listening for connections on address: %#v", listener.Addr()) + return d.srv.Serve(listener) +} + +func (d *Driver) Stop() { + logger.Infof("Stopping server") + d.srv.Stop() +} + +type ConfigFile struct { + Identity struct { + Name string + Version string + // TODO missing capabilities - currently the csi node is setting driver capability hardcoded. fix it low priority. + } + Controller struct { + Publish_context_lun_parameter string + Publish_context_connectivity_parameter string + Publish_context_array_iqn string + Publish_context_fc_initiators string + } +} + +const ( + DefualtConfigFile string = "config.yaml" + EnvNameDriverConfFile string = "DRIVER_CONFIG_YML" +) + +func ReadConfigFile(configFilePath string) (ConfigFile, error) { + var configFile ConfigFile + + configYamlPath := configFilePath + if configYamlPath == "" { + configYamlPath = DefualtConfigFile + logger.Debugf("Not found config file environment variable %s. 
Set default value %s.", EnvNameDriverConfFile, configYamlPath) + } else { + logger.Debugf("Config file environment variable %s=%s", EnvNameDriverConfFile, configYamlPath) + logger.Info(logger.GetLevel()) + } + + yamlFile, err := ioutil.ReadFile(configYamlPath) + if err != nil { + logger.Errorf("failed to read file %q: %v", yamlFile, err) + return ConfigFile{}, err + } + + err = yaml.Unmarshal(yamlFile, &configFile) + if err != nil { + logger.Errorf("error unmarshaling yaml: %v", err) + return ConfigFile{}, err + } + + // Verify mandatory attributes in config file + if configFile.Identity.Name == "" { + err := &ConfigYmlEmptyAttribute{"Identity.Name"} + logger.Errorf("%v", err) + return ConfigFile{}, err + } + + if configFile.Identity.Version == "" { + err := &ConfigYmlEmptyAttribute{"Identity.Version"} + logger.Errorf("%v", err) + return ConfigFile{}, err + } + + return configFile, nil +} diff --git a/node/pkg/driver/errors.go b/node/pkg/driver/errors.go new file mode 100644 index 000000000..35ed20614 --- /dev/null +++ b/node/pkg/driver/errors.go @@ -0,0 +1,45 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package driver + +import ( + "fmt" +) + +type ConfigYmlEmptyAttribute struct { + Attr string +} + +func (e *ConfigYmlEmptyAttribute) Error() string { + return fmt.Sprintf("Missing attribute [%s] in driver config yaml file", e.Attr) +} + +type RequestValidationError struct { + Msg string +} + +func (e *RequestValidationError) Error() string { + return fmt.Sprintf("Request Validation Error: %s", e.Msg) +} + +type VolumeAlreadyProcessingError struct { + volId string +} + +func (e *VolumeAlreadyProcessingError) Error() string { + return fmt.Sprintf("Volume %s is already processing. request cannot be completed.", e.volId) +} diff --git a/node/pkg/driver/executer/executer.go b/node/pkg/driver/executer/executer.go new file mode 100644 index 000000000..980f05db0 --- /dev/null +++ b/node/pkg/driver/executer/executer.go @@ -0,0 +1,97 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package executer + +import ( + "context" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/ibm/ibm-block-csi-driver/node/logger" +) + +//go:generate mockgen -destination=../../../mocks/mock_executer.go -package=mocks github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer ExecuterInterface +type ExecuterInterface interface { // basic host dependent functions + ExecuteWithTimeout(mSeconds int, command string, args []string) ([]byte, error) + OsOpenFile(name string, flag int, perm os.FileMode) (*os.File, error) + OsReadlink(name string) (string, error) + FilepathGlob(pattern string) (matches []string, err error) + IoutilReadDir(dirname string) ([]os.FileInfo, error) + IoutilReadFile(filename string) ([]byte, error) + FileWriteString(f *os.File, s string) (n int, err error) +} + +type Executer struct { +} + +func (e *Executer) ExecuteWithTimeout(mSeconds int, command string, args []string) ([]byte, error) { + logger.Debugf("Executing command : {%v} with args : {%v}. and timeout : {%v} mseconds", command, args, mSeconds) + + // Create a new context and add a timeout to it + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(mSeconds)*time.Millisecond) + defer cancel() // The cancel should be deferred so resources are cleaned up + + // Create the command with our context + cmd := exec.CommandContext(ctx, command, args...) + + // This time we can simply use Output() to get the result. + out, err := cmd.Output() + + // We want to check the context error to see if the timeout was executed. + // The error returned by cmd.Output() will be OS specific based on what + // happens when a process is killed. + if ctx.Err() == context.DeadlineExceeded { + logger.Debugf("Command %s timeout reached", command) + return nil, ctx.Err() + } + + // If there's no context error, we know the command completed (or errored). + logger.Debugf("Output from command: %s", string(out)) + if err != nil { + logger.Debugf("Non-zero exit code: %s", err) + } + + logger.Debugf("Finished executing command") + return out, err +} + +func (e *Executer) OsOpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +func (e *Executer) OsReadlink(name string) (string, error) { + return os.Readlink(name) +} + +func (e *Executer) FilepathGlob(pattern string) (matches []string, err error) { + return filepath.Glob(pattern) +} + +func (e *Executer) IoutilReadDir(dirname string) ([]os.FileInfo, error) { + return ioutil.ReadDir(dirname) +} + +func (e *Executer) IoutilReadFile(filename string) ([]byte, error) { + return ioutil.ReadFile(filename) +} + +func (e *Executer) FileWriteString(f *os.File, s string) (n int, err error) { + return f.WriteString(s) +} diff --git a/node/pkg/driver/identity.go b/node/pkg/driver/identity.go new file mode 100644 index 000000000..54569aaf4 --- /dev/null +++ b/node/pkg/driver/identity.go @@ -0,0 +1,54 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
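A short usage sketch for the Executer above; the command, arguments, and the 3-second timeout are arbitrary examples rather than values the driver itself uses:

package main

import (
	"context"
	"fmt"

	"github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer"
)

func main() {
	e := &executer.Executer{}

	// Run "multipath -ll" with a 3000 millisecond budget.
	out, err := e.ExecuteWithTimeout(3000, "multipath", []string{"-ll"})
	if err == context.DeadlineExceeded {
		fmt.Println("command timed out")
		return
	}
	if err != nil {
		fmt.Println("command failed:", err)
		return
	}
	fmt.Printf("%s", out)
}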
+ */ + +package driver + +import ( + "context" + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/ibm/ibm-block-csi-driver/node/logger" +) + +func (d *Driver) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { + logger.Debugf("GetPluginInfo: called with args %+v", *req) + resp := &csi.GetPluginInfoResponse{ + Name: d.config.Identity.Name, + VendorVersion: d.config.Identity.Version, + } + + return resp, nil +} + +func (d *Driver) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + logger.Debugf("GetPluginCapabilities: called with args %+v", *req) + resp := &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, // Note: its not taken from the config.yaml like the csi controller. + }, + }, + }, + }, + } + + return resp, nil +} + +func (d *Driver) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) { + return &csi.ProbeResponse{}, nil +} diff --git a/node/pkg/driver/messages.go b/node/pkg/driver/messages.go new file mode 100644 index 000000000..9ff06202d --- /dev/null +++ b/node/pkg/driver/messages.go @@ -0,0 +1,22 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package driver + +var ErrorWhileTryingToReadIQN = "Error while trying to get iqn from string: %v." + +var ErrorUnsupportedConnectivityType = "Unsupported connectivity type : {%v}" + +var ErrorWhileTryingToReadFC = "Error while tring to get FC port from string: %v." diff --git a/node/pkg/driver/node.go b/node/pkg/driver/node.go new file mode 100644 index 000000000..46844b745 --- /dev/null +++ b/node/pkg/driver/node.go @@ -0,0 +1,547 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package driver + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/ibm/ibm-block-csi-driver/node/goid_info" + "github.com/ibm/ibm-block-csi-driver/node/logger" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/device_connectivity" + "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" + "github.com/ibm/ibm-block-csi-driver/node/util" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/kubernetes/pkg/util/mount" +) + +var ( + nodeCaps = []csi.NodeServiceCapability_RPC_Type{ + csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + } + + // volumeCaps represents how the volume could be accessed. + // It is SINGLE_NODE_WRITER since EBS volume could only be + // attached to a single node at any given time. + volumeCaps = []csi.VolumeCapability_AccessMode{ + { + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + } + + defaultFSType = "ext4" + stageInfoFilename = ".stageInfo.json" + supportedConnectivityTypes = map[string]bool{ + "iscsi": true, + "fc": true, + // TODO add nvme later on + } + + IscsiFullPath = "/host/etc/iscsi/initiatorname.iscsi" +) + +const ( + // In the Dockerfile of the node, specific commands (e.g: multipath, mount...) from the host mounted inside the container in /host directory. + // Command lines inside the container will show /host prefix. + PrefixChrootOfHostRoot = "/host" + FCPath = "/sys/class/fc_host" + FCPortPath = "/sys/class/fc_host/host*/port_name" +) + +// nodeService represents the node service of CSI driver +type NodeService struct { + mounter *mount.SafeFormatAndMount + ConfigYaml ConfigFile + Hostname string + NodeUtils NodeUtilsInterface + executer executer.ExecuterInterface + VolumeIdLocksMap SyncLockInterface + OsDeviceConnectivityMapping map[string]device_connectivity.OsDeviceConnectivityInterface +} + +// newNodeService creates a new node service +// it panics if failed to create the service +func NewNodeService(configYaml ConfigFile, hostname string, nodeUtils NodeUtilsInterface, OsDeviceConnectivityMapping map[string]device_connectivity.OsDeviceConnectivityInterface, executer executer.ExecuterInterface, mounter *mount.SafeFormatAndMount, syncLock SyncLockInterface) NodeService { + return NodeService{ + ConfigYaml: configYaml, + Hostname: hostname, + NodeUtils: nodeUtils, + executer: executer, + OsDeviceConnectivityMapping: OsDeviceConnectivityMapping, + mounter: mounter, + VolumeIdLocksMap: syncLock, + } +} + +func (d *NodeService) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { + goid_info.SetAdditionalIDInfo(req.VolumeId) + defer goid_info.DeleteAdditionalIDInfo() + logger.Debugf(">>>> NodeStageVolume: called with args %+v", *req) + defer logger.Debugf("<<<< NodeStageVolume") + + err := d.nodeStageVolumeRequestValidation(req) + if err != nil { + switch err.(type) { + case *RequestValidationError: + return nil, status.Error(codes.InvalidArgument, err.Error()) + default: + return nil, status.Error(codes.Internal, err.Error()) + } + } + + volId := req.VolumeId + err = d.VolumeIdLocksMap.AddVolumeLock(volId, "NodeStageVolume") + if err != nil { + logger.Errorf("Another operation is being perfomed on volume : {%s}.", volId) + return nil, status.Error(codes.Aborted, err.Error()) + } + + defer d.VolumeIdLocksMap.RemoveVolumeLock(volId, "NodeStageVolume") + + connectivityType, lun, arrayInitiators, err := 
d.NodeUtils.GetInfoFromPublishContext(req.PublishContext, d.ConfigYaml)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	stagingPath := req.GetStagingTargetPath() // e.g. in k8s /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-21967c74-b456-11e9-b93e-005056a45d5f/globalmount
+
+	osDeviceConnectivity, ok := d.OsDeviceConnectivityMapping[connectivityType]
+	if !ok {
+		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Wrong connectivity type %s", connectivityType))
+	}
+
+	err = osDeviceConnectivity.RescanDevices(lun, arrayInitiators)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	device, err := osDeviceConnectivity.GetMpathDevice(volId, lun, arrayInitiators)
+	logger.Debugf("Discovered device : {%v}", device)
+	if err != nil {
+		logger.Errorf("Error while discovering the device : {%v}", err.Error())
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	// TODO move stageInfo into the node_utils API
+	// Generate the stageInfo detail
+	stageInfoPath := path.Join(stagingPath, stageInfoFilename)
+	stageInfo := make(map[string]string)
+	baseDevice := path.Base(device)
+	stageInfo["mpathDevice"] = baseDevice // the multipath device name, e.g. dm-1
+	sysDevices, err := d.NodeUtils.GetSysDevicesFromMpath(baseDevice)
+	if err != nil {
+		logger.Errorf("Error while trying to get sys devices : {%v}", err.Error())
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	stageInfo["sysDevices"] = sysDevices // like sda,sdb,...
+	stageInfo["connectivity"] = connectivityType
+
+	// Idempotency check: the stageInfoPath file may already exist from a previous NodeStageVolume call
+	if d.NodeUtils.StageInfoFileIsExist(stageInfoPath) {
+		// the file already exists
+		logger.Warningf("Idempotent case: stage info file exists - indicates that node stage was already done on this path. Verify its content...")
+		// read the file and compare the stageInfo
+		existingStageInfo, err := d.NodeUtils.ReadFromStagingInfoFile(stageInfoPath)
+		if err != nil {
+			logger.Warningf("Could not read and compare the info inside the staging info file. error : {%v}", err)
+		} else {
+			logger.Warningf("Idempotent case: check if stage info file is as expected. stage info is {%v} vs expected {%v}", existingStageInfo, stageInfo)
+
+			if (stageInfo["mpathDevice"] != existingStageInfo["mpathDevice"]) ||
+				(stageInfo["sysDevices"] != existingStageInfo["sysDevices"]) ||
+				(stageInfo["connectivity"] != existingStageInfo["connectivity"]) {
+				logger.Errorf("Stage info is not as expected. expected: {%v}. got : {%v}", stageInfo, existingStageInfo)
+				// err is nil in this branch, so build an explicit error instead of dereferencing it
+				return nil, status.Errorf(codes.AlreadyExists, "Stage info is not as expected. expected: %v. got: %v", stageInfo, existingStageInfo)
+			}
+			logger.Warningf("Idempotent case: stage info file is the same as expected. 
NodeStageVolume Finished: multipath device is ready [%s] to be mounted by NodePublishVolume API.", baseDevice) + return &csi.NodeStageVolumeResponse{}, nil + } + } + + if err := d.NodeUtils.WriteStageInfoToFile(stageInfoPath, stageInfo); err != nil { + logger.Errorf("Error while trying to save the stage metadata file: {%v}", err.Error()) + return nil, status.Error(codes.Internal, err.Error()) + } + + logger.Debugf("NodeStageVolume Finished: multipath device is ready [%s] to be mounted by NodePublishVolume API.", baseDevice) + return &csi.NodeStageVolumeResponse{}, nil +} + +func (d *NodeService) nodeStageVolumeRequestValidation(req *csi.NodeStageVolumeRequest) error { + + volumeID := req.GetVolumeId() + if len(volumeID) == 0 { + return &RequestValidationError{"Volume ID not provided"} + } + + target := req.GetStagingTargetPath() + if len(target) == 0 { + return &RequestValidationError{"Staging target not provided"} + } + + volCap := req.GetVolumeCapability() + if volCap == nil { + return &RequestValidationError{"Volume capability not provided"} + } + + if !isValidVolumeCapabilitiesAccessMode([]*csi.VolumeCapability{volCap}) { + return &RequestValidationError{"Volume capability AccessMode not supported"} + } + + // If the access type is block, do nothing for stage + switch volCap.GetAccessType().(type) { + case *csi.VolumeCapability_Block: + return &RequestValidationError{"Volume Access Type Block is not supported yet"} + } + + connectivityType, lun, arrayInitiators, err := d.NodeUtils.GetInfoFromPublishContext(req.PublishContext, d.ConfigYaml) + if err != nil { + return &RequestValidationError{fmt.Sprintf("Fail to parse PublishContext %v with err = %v", req.PublishContext, err)} + } + + if _, ok := supportedConnectivityTypes[connectivityType]; !ok { + return &RequestValidationError{fmt.Sprintf("PublishContext with wrong connectivity type %s. 
Supported connectivities %v", connectivityType, supportedConnectivityTypes)} + } + + if lun < 0 { + return &RequestValidationError{fmt.Sprintf("PublishContext with wrong lun id %d.", lun)} + } + + if len(arrayInitiators) == 0 { + return &RequestValidationError{fmt.Sprintf("PublishContext with wrong arrayInitiators %s.", arrayInitiators)} + } + + return nil +} + +func (d *NodeService) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { + volumeID := req.GetVolumeId() + goid_info.SetAdditionalIDInfo(volumeID) + defer goid_info.DeleteAdditionalIDInfo() + logger.Debugf(">>>> NodeUnstageVolume: called with args %+v", *req) + defer logger.Debugf("<<<< NodeUnstageVolume") + + if len(volumeID) == 0 { + logger.Errorf("Volume ID not provided") + return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") + } + + err := d.VolumeIdLocksMap.AddVolumeLock(volumeID, "NodeUnstageVolume") + if err != nil { + logger.Errorf("Another operation is being perfomed on volume : {%s}", volumeID) + return nil, status.Error(codes.Aborted, err.Error()) + } + defer d.VolumeIdLocksMap.RemoveVolumeLock(volumeID, "NodeUnstageVolume") + + stagingTargetPath := req.GetStagingTargetPath() + if len(stagingTargetPath) == 0 { + logger.Errorf("Staging target not provided") + return nil, status.Error(codes.InvalidArgument, "Staging target not provided") + } + + logger.Debugf("Reading stage info file") + stageInfoPath := path.Join(stagingTargetPath, stageInfoFilename) + infoMap, err := d.NodeUtils.ReadFromStagingInfoFile(stageInfoPath) + if err != nil { + if os.IsNotExist(err) { + logger.Warningf("Idempotent case : stage info file does not exist. Finish NodeUnstageVolume OK.") + return &csi.NodeUnstageVolumeResponse{}, nil + } else { + logger.Errorf("Error while trying to read from the staging info file : {%v}", err.Error()) + return nil, status.Error(codes.Internal, err.Error()) + } + } + logger.Debugf("Reading stage info file detail : {%v}", infoMap) + + connectivityType := infoMap["connectivity"] + mpathDevice := infoMap["mpathDevice"] + sysDevices := strings.Split(infoMap["sysDevices"], ",") + + logger.Debugf("Got info from stageInfo file. connectivity : {%v}. 
device : {%v}, sysDevices : {%v}", connectivityType, mpathDevice, sysDevices) + + osDeviceConnectivity, ok := d.OsDeviceConnectivityMapping[connectivityType] + if !ok { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Wrong connectivity type %s", connectivityType)) + } + + err = osDeviceConnectivity.FlushMultipathDevice(mpathDevice) + if err != nil { + return nil, status.Errorf(codes.Internal, "Multipath -f command failed with error: %v", err) + } + err = osDeviceConnectivity.RemovePhysicalDevice(sysDevices) + if err != nil { + return nil, status.Errorf(codes.Internal, "Remove iscsi device failed with error: %v", err) + } + + if err := d.NodeUtils.ClearStageInfoFile(stageInfoPath); err != nil { + return nil, status.Errorf(codes.Internal, "Fail to clear the stage info file: error %v", err) + } + + logger.Debugf("NodeUnStageVolume Finished: multipath device removed from host") + + return &csi.NodeUnstageVolumeResponse{}, nil +} + +func (d *NodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { + goid_info.SetAdditionalIDInfo(req.VolumeId) + defer goid_info.DeleteAdditionalIDInfo() + logger.Debugf(">>>> NodePublishVolume: called with args %+v", *req) + defer logger.Debugf("<<<< NodePublishVolume") + + err := d.nodePublishVolumeRequestValidation(req) + if err != nil { + switch err.(type) { + case *RequestValidationError: + return nil, status.Error(codes.InvalidArgument, err.Error()) + default: + return nil, status.Error(codes.Internal, err.Error()) + } + } + volumeID := req.VolumeId + + err = d.VolumeIdLocksMap.AddVolumeLock(volumeID, "NodePublishVolume") + if err != nil { + logger.Errorf("Another operation is being perfomed on volume : {%s}", volumeID) + return nil, status.Error(codes.Aborted, err.Error()) + } + defer d.VolumeIdLocksMap.RemoveVolumeLock(volumeID, "NodePublishVolume") + + // checking if the node staging path was mpounted into + stagingPath := req.GetStagingTargetPath() + targetPath := req.GetTargetPath() + targetPathWithHostPrefix := path.Join(PrefixChrootOfHostRoot, targetPath) + + logger.Debugf("stagingPath : {%v}, targetPath : {%v}", stagingPath, targetPath) + + // Read staging info file in order to find the mpath device for mounting. + stageInfoPath := path.Join(stagingPath, stageInfoFilename) + infoMap, err := d.NodeUtils.ReadFromStagingInfoFile(stageInfoPath) + if err != nil { + // Note: after validation it looks like k8s create the directory in advance. So we don't try to remove it at the Unpublish + logger.Errorf("Error while trying to read from the staging info file : {%v}", err.Error()) + return nil, status.Error(codes.Internal, err.Error()) + } + + mpathDevice := filepath.Join(device_connectivity.DevPath, infoMap["mpathDevice"]) + logger.Debugf("Got info from stageInfo file. device : {%v}", mpathDevice) + + logger.Debugf("Check if targetPath {%s} exist in mount list", targetPath) + mountList, err := d.mounter.List() + for _, mount := range mountList { + logger.Tracef("Check if mount path({%v}) [with device({%v})] is equel to targetPath {%s}", mount.Path, mount.Device, targetPath) + if mount.Path == targetPathWithHostPrefix { + if mount.Device == mpathDevice { + logger.Warningf("Idempotent case : targetPath already mounted (%s), so no need to mount again. 
Finish NodePublishVolume.", targetPath) + return &csi.NodePublishVolumeResponse{}, nil + } else { + return nil, status.Errorf(codes.AlreadyExists, "Mount point is already mounted to but with different multipath device (%s), while the expected device is %s ", mount.Device, mpathDevice) + } + } + } + + // if the device is not mounted then we are mounting it. + + volumeCap := req.GetVolumeCapability() + fsType := volumeCap.GetMount().FsType + + if fsType == "" { + fsType = defaultFSType + } + + if _, err := os.Stat(targetPathWithHostPrefix); os.IsNotExist(err) { + logger.Debugf("Target path directory does not exist. creating : {%v}", targetPathWithHostPrefix) + d.mounter.MakeDir(targetPathWithHostPrefix) + } + + logger.Debugf("Mount the device with fs_type = {%v} (Create filesystem if needed)", fsType) + + logger.Debugf("FormatAndMount start [goid=%d]", util.GetGoID()) + err = d.mounter.FormatAndMount(mpathDevice, targetPath, fsType, nil) // Passing without /host because k8s mounter uses mount\mkfs\fsck + // TODO: pass mount options + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + logger.Debugf("FormatAndMount end [goid=%d]", util.GetGoID()) + + logger.Debugf("NodePublishVolume Finished: multipath device is now mounted to targetPath.") + + return &csi.NodePublishVolumeResponse{}, nil +} + +func (d *NodeService) nodePublishVolumeRequestValidation(req *csi.NodePublishVolumeRequest) error { + volumeID := req.GetVolumeId() + if len(volumeID) == 0 { + return &RequestValidationError{"Volume ID not provided"} + } + + source := req.GetStagingTargetPath() + if len(source) == 0 { + return &RequestValidationError{"Staging target not provided"} + } + + target := req.GetTargetPath() + if len(target) == 0 { + return &RequestValidationError{"Target path not provided"} + } + + volCap := req.GetVolumeCapability() + if volCap == nil { + return &RequestValidationError{"Volume capability not provided"} + } + + if !isValidVolumeCapabilitiesAccessMode([]*csi.VolumeCapability{volCap}) { + return &RequestValidationError{"Volume capability AccessMode not supported"} + } + + // If the access type is block, do nothing for stage + switch volCap.GetAccessType().(type) { + case *csi.VolumeCapability_Block: + return &RequestValidationError{"Volume Access Type Block is not supported yet"} + } + + return nil +} + +func (d *NodeService) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { + volumeID := req.GetVolumeId() + goid_info.SetAdditionalIDInfo(volumeID) + defer goid_info.DeleteAdditionalIDInfo() + logger.Debugf(">>>> NodeUnpublishVolume: called with args %+v", *req) + defer logger.Debugf("<<<< NodeUnpublishVolume") + + if len(volumeID) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") + } + + err := d.VolumeIdLocksMap.AddVolumeLock(volumeID, "NodeUnpublishVolume") + if err != nil { + logger.Errorf("Another operation is being perfomed on volume : {%s}", volumeID) + return nil, status.Error(codes.Aborted, err.Error()) + } + defer d.VolumeIdLocksMap.RemoveVolumeLock(volumeID, "NodeUnpublishVolume") + + target := req.GetTargetPath() + if len(target) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target path not provided") + } + + logger.Debugf("NodeUnpublishVolume: unmounting %s", target) + err = d.mounter.Unmount(target) + if err != nil { + if strings.Contains(err.Error(), "not mounted") { + logger.Warningf("Idempotent case - target was already unmounted %s", target) + return 
&csi.NodeUnpublishVolumeResponse{}, nil + } + return nil, status.Errorf(codes.Internal, "Could not unmount %q: %v", target, err) + } + + return &csi.NodeUnpublishVolumeResponse{}, nil + +} + +func (d *NodeService) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + goid_info.SetAdditionalIDInfo(req.VolumeId) + defer goid_info.DeleteAdditionalIDInfo() + return nil, status.Error(codes.Unimplemented, "NodeGetVolumeStats is not implemented yet") +} + +func (d *NodeService) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { + goid_info.SetAdditionalIDInfo(req.VolumeId) + defer goid_info.DeleteAdditionalIDInfo() + return nil, status.Error(codes.Unimplemented, fmt.Sprintf("NodeExpandVolume is not yet implemented")) +} + +func (d *NodeService) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { + logger.Debugf(">>>> NodeGetCapabilities: called with args %+v", *req) + defer logger.Debugf("<<<< NodeGetCapabilities") + + var caps []*csi.NodeServiceCapability + for _, cap := range nodeCaps { + c := &csi.NodeServiceCapability{ + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: cap, + }, + }, + } + caps = append(caps, c) + } + return &csi.NodeGetCapabilitiesResponse{Capabilities: caps}, nil +} + +func (d *NodeService) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + logger.Debugf(">>>> NodeGetInfo: called with args %+v", *req) + defer logger.Debugf("<<<< NodeGetInfo") + + var iscsiIQN string + var fcWWNs []string + var err error + + fcExists := d.NodeUtils.Exists(FCPath) + if fcExists { + fcWWNs, err = d.NodeUtils.ParseFCPorts() + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + + iscsiExists := d.NodeUtils.Exists(IscsiFullPath) + if iscsiExists { + iscsiIQN, _ = d.NodeUtils.ParseIscsiInitiators() + } + + if fcWWNs == nil && iscsiIQN == "" { + err := fmt.Errorf("Cannot find valid fc wwns or iscsi iqn") + return nil,status.Error(codes.Internal, err.Error()) + } + + delimiter := ";" + fcPorts := strings.Join(fcWWNs, ":") + nodeId := d.Hostname + delimiter + iscsiIQN + delimiter +fcPorts + logger.Debugf("node id is : %s", nodeId) + + return &csi.NodeGetInfoResponse{ + NodeId: nodeId, + }, nil +} + +func isValidVolumeCapabilitiesAccessMode(volCaps []*csi.VolumeCapability) bool { + hasSupport := func(cap *csi.VolumeCapability) bool { + for _, c := range volumeCaps { + if c.GetMode() == cap.AccessMode.GetMode() { + return true + } + } + return false + } + + foundAll := true + for _, c := range volCaps { + if !hasSupport(c) { + foundAll = false + break + } + } + + return foundAll +} diff --git a/node/pkg/driver/node_test.go b/node/pkg/driver/node_test.go new file mode 100644 index 000000000..83c86384e --- /dev/null +++ b/node/pkg/driver/node_test.go @@ -0,0 +1,510 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package driver_test + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + gomock "github.com/golang/mock/gomock" + mocks "github.com/ibm/ibm-block-csi-driver/node/mocks" + driver "github.com/ibm/ibm-block-csi-driver/node/pkg/driver" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + PublishContextParamLun string = "PUBLISH_CONTEXT_LUN" // TODO for some reason I coun't take it from config.yaml + PublishContextParamConnectivity string = "PUBLISH_CONTEXT_CONNECTIVITY" +) + +func TestNodeStageVolume(t *testing.T) { + stdVolCap := &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + } + testCases := []struct { + name string + req *csi.NodeStageVolumeRequest + expErrCode codes.Code + }{ + { + name: "fail no VolumeId", + req: &csi.NodeStageVolumeRequest{ + PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, + StagingTargetPath: "/test/path", + VolumeCapability: stdVolCap, + }, + expErrCode: codes.InvalidArgument, + }, + { + name: "fail no StagingTargetPath", + req: &csi.NodeStageVolumeRequest{ + PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + }, + expErrCode: codes.InvalidArgument, + }, + { + name: "fail no VolumeCapability", + req: &csi.NodeStageVolumeRequest{ + PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, + StagingTargetPath: "/test/path", + VolumeId: "vol-test", + }, + expErrCode: codes.InvalidArgument, + }, + { + name: "fail invalid VolumeCapability Block instead of Mount", + req: &csi.NodeStageVolumeRequest{ + PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, + StagingTargetPath: "/test/path", + VolumeCapability: &csi.VolumeCapability{ + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + }, + VolumeId: "vol-test", + }, + expErrCode: codes.InvalidArgument, + }, + { + name: "fail invalid VolumeCapability ", + req: &csi.NodeStageVolumeRequest{ + PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, + StagingTargetPath: "/test/path", + VolumeCapability: &csi.VolumeCapability{ + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_UNKNOWN, + }, + }, + VolumeId: "vol-test", + }, + expErrCode: codes.InvalidArgument, + }, + /*{ + name: "fail because not implemented yet - but pass all basic verifications", + req: &csi.NodeStageVolumeRequest{ + PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, + StagingTargetPath: "/test/path", + VolumeCapability: stdVolCap, + VolumeId: "vol-test", + }, + expErrCode: codes.Unimplemented, + },*/ + } + + for _, tc := range testCases { + + t.Run(tc.name, func(t *testing.T) { + + d := newTestNodeService(nil) + + _, err := d.NodeStageVolume(context.TODO(), tc.req) + if err != nil { + srvErr, ok := 
status.FromError(err) + if !ok { + t.Fatalf("Could not get error status code from error: %v", srvErr) + } + if srvErr.Code() != tc.expErrCode { + t.Fatalf("Expected error code %d, got %d message %s", tc.expErrCode, srvErr.Code(), srvErr.Message()) + } + } else if tc.expErrCode != codes.OK { + t.Fatalf("Expected error %v, got no error", tc.expErrCode) + } + }) + } +} + +func newTestNodeService(nodeUtils driver.NodeUtilsInterface) driver.NodeService { + return driver.NodeService{ + Hostname: "test-host", + ConfigYaml: driver.ConfigFile{}, + NodeUtils: nodeUtils, + } +} + +// +//func TestNodeUnstageVolume(t *testing.T) { +// testCases := []struct { +// name string +// req *csi.NodeUnstageVolumeRequest +// expErrCode codes.Code +// }{ +// { +// name: "fail no VolumeId", +// req: &csi.NodeUnstageVolumeRequest{ +// StagingTargetPath: "/test/path", +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail no StagingTargetPath", +// req: &csi.NodeUnstageVolumeRequest{ +// VolumeId: "vol-test", +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail because not implemented yet - but pass all basic verifications", +// req: &csi.NodeUnstageVolumeRequest{ +// VolumeId: "vol-test", +// StagingTargetPath: "/test/path", +// }, +// expErrCode: codes.Unimplemented, +// }, +// } +// +// for _, tc := range testCases { +// t.Run(tc.name, func(t *testing.T) { +// d := newTestNodeService(nil) +// +// _, err := d.NodeUnstageVolume(context.TODO(), tc.req) +// if err != nil { +// srvErr, ok := status.FromError(err) +// if !ok { +// t.Fatalf("Could not get error status code from error: %v", srvErr) +// } +// if srvErr.Code() != tc.expErrCode { +// t.Fatalf("Expected error code %d, got %d message %s", tc.expErrCode, srvErr.Code(), srvErr.Message()) +// } +// } else if tc.expErrCode != codes.OK { +// t.Fatalf("Expected error %v, got no error", tc.expErrCode) +// } +// }) +// } +//} +// +//func TestNodePublishVolume(t *testing.T) { +// stdVolCap := &csi.VolumeCapability{ +// AccessType: &csi.VolumeCapability_Mount{ +// Mount: &csi.VolumeCapability_MountVolume{}, +// }, +// AccessMode: &csi.VolumeCapability_AccessMode{ +// Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, +// }, +// } +// testCases := []struct { +// name string +// req *csi.NodePublishVolumeRequest +// expErrCode codes.Code +// }{ +// { +// name: "fail no VolumeId", +// req: &csi.NodePublishVolumeRequest{ +// PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, +// StagingTargetPath: "/test/staging/path", +// TargetPath: "/test/target/path", +// VolumeCapability: stdVolCap, +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail no StagingTargetPath", +// req: &csi.NodePublishVolumeRequest{ +// PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, +// TargetPath: "/test/target/path", +// VolumeCapability: stdVolCap, +// VolumeId: "vol-test", +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail no TargetPath", +// req: &csi.NodePublishVolumeRequest{ +// PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, +// StagingTargetPath: "/test/staging/path", +// VolumeCapability: stdVolCap, +// VolumeId: "vol-test", +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail no VolumeCapability", +// req: &csi.NodePublishVolumeRequest{ +// PublishContext: map[string]string{PublishContextParamLun: "1", 
PublishContextParamConnectivity: "iSCSI"}, +// StagingTargetPath: "/test/staging/path", +// TargetPath: "/test/target/path", +// VolumeId: "vol-test", +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail invalid VolumeCapability", +// req: &csi.NodePublishVolumeRequest{ +// PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, +// StagingTargetPath: "/test/staging/path", +// TargetPath: "/test/target/path", +// VolumeId: "vol-test", +// VolumeCapability: &csi.VolumeCapability{ +// AccessMode: &csi.VolumeCapability_AccessMode{ +// Mode: csi.VolumeCapability_AccessMode_UNKNOWN, +// }, +// }, +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail because not implemented yet - but pass all basic verifications", +// req: &csi.NodePublishVolumeRequest{ +// PublishContext: map[string]string{PublishContextParamLun: "1", PublishContextParamConnectivity: "iSCSI"}, +// StagingTargetPath: "/test/staging/path", +// TargetPath: "/test/target/path", +// VolumeCapability: stdVolCap, +// VolumeId: "vol-test", +// }, +// expErrCode: codes.Unimplemented, +// }, +// } +// +// for _, tc := range testCases { +// t.Run(tc.name, func(t *testing.T) { +// d := newTestNodeService(nil) +// +// _, err := d.NodePublishVolume(context.TODO(), tc.req) +// if err != nil { +// srvErr, ok := status.FromError(err) +// if !ok { +// t.Fatalf("Could not get error status code from error: %v", srvErr) +// } +// if srvErr.Code() != tc.expErrCode { +// t.Fatalf("Expected error code %d, got %d message %s", tc.expErrCode, srvErr.Code(), srvErr.Message()) +// } +// } else if tc.expErrCode != codes.OK { +// t.Fatalf("Expected error %v and got no error", tc.expErrCode) +// } +// +// }) +// } +//} +// +//func TestNodeUnpublishVolume(t *testing.T) { +// testCases := []struct { +// name string +// req *csi.NodeUnpublishVolumeRequest +// // expected test error code +// expErrCode codes.Code +// }{ +// { +// name: "fail no VolumeId", +// req: &csi.NodeUnpublishVolumeRequest{ +// TargetPath: "/test/path", +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail no TargetPath", +// req: &csi.NodeUnpublishVolumeRequest{ +// VolumeId: "vol-test", +// }, +// expErrCode: codes.InvalidArgument, +// }, +// { +// name: "fail because not implemented yet - but pass all basic verifications", +// req: &csi.NodeUnpublishVolumeRequest{ +// VolumeId: "vol-test", +// TargetPath: "/test/path", +// }, +// expErrCode: codes.Unimplemented, +// }, +// } +// +// for _, tc := range testCases { +// t.Run(tc.name, func(t *testing.T) { +// d := newTestNodeService(nil) +// +// _, err := d.NodeUnpublishVolume(context.TODO(), tc.req) +// if err != nil { +// srvErr, ok := status.FromError(err) +// if !ok { +// t.Fatalf("Could not get error status code from error: %v", srvErr) +// } +// if srvErr.Code() != tc.expErrCode { +// t.Fatalf("Expected error code %d, got %d message %s", tc.expErrCode, srvErr.Code(), srvErr.Message()) +// } +// } else if tc.expErrCode != codes.OK { +// t.Fatalf("Expected error %v, got no error", tc.expErrCode) +// } +// }) +// } +//} + +func TestNodeGetVolumeStats(t *testing.T) { + + req := &csi.NodeGetVolumeStatsRequest{} + + d := newTestNodeService(nil) + + expErrCode := codes.Unimplemented + + _, err := d.NodeGetVolumeStats(context.TODO(), req) + if err == nil { + t.Fatalf("Expected error code %d, got nil", expErrCode) + } + srvErr, ok := status.FromError(err) + if !ok { + t.Fatalf("Could not get error status code from error: %v", 
srvErr) + } + if srvErr.Code() != expErrCode { + t.Fatalf("Expected error code %d, got %d message %s", expErrCode, srvErr.Code(), srvErr.Message()) + } +} + +func TestNodeGetCapabilities(t *testing.T) { + req := &csi.NodeGetCapabilitiesRequest{} + + d := newTestNodeService(nil) + + caps := []*csi.NodeServiceCapability{ + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + }, + }, + }, + } + expResp := &csi.NodeGetCapabilitiesResponse{Capabilities: caps} + + resp, err := d.NodeGetCapabilities(context.TODO(), req) + if err != nil { + srvErr, ok := status.FromError(err) + if !ok { + t.Fatalf("Could not get error status code from error: %v", srvErr) + } + t.Fatalf("Expected nil error, got %d message %s", srvErr.Code(), srvErr.Message()) + } + if !reflect.DeepEqual(expResp, resp) { + t.Fatalf("Expected response {%+v}, got {%+v}", expResp, resp) + } +} + +func TestNodeGetInfo(t *testing.T) { + testCases := []struct { + name string + return_iqn string + return_iqn_err error + return_fcs []string + return_fc_err error + expErr error + expNodeId string + iscsiExists bool + fcExists bool + }{ + { + name: "good iqn, empty fc with error from node_utils", + return_fc_err: fmt.Errorf("some error"), + expErr: status.Error(codes.Internal, fmt.Errorf("some error").Error()), + iscsiExists: true, + fcExists: true, + }, + { + name: "empty iqn, one fc port", + return_fcs: []string{"10000000c9934d9f"}, + expNodeId: "test-host;;10000000c9934d9f", + iscsiExists: true, + fcExists: true, + }, + { + name: "empty iqn, two fc ports", + return_iqn: "", + return_fcs: []string{"10000000c9934d9f","10000000c9934d9h"}, + expNodeId: "test-host;;10000000c9934d9f:10000000c9934d9h", + iscsiExists: true, + fcExists: true, + }, + { + name: "good iqn and good fcs", + return_iqn: "iqn.1994-07.com.redhat:e123456789", + return_fcs: []string{"10000000c9934d9f","10000000c9934d9h"}, + expNodeId: "test-host;iqn.1994-07.com.redhat:e123456789;10000000c9934d9f:10000000c9934d9h", + iscsiExists: true, + fcExists: true, + }, + { + name: "iqn and fc paths are nonexistent", + iscsiExists: false, + fcExists: false, + expErr: status.Error(codes.Internal, fmt.Errorf("Cannot find valid fc wwns or iscsi iqn").Error()), + }, + { + name: "iqn path is nonexistent", + iscsiExists: false, + fcExists: true, + return_fcs: []string{"10000000c9934d9f"}, + expNodeId: "test-host;;10000000c9934d9f", + }, + { + name: "fc path is nonexistent", + iscsiExists: true, + fcExists: false, + return_iqn: "iqn.1994-07.com.redhat:e123456789", + expNodeId: "test-host;iqn.1994-07.com.redhat:e123456789;", + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := &csi.NodeGetInfoRequest{} + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + fake_nodeutils := mocks.NewMockNodeUtilsInterface(mockCtrl) + fake_nodeutils.EXPECT().Exists(driver.FCPath).Return(tc.fcExists) + if tc.fcExists { + fake_nodeutils.EXPECT().ParseFCPorts().Return(tc.return_fcs, tc.return_fc_err) + } + if tc.return_fc_err == nil { + fake_nodeutils.EXPECT().Exists(driver.IscsiFullPath).Return(tc.iscsiExists) + if tc.iscsiExists { + fake_nodeutils.EXPECT().ParseIscsiInitiators().Return(tc.return_iqn, tc.return_iqn_err) + } + } + + d := newTestNodeService(fake_nodeutils) + + expResponse := &csi.NodeGetInfoResponse{NodeId: tc.expNodeId} + + res, err := d.NodeGetInfo(context.TODO(), req) + if tc.expErr != nil { + if err == nil { + 
t.Fatalf("Expected error to be thrown : {%v}", tc.expErr) + } else { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expected error : {%v} to be equal to expected error : {%v}", err, tc.expErr) + } + } + } else { + if res.NodeId != expResponse.NodeId { + t.Fatalf("Expected res : {%v}, and got {%v}", expResponse, res) + } + } + }) + } + +} \ No newline at end of file diff --git a/node/pkg/driver/node_utils.go b/node/pkg/driver/node_utils.go new file mode 100644 index 000000000..664c7b07e --- /dev/null +++ b/node/pkg/driver/node_utils.go @@ -0,0 +1,240 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package driver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "os" + "path" + "strconv" + "strings" + "k8s.io/apimachinery/pkg/util/errors" + + executer "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" + "github.com/ibm/ibm-block-csi-driver/node/logger" +) + +//go:generate mockgen -destination=../../mocks/mock_node_utils.go -package=mocks github.com/ibm/ibm-block-csi-driver/node/pkg/driver NodeUtilsInterface + +type NodeUtilsInterface interface { + ParseIscsiInitiators() (string, error) + ParseFCPorts() ([]string, error) + GetInfoFromPublishContext(publishContext map[string]string, configYaml ConfigFile) (string, int, []string, error) + GetSysDevicesFromMpath(baseDevice string) (string, error) + + // TODO refactor and move all staging methods to dedicate interface. 
+ WriteStageInfoToFile(path string, info map[string]string) error + ReadFromStagingInfoFile(filePath string) (map[string]string, error) + ClearStageInfoFile(filePath string) error + StageInfoFileIsExist(filePath string) bool + Exists(filePath string) bool +} + +type NodeUtils struct { + Executer executer.ExecuterInterface +} + +func NewNodeUtils(executer executer.ExecuterInterface) *NodeUtils { + return &NodeUtils{Executer: executer} +} + +func (n NodeUtils) ParseIscsiInitiators() (string, error) { + file, err := os.Open(IscsiFullPath) + if err != nil { + return "", err + } + + defer file.Close() + + fileOut, err := ioutil.ReadAll(file) + if err != nil { + return "", err + } + + fileSplit := strings.Split(string(fileOut), "InitiatorName=") + if len(fileSplit) != 2 { + return "", fmt.Errorf(ErrorWhileTryingToReadIQN, string(fileOut)) + } + + iscsiIqn := strings.TrimSpace(fileSplit[1]) + + return iscsiIqn, nil +} + +func (n NodeUtils) GetInfoFromPublishContext(publishContext map[string]string, configYaml ConfigFile) (string, int, []string, error) { + // Returns: connectivityType, lun, arrayInitiators, error + var arrayInitiators []string + str_lun := publishContext[configYaml.Controller.Publish_context_lun_parameter] + + lun, err := strconv.Atoi(str_lun) + if err != nil { + return "", -1, nil, err + } + + connectivityType := publishContext[configYaml.Controller.Publish_context_connectivity_parameter] + if connectivityType == "iscsi" { + arrayInitiators = strings.Split(publishContext[configYaml.Controller.Publish_context_array_iqn], ",") + } + if connectivityType == "fc" { + arrayInitiators = strings.Split(publishContext[configYaml.Controller.Publish_context_fc_initiators], ",") + } + + logger.Debugf("PublishContext relevant info : connectivityType=%v, lun=%v, arrayInitiators=%v", connectivityType, lun, arrayInitiators) + return connectivityType, lun, arrayInitiators, nil +} + +func (n NodeUtils) WriteStageInfoToFile(fPath string, info map[string]string) error { + // writes to stageTargetPath/filename + + fPath = PrefixChrootOfHostRoot + fPath + stagePath := filepath.Dir(fPath) + if _, err := os.Stat(stagePath); os.IsNotExist(err) { + logger.Debugf("The filePath [%s] does not exist. Creating it.", stagePath) + if err = os.MkdirAll(stagePath, os.FileMode(0755)); err != nil { + logger.Debugf("Failed to create filePath [%s]. Error: [%v]", stagePath, err) + } + } + logger.Debugf("WriteStageInfo file : path {%v}, info {%v}", fPath, info) + stageInfo, err := json.Marshal(info) + if err != nil { + logger.Errorf("Error marshalling info file %s to json : {%v}", fPath, err.Error()) + return err + } + + err = ioutil.WriteFile(fPath, stageInfo, 0600) + + if err != nil { + logger.Errorf("Error while writing to file %s: {%v}", fPath, err.Error()) + return err + } + + return nil +} + +func (n NodeUtils) ReadFromStagingInfoFile(filePath string) (map[string]string, error) { + // reads from stageTargetPath/filename + filePath = PrefixChrootOfHostRoot + filePath + + logger.Debugf("Read StagingInfoFile : path {%v},", filePath) + stageInfo, err := ioutil.ReadFile(filePath) + if err != nil { + logger.Errorf("error reading file %s. err : {%v}", filePath, err.Error()) + return nil, err + } + + infoMap := make(map[string]string) + + err = json.Unmarshal(stageInfo, &infoMap) + if err != nil { + logger.Errorf("Error unmarshalling file %s. 
err : {%v}", filePath, err.Error()) + return nil, err + } + + return infoMap, nil +} + +func (n NodeUtils) ClearStageInfoFile(filePath string) error { + filePath = PrefixChrootOfHostRoot + filePath + logger.Debugf("Delete StagingInfoFile : path {%v},", filePath) + + return os.Remove(filePath) +} + +func (n NodeUtils) GetSysDevicesFromMpath(device string) (string, error) { + // this will return the /sys/block/dm-3/slaves/ + logger.Debugf("GetSysDevicesFromMpath with param : {%v}", device) + deviceSlavePath := path.Join("/sys", "block", device, "slaves") + logger.Debugf("looking in path : {%v}", deviceSlavePath) + slaves, err := ioutil.ReadDir(deviceSlavePath) + if err != nil { + logger.Errorf("an error occured while looking for device slaves : {%v}", err.Error()) + return "", err + } + + logger.Debugf("found slaves : {%v}", slaves) + slavesString := "" + for _, slave := range slaves { + slavesString += "," + slave.Name() + } + return slavesString, nil + +} + +func (n NodeUtils) StageInfoFileIsExist(filePath string) bool { + if _, err := os.Stat(filePath); err != nil { + return false + } + return true +} + +func (n NodeUtils) ParseFCPorts() ([]string, error) { + var errs []error + var fcPorts []string + + fpaths, err := n.Executer.FilepathGlob(FCPortPath) + if fpaths == nil { + err = fmt.Errorf(ErrorUnsupportedConnectivityType, "FC") + } + if err != nil { + return nil, err + } + + for _, fpath := range fpaths { + file, err := os.Open(fpath) + if err != nil { + errs = append(errs, err) + break + } + defer file.Close() + + fileOut, err := ioutil.ReadAll(file) + if err != nil { + errs = append(errs, err) + break + } + + fileSplit := strings.Split(string(fileOut), "0x") + if len(fileSplit) != 2 { + err := fmt.Errorf(ErrorWhileTryingToReadFC, string(fileOut)) + errs = append(errs, err) + } else { + fcPorts = append(fcPorts, strings.TrimSpace(fileSplit[1])) + } + } + + if errs != nil { + err := errors.NewAggregate(errs) + logger.Errorf("errors occured while looking for FC ports: {%v}", err) + if fcPorts == nil { + return nil, err + } + } + + return fcPorts, nil +} + +func (n NodeUtils) Exists(path string) bool { + _, err := os.Stat(path) + if err != nil { + return false + } + + return true +} diff --git a/node/pkg/driver/node_utils_test.go b/node/pkg/driver/node_utils_test.go new file mode 100644 index 000000000..cfa9c7202 --- /dev/null +++ b/node/pkg/driver/node_utils_test.go @@ -0,0 +1,211 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package driver_test + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "testing" + gomock "github.com/golang/mock/gomock" + "errors" + mocks "github.com/ibm/ibm-block-csi-driver/node/mocks" + driver "github.com/ibm/ibm-block-csi-driver/node/pkg/driver" + executer "github.com/ibm/ibm-block-csi-driver/node/pkg/driver/executer" + "reflect" +) + +var ( + nodeUtils = driver.NewNodeUtils(&executer.Executer{}) +) + +func TestParseIscsiInitiators(t *testing.T) { + testCases := []struct { + name string + file_content string + expErr error + expIqn string + }{ + { + name: "wrong iqn file", + file_content: "wrong-content", + expErr: fmt.Errorf(driver.ErrorWhileTryingToReadIQN, "wrong-content"), + }, + { + name: "non existing file", + expErr: &os.PathError{"open", "/non/existent/path", syscall.ENOENT}, + }, + { + name: "right_iqn", + file_content: "InitiatorName=iqn.1996-05.com.redhat:123123122", + expIqn: "iqn.1996-05.com.redhat:123123122", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + filePath := "" + + if tc.file_content != "" { + tmpFile, err := ioutil.TempFile(os.TempDir(), "iscis-initiators-") + fmt.Println(tmpFile) + if err != nil { + t.Fatalf("Cannot create temporary file : %v", err) + } + + defer func(){ + os.Remove(tmpFile.Name()) + driver.IscsiFullPath = "/host/etc/iscsi/initiatorname.iscsi" + }() + + fmt.Println("Created File: " + tmpFile.Name()) + + text := []byte(tc.file_content) + if _, err = tmpFile.Write(text); err != nil { + t.Fatalf("Failed to write to temporary file: %v", err) + } + + if err := tmpFile.Close(); err != nil { + t.Fatalf(err.Error()) + } + filePath = tmpFile.Name() + } else { + filePath = "/non/existent/path" + } + + driver.IscsiFullPath = filePath + isci, err := nodeUtils.ParseIscsiInitiators() + + if tc.expErr != nil { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expecting err: expected %v, got %v", tc.expErr, err) + } + + } else { + if err != nil { + t.Fatalf("err is not nil. 
got: %v", err) + } + if isci != tc.expIqn { + t.Fatalf("scheme mismatches: expected %v, got %v", tc.expIqn, isci) + } + + } + + }) + } + +} + +func TestParseFCPortsName(t *testing.T) { + testCases := []struct { + name string + file_contents []string + err error + expErr error + expFCPorts []string + }{ + { + name: "fc port file with wrong content", + file_contents: []string{"wrong content"}, + expErr: fmt.Errorf(driver.ErrorWhileTryingToReadFC, "wrong content"), + }, + { + name: "fc unsupported", + expErr: fmt.Errorf(driver.ErrorUnsupportedConnectivityType, "FC"), + }, + { + name: "one FC port", + file_contents: []string{"0x10000000c9934d9f"}, + expFCPorts: []string{"10000000c9934d9f"}, + }, + { + name: "one FC port file with wrong content, another is good", + file_contents: []string{"wrong content", "0x10000000c9934dab"}, + expFCPorts: []string{"10000000c9934dab"}, + }, + { + name: "one fc port file with wrong content, aonther file path is inexistent", + file_contents: []string{"wrong content", ""}, + expErr: errors.New("[Error while tring to get FC port from string: wrong content., open /non/existent/path: no such file or directory]"), + }, + { + name: "two FC ports", + file_contents: []string{"0x10000000c9934d9f", "0x10000000c9934dab"}, + expFCPorts: []string{"10000000c9934d9f", "10000000c9934dab"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + filePath := "" + var fpaths []string + + for _, file_content := range tc.file_contents { + if file_content != "" { + tmpFile, err := ioutil.TempFile(os.TempDir(), "fc-") + fmt.Println(tmpFile) + if err != nil { + t.Fatalf("Cannot create temporary file : %v", err) + } + + defer os.Remove(tmpFile.Name()) + + text := []byte(file_content) + if _, err = tmpFile.Write(text); err != nil { + t.Fatalf("Failed to write to temporary file: %v", err) + } + + if err := tmpFile.Close(); err != nil { + t.Fatalf(err.Error()) + } + filePath = tmpFile.Name() + } else { + filePath = "/non/existent/path" + } + + fpaths = append(fpaths, filePath) + } + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + fake_executer := mocks.NewMockExecuterInterface(mockCtrl) + devicePath := "/sys/class/fc_host/host*/port_name" + fake_executer.EXPECT().FilepathGlob(devicePath).Return(fpaths, tc.err) + nodeUtils := driver.NewNodeUtils(fake_executer) + + fcs, err := nodeUtils.ParseFCPorts() + + if tc.expErr != nil { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expecting err: expected %v, got %v", tc.expErr, err) + } + + } else { + if err != nil { + t.Fatalf("err is not nil. got: %v", err) + } + if !reflect.DeepEqual(fcs, tc.expFCPorts) { + t.Fatalf("scheme mismatches: expected %v, got %v", tc.expFCPorts, fcs) + } + + } + }) + } +} diff --git a/node/pkg/driver/sync_lock.go b/node/pkg/driver/sync_lock.go new file mode 100644 index 000000000..6d9c1e30b --- /dev/null +++ b/node/pkg/driver/sync_lock.go @@ -0,0 +1,69 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package driver + +import ( + "sync" + + "github.com/ibm/ibm-block-csi-driver/node/logger" +) + +//go:generate mockgen -destination=../../mocks/mock_sync_lock.go -package=mocks github.com/ibm/ibm-block-csi-driver/node/pkg/driver SyncLockInterface + +type SyncLockInterface interface { + AddVolumeLock(id string, msg string) error + RemoveVolumeLock(id string, msg string) + GetSyncMap() *sync.Map + // RemoveVolumeLock(id string, msg string) func() +} + +type SyncLock struct { + SyncMap *sync.Map +} + +func NewSyncLock() SyncLockInterface { + return &SyncLock{ + SyncMap: &sync.Map{}, + } + +} + +func (s SyncLock) GetSyncMap() *sync.Map { + return s.SyncMap +} + +func (s SyncLock) AddVolumeLock(id string, msg string) error { + logger.Debugf("Lock for action %s, Trying to acquire lock for volume", msg) + _, exists := s.SyncMap.LoadOrStore(id, 0) + if !exists { + logger.Debugf("Lock for action %s, Succeeded to acquire lock for volume", msg) + return nil + } else { + logger.Debugf("Lock for action %s, Lock for volume is already in use by another thread", msg) + return &VolumeAlreadyProcessingError{id} + } +} + +func (s SyncLock) RemoveVolumeLock(id string, msg string) { + logger.Debugf("Lock for action %s, release lock for volume", msg) + + s.SyncMap.Delete(id) +} + +/*func (s SyncLock) RemoveVolumeLock(id string, msg string) func() { + return func() { s.RemoveVolumeLockDo(id, msg) } +}*/ diff --git a/node/pkg/driver/version.go b/node/pkg/driver/version.go new file mode 100644 index 000000000..0df750da4 --- /dev/null +++ b/node/pkg/driver/version.go @@ -0,0 +1,67 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package driver + +import ( + "encoding/json" + "fmt" + "runtime" +) + +// These are set during build time via -ldflags +var ( + gitCommit string + buildDate string +) + +type VersionInfo struct { + DriverVersion string `json:"driverVersion"` + GitCommit string `json:"gitCommit"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +func GetVersion(configFilePath string) (VersionInfo, error) { + configFile, err := ReadConfigFile(configFilePath) + if err != nil { + return VersionInfo{}, err + } + + return VersionInfo{ + DriverVersion: configFile.Identity.Version, + GitCommit: gitCommit, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + }, nil +} + +func GetVersionJSON(configFilePath string) (string, error) { + info, err := GetVersion(configFilePath) + if err != nil { + return "", err + } + + marshalled, err := json.MarshalIndent(&info, "", " ") + if err != nil { + return "", err + } + return string(marshalled), nil +} diff --git a/node/pkg/driver/version_test.go b/node/pkg/driver/version_test.go new file mode 100644 index 000000000..1dd85bce2 --- /dev/null +++ b/node/pkg/driver/version_test.go @@ -0,0 +1,90 @@ +/** + * Copyright 2019 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package driver + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +func getConfigFilePath() (string, error) { + dir, err := os.Getwd() + if err != nil { + return "", err + } + dir = filepath.Join(dir, "../../../", "common", "config.yaml") + return dir, nil +} + +func TestGetVersion(t *testing.T) { + + dir, err := getConfigFilePath() + if err != nil { + t.Fatalf("Getting config file returned an error") + } + + fmt.Println(dir) + + version, err := GetVersion(dir) + + expected := VersionInfo{ + DriverVersion: "0.9.0", + GitCommit: "", + BuildDate: "", + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } + + if !reflect.DeepEqual(version, expected) { + t.Fatalf("structs not equal\ngot:\n%+v\nexpected:\n%+v", version, expected) + } + + if err != nil { + t.Fatalf("GetVersion returned an error") + } +} + +func TestGetVersionJSON(t *testing.T) { + dir, err := getConfigFilePath() + + if err != nil { + t.Fatalf("Getting config file returned an error") + } + + version, err := GetVersionJSON(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expected := fmt.Sprintf(`{ + "driverVersion": "0.9.0", + "gitCommit": "", + "buildDate": "", + "goVersion": "%s", + "compiler": "%s", + "platform": "%s" +}`, runtime.Version(), runtime.Compiler, fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)) + + if version != expected { + t.Fatalf("json not equal\ngot:\n%s\nexpected:\n%s", version, expected) + } +} diff --git a/node/util/util.go b/node/util/util.go new file mode 100644 index 000000000..620401800 --- /dev/null +++ b/node/util/util.go @@ -0,0 +1,61 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package util + +import ( + "bytes" + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" +) + +func ParseEndpoint(endpoint string) (string, string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", "", fmt.Errorf("could not parse endpoint: %v", err) + } + + addr := path.Join(u.Host, filepath.FromSlash(u.Path)) + + scheme := strings.ToLower(u.Scheme) + switch scheme { + case "tcp": + case "unix": + addr = path.Join("/", addr) + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return "", "", fmt.Errorf("could not remove unix domain socket %q: %v", addr, err) + } + default: + return "", "", fmt.Errorf("unsupported protocol: %s", scheme) + } + + return scheme, addr, nil +} + +func GetGoID() uint64 { + b := make([]byte, 64) + b = b[:runtime.Stack(b, false)] + b = bytes.TrimPrefix(b, []byte("goroutine ")) + b = b[:bytes.IndexByte(b, ' ')] + n, _ := strconv.ParseUint(string(b), 10, 64) + return n +} diff --git a/node/util/util_test.go b/node/util/util_test.go new file mode 100644 index 000000000..85b72a2d2 --- /dev/null +++ b/node/util/util_test.go @@ -0,0 +1,81 @@ +/** + * Copyright 2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package util + +import ( + "fmt" + "testing" +) + +func TestParseEndpoint(t *testing.T) { + testCases := []struct { + name string + endpoint string + expScheme string + expAddr string + expErr error + }{ + { + name: "valid unix endpoint 1", + endpoint: "unix:///tmp/csi/csi.sock", + expScheme: "unix", + expAddr: "/tmp/csi/csi.sock", + }, + { + name: "valid unix endpoint 2", + endpoint: "unix://tmp/csi/csi.sock", + expScheme: "unix", + expAddr: "/tmp/csi/csi.sock", + }, + { + name: "valid unix endpoint 3", + endpoint: "unix:/tmp/csi/csi.sock", + expScheme: "unix", + expAddr: "/tmp/csi/csi.sock", + }, + { + name: "invalid endpoint", + endpoint: "http://127.0.0.1", + expErr: fmt.Errorf("unsupported protocol: http"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + scheme, addr, err := ParseEndpoint(tc.endpoint) + + if tc.expErr != nil { + if err.Error() != tc.expErr.Error() { + t.Fatalf("Expecting err: expected %v, got %v", tc.expErr, err) + } + + } else { + if err != nil { + t.Fatalf("err is not nil. 
got: %v", err) + } + if scheme != tc.expScheme { + t.Fatalf("scheme mismatches: expected %v, got %v", tc.expScheme, scheme) + } + + if addr != tc.expAddr { + t.Fatalf("addr mismatches: expected %v, got %v", tc.expAddr, addr) + } + } + }) + } + +} diff --git a/scripts/ci/build_push_images.sh b/scripts/ci/build_push_images.sh new file mode 100755 index 000000000..1e48134a6 --- /dev/null +++ b/scripts/ci/build_push_images.sh @@ -0,0 +1,51 @@ +#!/bin/bash -xe + +# Validations +MANDATORY_ENVS="IMAGE_VERSION BUILD_NUMBER DOCKER_REGISTRY CSI_NODE_IMAGE CSI_CONTROLLER_IMAGE GIT_BRANCH" +for envi in $MANDATORY_ENVS; do + [ -z "${!envi}" ] && { echo "Error - Env $envi is mandatory for the script."; exit 1; } || : +done + +# Prepare specific tag for the image +branch=`echo $GIT_BRANCH| sed 's|/|.|g'` #not sure if docker accept / in the version +specific_tag="${IMAGE_VERSION}_b${BUILD_NUMBER}_${branch}" + +# Set latest tag only if its from develop branch or master and prepare tags +[ "$GIT_BRANCH" = "develop" -o "$GIT_BRANCH" = "origin/develop" -o "$GIT_BRANCH" = "master" ] && tag_latest="true" || tag_latest="false" + + +# CSI controller +# -------------- +ctl_registry="${DOCKER_REGISTRY}/${CSI_CONTROLLER_IMAGE}" +ctl_tag_specific="${ctl_registry}:${specific_tag}" +ctl_tag_latest=${ctl_registry}:latest +[ "$tag_latest" = "true" ] && taglatestflag="-t ${ctl_tag_latest}" + +echo "Build and push the CSI controller image" +docker build -t ${ctl_tag_specific} $taglatestflag -f Dockerfile-csi-controller . +docker push ${ctl_tag_specific} +[ "$tag_latest" = "true" ] && docker push ${ctl_tag_latest} || : + +# CSI node +# -------- +node_registry="${DOCKER_REGISTRY}/${CSI_NODE_IMAGE}" +node_tag_specific="${node_registry}:${specific_tag}" +node_tag_latest=${node_registry}:latest +[ "$tag_latest" = "true" ] && taglatestflag="-t ${node_tag_latest}" + +echo "Build and push the CSI node image" +docker build -t ${node_tag_specific} $taglatestflag -f Dockerfile-csi-node . 
+docker push ${node_tag_specific} +[ "$tag_latest" = "true" ] && docker push ${node_tag_latest} || : + + +set +x +echo "" +echo "Image ready:" +echo " ${ctl_tag_specific}" +echo " ${node_tag_specific}" +[ "$tag_latest" = "true" ] && { echo " ${ctl_tag_latest}"; echo " ${node_tag_latest}"; } || : + +# if param $1 is given, the script writes the specific tags into that file +[ -n "$1" ] && printf "${ctl_tag_specific}\n${node_tag_specific}\n" > $1 || : + diff --git a/scripts/ci/cleanup_volumes.py b/scripts/ci/cleanup_volumes.py new file mode 100755 index 000000000..8056ec8ec --- /dev/null +++ b/scripts/ci/cleanup_volumes.py @@ -0,0 +1,21 @@ +#!/usr/bin/python3.6 + +import os +from pyxcli.client import XCLIClient + +user = os.environ["USERNAME"] +password = os.environ["PASSWORD"] +endpoint = os.environ["STORAGE_ARRAYS"] +pool = os.environ["POOL_NAME"] + +client = XCLIClient.connect_multiendpoint_ssl( + user, + password, + endpoint + ) + +vol_list = client.cmd.vol_list(pool=pool).as_list + +for vol in vol_list: + print("deleting volume : {}".format(vol)) + client.cmd.vol_delete(vol=vol.name) diff --git a/scripts/ci/community_csi_test_cleanup.sh b/scripts/ci/community_csi_test_cleanup.sh new file mode 100755 index 000000000..c69cdf4dd --- /dev/null +++ b/scripts/ci/community_csi_test_cleanup.sh @@ -0,0 +1,5 @@ +#!/bin/bash -xe + +if [ `docker ps | grep $1 | wc -l` != 0 ] ; then + docker stop $1 +fi \ No newline at end of file diff --git a/scripts/ci/helper_to_push_docker_image.sh b/scripts/ci/helper_to_push_docker_image.sh new file mode 100755 index 000000000..369c14215 --- /dev/null +++ b/scripts/ci/helper_to_push_docker_image.sh @@ -0,0 +1,75 @@ +#!/bin/bash -xe + +# This script is for internal use in CI +# Assume docker login to the external registry was done in advance. + +function usage() +{ + echo $0 [internal-image-path] [external-image-path] [latest-tag-optional] + exit 1 +} + +[ $# -eq 2 -o $# -eq 3 ] || { usage; } + +internal_image=$1 +external_image=$2 +latest="$3" + +# if also latest, then define latest_external_image (replace the external tag with the $latest wanted tag) +if [ -n "$latest" ]; then + latest_external_image=`echo $external_image | sed "s|^\(.*/.*:\)\(.*\)$|\1$latest|"` + latest_str_msg=" And latest tag image [$latest_external_image]" +fi + +echo "Preparing to push internal_image --> external_image:" +echo " internal_image=[$internal_image]" +echo " external_image=[$external_image]" +[ -n "$latest" ] && echo " latest_image =[$latest_external_image]" + + +echo "1. Validate the external_image does not exist yet before pushing it." # Note: no need to test the latest tag since it already exists +docker pull $external_image && { echo "Error : the $external_image already exists in the remote registry. Cannot overwrite it."; exit 1; } || { echo "$external_image does not exist on the remote."; } +echo "" + + +echo "2. Validate the internal_image does not exist yet locally." +docker images $internal_image +docker rmi $internal_image && { echo "Removed the local internal_image so it will be pulled again"; } || { echo "internal_image does not exist locally. Continuing."; } +echo "" + + +echo "3. Pull internal_image to local" +docker pull $internal_image +echo "" + + +echo "4. Tag internal_image to external_image (and latest=[$latest_external_image]) and remove the internal_image" +docker tag $internal_image $external_image +[ -n "$latest_external_image" ] && docker tag $internal_image $latest_external_image +docker rmi $internal_image +docker push $external_image +[ -n "$latest_external_image" ] && docker push $latest_external_image +echo "" + + +echo "5. 
Test the pushed image by deleting the local copy and pulling it back" +docker rmi $external_image +docker pull $external_image +docker rmi $external_image + + +if [ -n "$latest_external_image" ]; then + echo "6. Test the pushed [latest] image by deleting the local copy and pulling it back" + docker rmi $latest_external_image + docker pull $latest_external_image + docker rmi $latest_external_image +fi + +set +x +echo "" +echo "Succeeded to push internal_image --> external_image" +echo " internal_image=[$internal_image]" +echo " external_image=[$external_image]" +[ -n "$latest" ] && echo " latest_image =[$latest_external_image]" +set -x + diff --git a/scripts/ci/jenkins_pipeline_community_csi_test b/scripts/ci/jenkins_pipeline_community_csi_test new file mode 100644 index 000000000..143b72f00 --- /dev/null +++ b/scripts/ci/jenkins_pipeline_community_csi_test @@ -0,0 +1,88 @@ +pipeline { + agent { + label 'docker-engine' + } + environment { + CONTROLLER_LOGS = "csi_controller_logs" + } + + stages { + stage('Environment Setup') { + agent { + label 'ansible_rhel73' + } + steps { + script{ + echo "checking out XAVI" + if (env.XAVILIB_BRANCH == null) { + env.XAVILIB_BRANCH = 'develop' + } + // Just bring XAVI repo (use it in different stage) + xaviCheckOutScm(path: 'testing/', name: 'xavi', branch: "${env.XAVILIB_BRANCH}") + + + // Generate the new storage conf yaml with relevant envs + env.pwd = sh(returnStdout: true, script: 'pwd').trim() + echo " env.pwd ${env.pwd}" + + env.new_conf_yaml_name = "${env.pwd}/scripts/ci/storage_conf_new.yaml" + sh 'echo new conf yaml ${new_conf_yaml_name}' + + env.full_storage_conf_yaml_path = "${env.pwd}/scripts/ci/storage_conf.yaml" + echo "full storage conf yaml path : ${env.full_storage_conf_yaml_path}" + + echo "replacing username and password in storage-conf file" + // this will replace the username and password env vars in the yaml file. + sh ''' + ( echo "cat < ${new_conf_yaml_name}"; + cat ${full_storage_conf_yaml_path}; + echo "EOF"; + ) > ${new_conf_yaml_name} + . ${new_conf_yaml_name} + cat ${new_conf_yaml_name} + ''' + + echo "getting pool name from yaml file" + env.POOL_NAME = sh(returnStdout: true, script: 'cat ${full_storage_conf_yaml_path} | grep " pools:" -A 4 | grep name | cut -d ":" -f2').trim() + echo "pool name ${POOL_NAME}" + + } + } + } + stage('Configure Storage') { + agent { + label 'ansible_rhel73' + } + steps { + echo "found storage yaml so running ansible to configure storage using yaml file : ${env.new_conf_yaml_name}" + script { + configureStorage(storage_arrays: "${env.STORAGE_ARRAYS}", vars_file: "${env.new_conf_yaml_name}") + } + } + } + + stage ('CSI-controller: build and start controller server and csi sanity tests') { + steps { + sh './scripts/ci/run_community_csi_test.sh' + } + } + } + + post { + always { + sh './scripts/ci/community_csi_test_cleanup.sh csi-controller' + sh './scripts/ci/community_csi_test_cleanup.sh csi-sanity-test' + //TODO : remove this when ansible supports removing volumes + sh ''' + docker build -f Dockerfile-cleanup-volumes -t csi-cleanupvolumes . 
&& docker run -e STORAGE_ARRAYS=${STORAGE_ARRAYS} -e USERNAME=${USERNAME} -e PASSWORD=${PASSWORD} -e POOL_NAME=${POOL_NAME} --rm --name csi-cleanupvolumes csi-cleanupvolumes + ./scripts/ci/community_csi_test_cleanup.sh csi-cleanupvolumes + ''' + archiveArtifacts "${env.CONTROLLER_LOGS}, ${env.CONTROLLER_LOGS}_node" + sh 'ls build/reports' + junit 'build/reports/*.xml' + sh '[ -d build/reports ] && rm -rf build/reports' + sh '[ -f `${env.CONTROLLER_LOGS}` ] && rm -f csi_controller_logs' + + } + } +} diff --git a/scripts/ci/jenkins_pipeline_csi b/scripts/ci/jenkins_pipeline_csi new file mode 100644 index 000000000..da6ff526f --- /dev/null +++ b/scripts/ci/jenkins_pipeline_csi @@ -0,0 +1,95 @@ +pipeline { + agent { + label 'docker-engine' + } + stages { + stage ('CSI-controller: Unit testing + coverage') { + steps { + sh 'mkdir -p build/reports && chmod 777 build/reports' + sh './scripts/run_unitests.sh `pwd`/build/reports' + } + } + stage ('CSI-controller: pep8') { + steps { + sh 'true' + } + } + stage ('CSI-controller: pylint') { + steps { + sh 'true' + + } + } + stage ('CSI-node: go Unit testing') { + steps { + sh 'mkdir -p build/reports && chmod 777 build/reports' + sh 'make test-xunit-in-container' + } + } + stage ('CSI-deployment: k8s yamls validation') { + steps { + sh './scripts/run_yamlcheck.sh' + } + } + stage ('CSI-controller & node: Build and push images') { + steps { + sh './scripts/ci/build_push_images.sh build/reports/images_url' + } + } + } + post { + always { + sh 'ls -la build/reports/' + junit 'build/reports/*.xml' + // cobertura coberturaReportFile: 'build/reports/*.xml' ## TODO need to run cobertura but it has an issue "No such DSL method" + archiveArtifacts 'build/reports/images_url' + script { + manager.addShortText("${env.GIT_BRANCH}") + } + } + + failure { + emailext body: 'Check console output at $BUILD_URL to view the results. \n\n ${CHANGES} \n\n -------------------------------------------------- \n${BUILD_LOG, maxLines=100, escapeHtml=false}', + to: "${env.EMAIL_TO}", + subject: 'Build failed in Jenkins: $PROJECT_NAME - #$BUILD_NUMBER' + } + unstable { + emailext body: 'Check console output at $BUILD_URL to view the results. 
\n\n ${CHANGES} \n\n -------------------------------------------------- \n${BUILD_LOG, maxLines=100, escapeHtml=false}', + to: "${env.EMAIL_TO}", + subject: 'Unstable build in Jenkins: $PROJECT_NAME - #$BUILD_NUMBER' + } + changed { + emailext body: 'Check console output at $BUILD_URL to view the results.', + to: "${env.EMAIL_TO}", + subject: 'Jenkins build is back to normal: $PROJECT_NAME - #$BUILD_NUMBER' + } + + success { + script { + if (env.JOB_NAME =~ /PRODUCTION/) { + echo "Trigger CI Production Pipeline" + def images = readFile('build/reports/images_url').split("\n") + def controller_image = images[0] + def node_image = images[1] + build( + job: "${env.PROD_CI_JOB}", + wait: false, + parameters: [ + string(name: 'CSI_CONTROLLER_IMAGE', value: "${controller_image}"), + string(name: 'CSI_NODE_IMAGE', value: "${node_image}") + ] + ) + } else { + echo "Will not trigger CI Production pipeline as this is not a production job" + } + } + } + + cleanup { + script { + sh '[ -d build/reports ] && rm -rf build/reports' + } + } + + } +} diff --git a/scripts/ci/run_community_csi_test.sh b/scripts/ci/run_community_csi_test.sh new file mode 100755 index 000000000..09fb6aa10 --- /dev/null +++ b/scripts/ci/run_community_csi_test.sh @@ -0,0 +1,13 @@ +#!/bin/bash -xe + +[ -z "${CONTROLLER_LOGS}" ] && { echo "CONTROLLER_LOGS env is mandatory"; exit 1; } +# assume that all the environment storage was set up in advance. +echo "controller logs : ${CONTROLLER_LOGS}" +./scripts/ci/run_controller_server_for_csi_test.sh csi-controller > ${CONTROLLER_LOGS} 2>&1 +./scripts/ci/run_node_server_for_csi_test.sh csi-node > "${CONTROLLER_LOGS}_node" 2>&1 +echo `pwd` +sleep 2 +mkdir -p build/reports && chmod 777 build/reports +./scripts/ci/run_csi_test_client.sh csi-sanity-test `pwd`/build/reports/ + +docker kill csi-controller \ No newline at end of file diff --git a/scripts/ci/run_controller_server_for_csi_test.sh b/scripts/ci/run_controller_server_for_csi_test.sh new file mode 100755 index 000000000..247781c4d --- /dev/null +++ b/scripts/ci/run_controller_server_for_csi_test.sh @@ -0,0 +1,13 @@ +#!/bin/bash -xe + +# create the /tmp/k8s_dir directory where the grpc unix socket will be. +if [ ! -d /tmp/k8s_dir ]; then + mkdir /tmp/k8s_dir +fi + +chmod 777 /tmp/k8s_dir + +[ $# -ne 1 ] && { echo "Usage $0 : container_name" ; exit 1; } + +docker build -f Dockerfile-csi-controller -t csi-controller . && docker run -v /tmp/k8s_dir:/tmp/k8s_dir:rw -d --rm --name $1 csi-controller -e unix://tmp/k8s_dir/f + diff --git a/scripts/ci/run_csi_test_client.sh b/scripts/ci/run_csi_test_client.sh new file mode 100755 index 000000000..be4582957 --- /dev/null +++ b/scripts/ci/run_csi_test_client.sh @@ -0,0 +1,6 @@ +#!/bin/bash -xe + +[ $# -ne 2 ] && { echo "Usage $0 : container_name folder_for_junit" ; exit 1; } + +#/tmp/k8s_dir is the directory of the csi grpc unix socket that is shared between the csi server and the csi-test container +docker build -f Dockerfile-csi-test -t csi-sanity-test . && docker run --user=root -e STORAGE_ARRAYS=${STORAGE_ARRAYS} -e USERNAME=${USERNAME} -e PASSWORD=${PASSWORD} -e POOL_NAME=${POOL_NAME} -v /tmp/k8s_dir:/tmp/k8s_dir:rw -v$2:/tmp/test_results:rw --rm --name $1 csi-sanity-test diff --git a/scripts/ci/run_node_server_for_csi_test.sh b/scripts/ci/run_node_server_for_csi_test.sh new file mode 100755 index 000000000..1b2b00aed --- /dev/null +++ b/scripts/ci/run_node_server_for_csi_test.sh @@ -0,0 +1,13 @@ +#!/bin/bash -xe + +# create the /tmp/k8s_dir directory where the grpc unix socket will be. +if [ ! 
diff --git a/scripts/ci/run_node_server_for_csi_test.sh b/scripts/ci/run_node_server_for_csi_test.sh
new file mode 100755
index 000000000..1b2b00aed
--- /dev/null
+++ b/scripts/ci/run_node_server_for_csi_test.sh
@@ -0,0 +1,13 @@
+#!/bin/bash -xe
+
+# Create /tmp/k8s_dir, where the gRPC unix socket will live.
+if [ ! -d /tmp/k8s_dir ]; then
+    mkdir /tmp/k8s_dir
+fi
+
+chmod 777 /tmp/k8s_dir
+
+[ $# -ne 1 ] && { echo "Usage $0 : container_name" ; exit 1; }
+
+docker build -f Dockerfile-csi-node -t csi-node . && docker run -v /etc/iscsi:/host/etc/iscsi -v /tmp/k8s_dir:/tmp/k8s_dir:rw -d --rm --name $1 csi-node --csi-endpoint unix://tmp/k8s_dir/nodecsi --hostname=`hostname` --config-file-path=./config.yaml
+
diff --git a/scripts/ci/storage_conf.yaml b/scripts/ci/storage_conf.yaml
new file mode 100644
index 000000000..5fb11a0bf
--- /dev/null
+++ b/scripts/ci/storage_conf.yaml
@@ -0,0 +1,27 @@
+arrays:
+  - array_index: "0"
+    domains:
+      - name: xavi-csi-sanity-test-domain
+        max_pools: 10
+        max_volumes: 100
+        hard_capacity: 3000
+        soft_capacity: 6000
+    users:
+      - name: ${USERNAME}
+        password: ${PASSWORD}
+        category: storageadmin
+        domain: xavi-csi-sanity-test-domain
+        connect: true
+    pools:
+      - domain: xavi-csi-sanity-test-domain
+        name: xavi-csi-sanity-pool1
+        size_gb: 10
+    hosts:
+      - host_index: "0"
+        connectivity: iscsi
+
+hosts:
+  - host_index: "0"
+    type: "linux"
+
+
diff --git a/scripts/csi_test/csi_params b/scripts/csi_test/csi_params
new file mode 100644
index 000000000..b8bd87cf6
--- /dev/null
+++ b/scripts/csi_test/csi_params
@@ -0,0 +1,3 @@
+#SpaceEfficiency: Compression
+pool: POOL_NAME
+#volume_name_prefix: v_olga
diff --git a/scripts/csi_test/csi_secrets b/scripts/csi_test/csi_secrets
new file mode 100644
index 000000000..d1e6f830c
--- /dev/null
+++ b/scripts/csi_test/csi_secrets
@@ -0,0 +1,23 @@
+CreateVolumeSecret:
+  management_address: STORAGE_ARRAYS
+  username: USERNAME
+  password: PASSWORD
+  alias_in_storageclass: arr1
+
+DeleteVolumeSecret:
+  management_address: STORAGE_ARRAYS
+  username: USERNAME
+  password: PASSWORD
+  alias_in_storageclass: arr1
+
+ControllerPublishVolumeSecret:
+  management_address: STORAGE_ARRAYS
+  username: USERNAME
+  password: PASSWORD
+  alias_in_storageclass: arr1
+
+ControllerUnpublishVolumeSecret:
+  management_address: STORAGE_ARRAYS
+  username: USERNAME
+  password: PASSWORD
+  alias_in_storageclass: arr1
diff --git a/scripts/csi_test/csi_tests_to_run b/scripts/csi_test/csi_tests_to_run
new file mode 100644
index 000000000..63ddab156
--- /dev/null
+++ b/scripts/csi_test/csi_tests_to_run
@@ -0,0 +1,5 @@
+ControllerGetCapabilities
+CreateVolume
+DeleteVolume
+NodeGetCapabilities
+NodeGetInfo
\ No newline at end of file
diff --git a/scripts/csi_test/entrypoint-csi-tests.sh b/scripts/csi_test/entrypoint-csi-tests.sh
new file mode 100755
index 000000000..ecc6d3627
--- /dev/null
+++ b/scripts/csi_test/entrypoint-csi-tests.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Print the test configuration (credentials are deliberately not echoed).
+echo "junit output: ${JUNIT_OUTPUT}, secrets: ${SECRET_FILE}, params: ${PARAM_FILE}, endpoint: ${ENDPOINT}, tests file: ${TESTS_TO_RUN_FILE}"
+
+
+echo "storage arrays: ${STORAGE_ARRAYS}, user: ${USERNAME}"
+
+# update CSI secret
+sed -i -e "s/STORAGE_ARRAYS/${STORAGE_ARRAYS}/g" ${SECRET_FILE}
+sed -i -e "s/USERNAME/${USERNAME}/g" ${SECRET_FILE}
+sed -i -e "s/PASSWORD/${PASSWORD}/g" ${SECRET_FILE}
+
+# credentials have now been substituted into ${SECRET_FILE}; do not echo them
+
+# update params file
+sed -i -e "s/POOL_NAME/${POOL_NAME}/g" ${PARAM_FILE}
+
+# get tests to run
+TESTS=`cat ${TESTS_TO_RUN_FILE} | tr '\n' "|"`
+
+/usr/local/go/src/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/csi-sanity --csi.endpoint ${ENDPOINT} --csi.controllerendpoint ${ENDPOINT_CONTROLLER} --csi.secrets ${SECRET_FILE} --csi.testvolumeparameters ${PARAM_FILE} --csi.junitfile ${JUNIT_OUTPUT} --ginkgo.focus ${TESTS}
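The sed calls in entrypoint-csi-tests.sh use "/" as the delimiter, which breaks as soon as a password or pool name contains a slash. The sketch below shows the same substitution with an alternative delimiter and how the ginkgo focus string ends up looking; the file paths are the csi_test files added above, and this is only an illustration, not a drop-in replacement.

#!/bin/bash
# Sketch only: same templating as entrypoint-csi-tests.sh, but with '|' as the sed
# delimiter so values containing '/' (e.g. passwords) do not break the expression.
SECRET_FILE=scripts/csi_test/csi_secrets
PARAM_FILE=scripts/csi_test/csi_params
TESTS_TO_RUN_FILE=scripts/csi_test/csi_tests_to_run

sed -i -e "s|STORAGE_ARRAYS|${STORAGE_ARRAYS}|g" \
       -e "s|USERNAME|${USERNAME}|g" \
       -e "s|PASSWORD|${PASSWORD}|g" "${SECRET_FILE}"
sed -i -e "s|POOL_NAME|${POOL_NAME}|g" "${PARAM_FILE}"

# Builds something like "ControllerGetCapabilities|CreateVolume|DeleteVolume|..."
TESTS=$(tr '\n' '|' < "${TESTS_TO_RUN_FILE}")
echo "ginkgo focus: ${TESTS}"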
diff --git a/scripts/run_unitests.sh b/scripts/run_unitests.sh
new file mode 100755
index 000000000..3d9f66247
--- /dev/null
+++ b/scripts/run_unitests.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -x
+
+[ -n "$1" ] && coverage="-v $1:/driver/coverage"
+docker build -f Dockerfile-csi-controller.test -t csi-controller-unitests . && docker run --rm -t $coverage csi-controller-unitests
diff --git a/scripts/run_yamlcheck.sh b/scripts/run_yamlcheck.sh
new file mode 100755
index 000000000..80ad17718
--- /dev/null
+++ b/scripts/run_yamlcheck.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -xe
+docker run -t -v `pwd`/deploy/kubernetes/v1.13:/deploy/kubernetes/v1.13 garethr/kubeval deploy/kubernetes/v1.13/*.yaml
+docker run -t -v `pwd`/deploy/kubernetes/v1.14:/deploy/kubernetes/v1.14 garethr/kubeval deploy/kubernetes/v1.14/*.yaml
+
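run_yamlcheck.sh hard-codes the v1.13 and v1.14 deployment folders. If more Kubernetes versions are added under deploy/kubernetes/, a loop like the following sketch (using the same garethr/kubeval image) keeps the check in sync; treat it as a suggestion rather than part of this change.

#!/bin/bash -xe
# Sketch only: validate every deploy/kubernetes/v* folder instead of hard-coding versions.
for dir in deploy/kubernetes/v*; do
    [ -d "${dir}" ] || continue
    # mount the version folder at the same path inside the container, as the existing script does
    docker run -t -v "$(pwd)/${dir}:/${dir}" garethr/kubeval "${dir}"/*.yaml
done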