diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..49f14e66c --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,38 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + rebase-strategy: "disabled" + reviewers: + - "IBM/csi-reviewers" + labels: + - go + - dependencies + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + rebase-strategy: "disabled" + reviewers: + - "IBM/csi-reviewers" + labels: + - python + - dependencies + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + rebase-strategy: "disabled" + reviewers: + - "IBM/csi-reviewers" + labels: + - github_actions + - dependencies + - testing diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index c5c971f8c..cc2e71d67 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run link check uses: gaurav-nelson/github-action-markdown-link-check@v1 with: diff --git a/Dockerfile-controllers.test b/Dockerfile-controllers.test index f9e4fb847..3f19406e1 100644 --- a/Dockerfile-controllers.test +++ b/Dockerfile-controllers.test @@ -1,4 +1,4 @@ -# Copyright IBM Corporation 2019. +# Copyright IBM Corporation 2024. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ # This Dockerfile.test is for running the csi controller local tests inside a container. 
# Its similar to the Dockerfile, but with additional requirements-tests.txt and ENTRYPOINT to run the local tests. -FROM registry.access.redhat.com/ubi8/python-38:1-75.1638364053 as builder +FROM registry.access.redhat.com/ubi8/python-38:1-125.1682304659 as builder USER root RUN if [[ "$(uname -m)" != "x86"* ]]; then yum install -y rust-toolset; fi USER default @@ -39,7 +39,7 @@ COPY controllers/tests/requirements.txt ./requirements-tests.txt RUN pip3 install -r ./requirements-tests.txt -FROM registry.access.redhat.com/ubi8/python-38:1-100 +FROM registry.access.redhat.com/ubi8/python-38:1-125.1682304659 COPY --from=builder /opt/app-root /opt/app-root COPY ./common /driver/common diff --git a/Dockerfile-csi-controller b/Dockerfile-csi-controller index bf0d9b740..afb1c1313 100644 --- a/Dockerfile-csi-controller +++ b/Dockerfile-csi-controller @@ -1,4 +1,4 @@ -# Copyright IBM Corporation 2019. +# Copyright IBM Corporation 2024. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM registry.access.redhat.com/ubi8/python-38:1-75.1638364053 as builder +FROM registry.access.redhat.com/ubi8/python-38:1-125.1682304659 as builder USER root RUN if [[ "$(uname -m)" != "x86"* ]]; then yum install -y rust-toolset; fi USER default @@ -26,14 +26,15 @@ RUN pip3 install -r ./requirements.txt USER root COPY controllers/scripts/csi_general . +RUN chmod +x csi_pb2.sh RUN ./csi_pb2.sh RUN pip3 install . 
-FROM registry.access.redhat.com/ubi8/python-38:1-100 +FROM registry.access.redhat.com/ubi8/python-38:1-125.1682304659 MAINTAINER IBM Storage -ARG VERSION=1.11.0 +ARG VERSION=1.12.0 ARG BUILD_NUMBER=0 ###Required Labels @@ -53,9 +54,16 @@ COPY ./controllers/ /driver/controllers/ COPY ./LICENSE /licenses/ COPY ./NOTICES /licenses/ +USER root +RUN chmod +x /driver/controllers/scripts/entrypoint.sh +USER default + WORKDIR /driver ENV PYTHONPATH=/driver -# Note: UBI runs with app-user by default. +USER root +RUN yum update -y +USER default +# Note: UBI runs with app-user by default. ENTRYPOINT ["/driver/controllers/scripts/entrypoint.sh"] diff --git a/Dockerfile-csi-host-definer b/Dockerfile-csi-host-definer index 2ac7c354a..ce2f638c4 100644 --- a/Dockerfile-csi-host-definer +++ b/Dockerfile-csi-host-definer @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/python-38:1-75.1638364053 as builder +FROM registry.access.redhat.com/ubi8/python-38:1-125.1682304659 as builder USER root RUN if [[ "$(uname -m)" != "x86"* ]]; then yum install -y rust-toolset; fi @@ -14,12 +14,13 @@ RUN pip3 install -r ./csi_requirements.txt -r ./host_definer_requirements.txt USER root COPY controllers/scripts/csi_general . +RUN chmod +x csi_pb2.sh RUN ./csi_pb2.sh RUN pip3 install . -FROM registry.access.redhat.com/ubi8/python-38:1-100 +FROM registry.access.redhat.com/ubi8/python-38:1-125.1682304659 -ARG VERSION=1.11.0 +ARG VERSION=1.12.0 ARG BUILD_NUMBER=0 ###Required Labels @@ -43,4 +44,8 @@ COPY ./NOTICES /licenses/ WORKDIR /driver/controllers/servers/host_definer/ ENV PYTHONPATH=/driver +USER root +RUN yum update -y + +USER default CMD ["python3", "/driver/controllers/servers/host_definer/main.py"] diff --git a/Dockerfile-csi-node b/Dockerfile-csi-node index 729e5428d..8b70b916b 100644 --- a/Dockerfile-csi-node +++ b/Dockerfile-csi-node @@ -1,4 +1,4 @@ -# Copyright IBM Corporation 2019. +# Copyright IBM Corporation 2024. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # limitations under the License. # Build stage -FROM golang:1.13 as builder +FROM golang:1.19 as builder WORKDIR /go/src/github.com/ibm/ibm-block-csi-driver ENV GO111MODULE=on @@ -27,10 +27,10 @@ COPY . . RUN make ibm-block-csi-driver # Final stage -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6-854 +FROM registry.access.redhat.com/ubi9-minimal:9.3-1612 MAINTAINER IBM Storage -ARG VERSION=1.11.0 +ARG VERSION=1.12.0 ARG BUILD_NUMBER=0 LABEL name="IBM block storage CSI driver node" \ @@ -74,5 +74,8 @@ RUN ln -s /chroot/chroot-host-wrapper.sh /chroot/blkid \ && ln -s /chroot/chroot-host-wrapper.sh /chroot/xfs_growfs ENV PATH="/chroot:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +USER root +RUN microdnf update -y +USER default ENTRYPOINT ["/root/entrypoint.sh"] diff --git a/Dockerfile-csi-node.test b/Dockerfile-csi-node.test index a14a18261..65303f89b 100644 --- a/Dockerfile-csi-node.test +++ b/Dockerfile-csi-node.test @@ -1,4 +1,4 @@ -# Copyright IBM Corporation 2019. +# Copyright IBM Corporation 2024. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # limitations under the License. # Build stage -FROM golang:1.13 as builder +FROM golang:1.19 as builder WORKDIR /go/src/github.com/ibm/ibm-block-csi-driver ENV GO111MODULE=on @@ -24,6 +24,9 @@ COPY go.sum . RUN go mod download RUN go get github.com/tebeka/go2xunit # when GO111MODULE=on the module will not become executable, so get it here to run it as binary. RUN go get github.com/golang/mock/gomock +RUN go get github.com/golang/mock/mockgen +RUN go install github.com/tebeka/go2xunit +RUN go install github.com/golang/mock/gomock RUN go install github.com/golang/mock/mockgen COPY . . 
diff --git a/Dockerfile-csi-test b/Dockerfile-csi-test index fa36574cd..82094568b 100644 --- a/Dockerfile-csi-test +++ b/Dockerfile-csi-test @@ -1,4 +1,4 @@ -# Copyright IBM Corporation 2019. +# Copyright IBM Corporation 2024. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/NOTICES b/NOTICES index 8349c9ca4..2039beb5c 100644 --- a/NOTICES +++ b/NOTICES @@ -1,2399 +1,2399 @@ - -Additional Third Party Software License Agreements and Notices - -This file details additional third party software license agreements -and third party notices and information that are required -to be reproduced for the following programs: - -IBM Block Storage CSI Driver version 1.10.0 - - - -=========================================================================== -Section 1 - TERMS AND CONDITIONS FOR SEPARATELY LICENSED CODE -=========================================================================== - -The "Separately Licensed Code" identified in Section 1 of this -document is provided to Licensee under terms and conditions that -are different from the license agreement for the Program. - -Licensee's use of such components or portions thereof is subject to the -terms of the associated license agreement provided or referenced in this -section and not the terms of the license agreement for the Program. - -The following are Separately Licensed Code: - -Red Hat Universal Base Image 8 -Red Hat Universal Base Image 8 Python-3.8 - - - - - -@@@@@@@@@@@@ -=========================================================================== -THE FOLLOWING TERMS AND CONDITIONS APPLY to Red Hat Universal -Base Image 8 software: ---------------------------------------------------------------------------- - -END USER LICENSE AGREEMENT - -RED HAT UNIVERSAL BASE IMAGE - - -PLEASE READ THIS END USER LICENSE AGREEMENT CAREFULLY BEFORE USING -SOFTWARE FROM RED HAT. 
BY USING RED HAT SOFTWARE, YOU SIGNIFY YOUR -ASSENT TO AND ACCEPTANCE OF THIS END USER LICENSE AGREEMENT AND -ACKNOWLEDGE YOU HAVE READ AND UNDERSTAND THE TERMS. AN INDIVIDUAL ACTING -ON BEHALF OF AN ENTITY REPRESENTS THAT HE OR SHE HAS THE AUTHORITY TO -ENTER INTO THIS END USER LICENSE AGREEMENT ON BEHALF OF THAT ENTITY. IF -YOU DO NOT ACCEPT THE TERMS OF THIS AGREEMENT, THEN YOU MUST NOT USE THE -RED HAT SOFTWARE. THIS END USER LICENSE AGREEMENT DOES NOT PROVIDE ANY -RIGHTS TO RED HAT SERVICES SUCH AS SOFTWARE MAINTENANCE, UPGRADES OR -SUPPORT. PLEASE REVIEW YOUR SERVICE OR SUBSCRIPTION AGREEMENT(S) THAT -YOU MAY HAVE WITH RED HAT OR OTHER AUTHORIZED RED HAT SERVICE PROVIDERS -REGARDING SERVICES AND ASSOCIATED PAYMENTS. - -This end user license agreement (“EULA”) governs the use of Red Hat -Universal Base Image and associated software supporting such -container(s) and any related updates, source code, including the -appearance, structure and organization (the “Programs”), regardless of -the delivery mechanism. If a Red Hat Universal Base Image is included -in another Red Hat product, the EULA terms of such other Red Hat product -will apply and supersede this EULA. If a Red Hat Universal Base Image -is included in a third party work, the terms of this EULA will continue -to govern the Red Hat Universal Base Image. - -1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. -(“Red Hat”) grants to you a perpetual, worldwide license to the Programs -(each of which may include multiple software components). With the -exception of the Red Hat trademark identified in Section 2 below, each -software component is governed by a license that permits you to run, -copy, modify, and redistribute (subject to certain obligations in some -cases) the software components. This EULA pertains solely to the -Programs and does not limit your rights under, or grant you rights that -supersede, the license terms applicable to any particular component. 
-The license terms applicable to each software component are provided in -the source code of that component. - -2. Intellectual Property Rights. The Programs and each of their -components are owned by Red Hat and other licensors and are protected -under copyright law and other laws as applicable. Title to the Programs -and any component shall remain with Red Hat and other licensors, subject -to the applicable license, excluding any independently developed and -licensed work. The “Red Hat” trademark is a registered trademark of Red -Hat and its affiliates in the U.S. and other countries. Subject to Red -Hat’s trademark usage guidelines (set forth at -http://www.redhat.com/about/corporate/trademark/), this EULA permits you -to distribute the Programs that include the Red Hat trademark, provided -you do not make any statements on behalf of Red Hat, including but not -limited to, stating or in any way suggesting (in any public, private -and/or confidential statement (whether written or verbal)) that Red Hat -supports or endorses software built and delivered with a Red Hat -Universal Base Image(s) (such derivative works referred to as a “Red Hat -Based Container Images”); provided if a Red Hat Based Container Image is -Red Hat Certified and deployed on a Red Hat supported configuration as -set forth at https://access.redhat.com/articles/2726611 then you may -state that the Red Hat Universal Base Image is supported by Red Hat. -You agree to include this unmodified EULA in all distributions of -container images sourced, built or otherwise derived from the Programs. -If you modify the Red Hat Universal Base Image(s), you must remove any -Red Hat trademark(s) prior to any subsequent distribution. Any breach -of this Section 2 is a material breach of the EULA and you may no longer -use and/or distribute the Red Hat trademark(s). Modifications to the -software may corrupt the Programs. - -3. Limited Warranty. 
Except as specifically stated in this Section 3, -a separate agreement with Red Hat, or a license for a particular -component, to the maximum extent permitted under applicable law, the -Programs and the components are provided and licensed “as is” without -warranty of any kind, expressed or implied, including the implied -warranties of merchantability, non-infringement or fitness for a -particular purpose. Neither Red Hat nor its affiliates warrant that the -functions contained in the Programs will meet your requirements or that -the operation of the Programs will be entirely error free, appear or -perform precisely as described in the accompanying documentation, or -comply with regulatory requirements. Red Hat warrants that the media on -which the Programs and the components are provided will be free from -defects in materials and manufacture under normal use for a period of 30 -days from the date of delivery to you. This warranty extends only to the -party that purchases subscription services for the supported -configurations from Red Hat and/or its affiliates or a Red Hat -authorized distributor. - -4. Limitation of Remedies and Liability. To the maximum extent -permitted by applicable law, your exclusive remedy under this EULA is to -return any defective media within 30 days of delivery along with a copy -of your payment receipt and Red Hat, at its option, will replace it or -refund the money you paid for the media. To the maximum extent -permitted under applicable law, under no circumstances will Red Hat, -its affiliates, any Red Hat authorized distributor, or the licensor of -any component provided to you under this EULA be liable to you for any -incidental or consequential damages, including lost profits or lost -savings arising out of the use or inability to use the Programs or any -component, even if Red Hat, its affiliates, an authorized distributor, -and/or licensor has been advised of the possibility of such damages. 
In -no event shall Red Hat's or its affiliates’ liability, an authorized -distributor’s liability or the liability of the licensor of a component -provided to you under this EULA exceed the amount that you paid to Red -Hat for the media under this EULA. - -5. Export Control. As required by the laws of the United States and -other countries, you represent and warrant that you: (a) understand that -the Programs and their components may be subject to export controls -under the U.S. Commerce Department’s Export Administration Regulations -(“EAR”); (b) are not located in a prohibited destination country under -the EAR or U.S. sanctions regulations (currently Cuba, Iran, North -Korea, Sudan, Syria, and the Crimea Region of Ukraine, subject to change -as posted by the United States government); (c) will not export, -re-export, or transfer the Programs to any prohibited destination, -persons or entities on the U.S. Bureau of Industry and Security Denied -Parties List or Entity List, or the U.S. 
Office of Foreign Assets -Control list of Specially Designated Nationals and Blocked Persons, or -any similar lists maintained by other countries, without the necessary -export license(s) or authorizations(s); (d) will not use or transfer the -Programs for use in connection with any nuclear, chemical or biological -weapons, missile technology, or military end-uses where prohibited by an -applicable arms embargo, unless authorized by the relevant government -agency by regulation or specific license; (e) understand and agree that -if you are in the United States and export or transfer the Programs to -eligible end users, you will, to the extent required by EAR Section -740.17(e), submit semi-annual reports to the Commerce Department’s -Bureau of Industry and Security, which include the name and address -(including country) of each transferee; and (f) understand that -countries including the United States may restrict the import, use, or -export of encryption products (which may include the Programs and the -components) and agree that you shall be solely responsible for -compliance with any such import, use, or export restrictions. - -6. Third Party Software. The Program may be provided with third party -software programs subject to their own license terms. The license terms -either accompany the third party software programs or, in some -instances, may be viewed at registry.access.redhat.com. If you do not -agree to abide by the applicable license terms for the third party -software programs, then you may not install, distribute or use them. - -7. General. If any provision of this EULA is held to be unenforceable, -the enforceability of the remaining provisions shall not be affected. -Any claim, controversy or dispute arising under or relating to this EULA -shall be governed by the laws of the State of New York and of the United -States, without regard to any conflict of laws provisions. 
The rights -and obligations of the parties to this EULA shall not be governed by the -United Nations Convention on the International Sale of Goods. - -Copyright © 2019 Red Hat, Inc. All rights reserved. “Red Hat,” is a -registered trademark of Red Hat, Inc. All other trademarks are the -property of their respective owners. - -=========================================================================== -END OF TERMS AND CONDITIONS FOR Red Hat Universal Base Image 8 -=========================================================================== - - - - - -@@@@@@@@@@@@ -=========================================================================== -THE FOLLOWING TERMS AND CONDITIONS APPLY to Red Hat Universal -Base Image 8 Python-3.8 software: ---------------------------------------------------------------------------- - - -END USER LICENSE AGREEMENT - -RED HAT UNIVERSAL BASE IMAGE - - -PLEASE READ THIS END USER LICENSE AGREEMENT CAREFULLY BEFORE USING -SOFTWARE FROM RED HAT. BY USING RED HAT SOFTWARE, YOU SIGNIFY YOUR -ASSENT TO AND ACCEPTANCE OF THIS END USER LICENSE AGREEMENT AND -ACKNOWLEDGE YOU HAVE READ AND UNDERSTAND THE TERMS. AN INDIVIDUAL ACTING -ON BEHALF OF AN ENTITY REPRESENTS THAT HE OR SHE HAS THE AUTHORITY TO -ENTER INTO THIS END USER LICENSE AGREEMENT ON BEHALF OF THAT ENTITY. IF -YOU DO NOT ACCEPT THE TERMS OF THIS AGREEMENT, THEN YOU MUST NOT USE THE -RED HAT SOFTWARE. THIS END USER LICENSE AGREEMENT DOES NOT PROVIDE ANY -RIGHTS TO RED HAT SERVICES SUCH AS SOFTWARE MAINTENANCE, UPGRADES OR -SUPPORT. PLEASE REVIEW YOUR SERVICE OR SUBSCRIPTION AGREEMENT(S) THAT -YOU MAY HAVE WITH RED HAT OR OTHER AUTHORIZED RED HAT SERVICE PROVIDERS -REGARDING SERVICES AND ASSOCIATED PAYMENTS. 
- -This end user license agreement (“EULA”) governs the use of Red Hat -Universal Base Image and associated software supporting such -container(s) and any related updates, source code, including the -appearance, structure and organization (the “Programs”), regardless of -the delivery mechanism. If a Red Hat Universal Base Image is included -in another Red Hat product, the EULA terms of such other Red Hat product -will apply and supersede this EULA. If a Red Hat Universal Base Image -is included in a third party work, the terms of this EULA will continue -to govern the Red Hat Universal Base Image. - -1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. -(“Red Hat”) grants to you a perpetual, worldwide license to the Programs -(each of which may include multiple software components). With the -exception of the Red Hat trademark identified in Section 2 below, each -software component is governed by a license that permits you to run, -copy, modify, and redistribute (subject to certain obligations in some -cases) the software components. This EULA pertains solely to the -Programs and does not limit your rights under, or grant you rights that -supersede, the license terms applicable to any particular component. -The license terms applicable to each software component are provided in -the source code of that component. - -2. Intellectual Property Rights. The Programs and each of their -components are owned by Red Hat and other licensors and are protected -under copyright law and other laws as applicable. Title to the Programs -and any component shall remain with Red Hat and other licensors, subject -to the applicable license, excluding any independently developed and -licensed work. The “Red Hat” trademark is a registered trademark of Red -Hat and its affiliates in the U.S. and other countries. 
Subject to Red -Hat’s trademark usage guidelines (set forth at -http://www.redhat.com/about/corporate/trademark/), this EULA permits you -to distribute the Programs that include the Red Hat trademark, provided -you do not make any statements on behalf of Red Hat, including but not -limited to, stating or in any way suggesting (in any public, private -and/or confidential statement (whether written or verbal)) that Red Hat -supports or endorses software built and delivered with a Red Hat -Universal Base Image(s) (such derivative works referred to as a “Red Hat -Based Container Images”); provided if a Red Hat Based Container Image is -Red Hat Certified and deployed on a Red Hat supported configuration as -set forth at https://access.redhat.com/articles/2726611 then you may -state that the Red Hat Universal Base Image is supported by Red Hat. -You agree to include this unmodified EULA in all distributions of -container images sourced, built or otherwise derived from the Programs. -If you modify the Red Hat Universal Base Image(s), you must remove any -Red Hat trademark(s) prior to any subsequent distribution. Any breach -of this Section 2 is a material breach of the EULA and you may no longer -use and/or distribute the Red Hat trademark(s). Modifications to the -software may corrupt the Programs. - -3. Limited Warranty. Except as specifically stated in this Section 3, -a separate agreement with Red Hat, or a license for a particular -component, to the maximum extent permitted under applicable law, the -Programs and the components are provided and licensed “as is” without -warranty of any kind, expressed or implied, including the implied -warranties of merchantability, non-infringement or fitness for a -particular purpose. 
Neither Red Hat nor its affiliates warrant that the -functions contained in the Programs will meet your requirements or that -the operation of the Programs will be entirely error free, appear or -perform precisely as described in the accompanying documentation, or -comply with regulatory requirements. Red Hat warrants that the media on -which the Programs and the components are provided will be free from -defects in materials and manufacture under normal use for a period of 30 -days from the date of delivery to you. This warranty extends only to the -party that purchases subscription services for the supported -configurations from Red Hat and/or its affiliates or a Red Hat -authorized distributor. - -4. Limitation of Remedies and Liability. To the maximum extent -permitted by applicable law, your exclusive remedy under this EULA is to -return any defective media within 30 days of delivery along with a copy -of your payment receipt and Red Hat, at its option, will replace it or -refund the money you paid for the media. To the maximum extent -permitted under applicable law, under no circumstances will Red Hat, -its affiliates, any Red Hat authorized distributor, or the licensor of -any component provided to you under this EULA be liable to you for any -incidental or consequential damages, including lost profits or lost -savings arising out of the use or inability to use the Programs or any -component, even if Red Hat, its affiliates, an authorized distributor, -and/or licensor has been advised of the possibility of such damages. In -no event shall Red Hat's or its affiliates’ liability, an authorized -distributor’s liability or the liability of the licensor of a component -provided to you under this EULA exceed the amount that you paid to Red -Hat for the media under this EULA. - -5. Export Control. 
As required by the laws of the United States and -other countries, you represent and warrant that you: (a) understand that -the Programs and their components may be subject to export controls -under the U.S. Commerce Department’s Export Administration Regulations -(“EAR”); (b) are not located in a prohibited destination country under -the EAR or U.S. sanctions regulations (currently Cuba, Iran, North -Korea, Sudan, Syria, and the Crimea Region of Ukraine, subject to change -as posted by the United States government); (c) will not export, -re-export, or transfer the Programs to any prohibited destination, -persons or entities on the U.S. Bureau of Industry and Security Denied -Parties List or Entity List, or the U.S. Office of Foreign Assets -Control list of Specially Designated Nationals and Blocked Persons, or -any similar lists maintained by other countries, without the necessary -export license(s) or authorizations(s); (d) will not use or transfer the -Programs for use in connection with any nuclear, chemical or biological -weapons, missile technology, or military end-uses where prohibited by an -applicable arms embargo, unless authorized by the relevant government -agency by regulation or specific license; (e) understand and agree that -if you are in the United States and export or transfer the Programs to -eligible end users, you will, to the extent required by EAR Section -740.17(e), submit semi-annual reports to the Commerce Department’s -Bureau of Industry and Security, which include the name and address -(including country) of each transferee; and (f) understand that -countries including the United States may restrict the import, use, or -export of encryption products (which may include the Programs and the -components) and agree that you shall be solely responsible for -compliance with any such import, use, or export restrictions. - -6. Third Party Software. 
The Program may be provided with third party -software programs subject to their own license terms. The license terms -either accompany the third party software programs or, in some -instances, may be viewed at registry.access.redhat.com. If you do not -agree to abide by the applicable license terms for the third party -software programs, then you may not install, distribute or use them. - -7. General. If any provision of this EULA is held to be unenforceable, -the enforceability of the remaining provisions shall not be affected. -Any claim, controversy or dispute arising under or relating to this EULA -shall be governed by the laws of the State of New York and of the United -States, without regard to any conflict of laws provisions. The rights -and obligations of the parties to this EULA shall not be governed by the -United Nations Convention on the International Sale of Goods. - -Copyright © 2019 Red Hat, Inc. All rights reserved. “Red Hat,” is a -registered trademark of Red Hat, Inc. All other trademarks are the -property of their respective owners. 
- -=========================================================================== -END OF TERMS AND CONDITIONS FOR Red Hat Universal Base Image 8 Python-3.8 -=========================================================================== - - - - - -@@@@@@@@@@@@ -=========================================================================== -GNU General Public License 2.0: THE FOLLOWING TERMS AND CONDITIONS -APPLY to the listed components below which are licensed under the GNU -General Public License 2.0: - -Portions of Red Hat Universal Base Image 8 -Portions of Red Hat Universal Base Image 8 Python-3.8 - ---------------------------------------------------------------------------- -Start of GNU GPL Version 2.0 License ---------------------------------------------------------------------------- - - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. 
- - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. 
You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. 
However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. 
-You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
- ---------------------------------------------------------------------------- -End of GNU GPL Version 2.0 License ---------------------------------------------------------------------------- - - - - -@@@@@@@@@@@@ -=========================================================================== -GNU General Public License 3.0: THE FOLLOWING TERMS AND CONDITIONS -APPLY to the listed components below which are licensed under the GNU -General Public License 3.0: - -Portions of Red Hat Universal Base Image 8 -Portions of Red Hat Universal Base Image 8 Python-3.8 - ---------------------------------------------------------------------------- -Start of GNU GPL Version 3.0 License ---------------------------------------------------------------------------- - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
- ---------------------------------------------------------------------------- -End of GNU GPL Version 3.0 License ---------------------------------------------------------------------------- - - - - -=========================================================================== -END OF TERMS AND CONDITIONS FOR SEPARATELY LICENSED CODE for IBM Block -Storage CSI Driver version 1.10.0 -=========================================================================== - - - - -@@@@@@@@@@@@ -=========================================================================== -======================== SOURCE CODE OFFERS =============================== - -GNU GPL and / or LGPL Source Code for: - -IBM Block Storage CSI Driver 1.10.0 - -=========================================================================== - - - - - -@@@@@@@@@@@@ -=========================================================================== -General Public License 2.0: The Program includes some or all of the -following licensed to the licensee as Separately Licensed Code under the -GNU General Public License 2.0. -=========================================================================== - - Red Hat Universal Base Image 8 - Red Hat Universal Base Image 8 Python 3.8 - -Source code to any of the above-listed packages distributed with IBM Block -Storage CSI Driver 1.10.0 is available at the website below, when a URL is -provided, or by sending a request to the following address or email: - - IBM Corporation - Attn: Dept 4XNA / 9032-2, Storage Open Source Management - 9000 S. Rita Road - Tucson, AZ 85744 - -Please identify the name of the IBM product and the GPL or LGPL licensed -program(s) required in the request for source code. 
- -=========================================================================== -END of GNU General Public License 2.0 Notices and Information -=========================================================================== - - - -@@@@@@@@@@@@ -=========================================================================== -General Public License 3.0: The Program includes some or all of the -following licensed to the licensee as Separately Licensed Code under the -GNU General Public License 3.0. -=========================================================================== - - Red Hat Universal Base Image 8 - Red Hat Universal Base Image 8 Python 3.8 - -Source code to any of the above-listed packages distributed with IBM Block -Storage CSI Driver 1.10.0 is available at the website below, when a URL is -provided, or by sending a request to the following address or email: - - IBM Corporation - Attn: Dept 4XNA / 9032-2, Storage Open Source Management - 9000 S. Rita Road - Tucson, AZ 85744 - -Please identify the name of the IBM product and the GPL or LGPL licensed -program(s) required in the request for source code. - -=========================================================================== -END of GNU General Public License 3.0 Notices and Information -=========================================================================== - - -========================================================================= -Section 2 - NOTICES and INFORMATION -========================================================================= - -Notwithstanding the terms and conditions of any other agreement Licensee -may have with IBM or any of its related or affiliated entities -(collectively "IBM"), the third party code identified below is subject -to the terms and conditions of the license agreement for the -Program and not the license terms that may be contained in the notices -below. The notices are provided for informational purposes. 
- -IMPORTANT: IBM does not represent or warrant that the information in this -NOTICES file is accurate. Third party websites are independent of IBM and -IBM does not represent or warrant that the information on any third party -website referenced in this NOTICES file is accurate. IBM disclaims any -and all liability for errors and omissions or for any damages accruing -from the use of this NOTICES file or its contents, including without -limitation URLs or references to any third party websites. - ---------------------------------------------------------------------------- -The following are NOTICES and INFORMATION: ---------------------------------------------------------------------------- - - - -@@@@@@@@@@@@ -=========================================================================== -APACHE 2.0 LICENSED CODE: The Program includes all or portions of -the following software which IBM obtained under the terms and conditions -of the Apache License Version 2.0: - -csi-lib-utils-0.9.1 -gophercloud-0.1.0 -k8s.io/klog-1.0.0 -k8s.io/mount-utils-0.20.13 -k8s.io/structured-merge-diff-3.0.0 -k8s.io/utils commit 67b214c -kubernetes-client/python-23.6.0 -pyds8k version 1.4.0 -pysvc version 1.1.1 -pyxcli version 1.2.1 -controller-util-0.3.0 -kubernetes/apimachinery-0.19.0 -k8s.io/client-go-0.19.0 -grpc-1.29.0 -grpcio-1.41.1 -grpcio-tools-1.41.1 -spec 1.5.0 -kubernetes/apimachinery-0.18.0 -kubernetes/utils-4140de9 -mock-1.3.1 -packaging-20.1 -retry version 0.9.2 -yaml-2.27.1 - ---------------------------------------------------------------------------- -Start of Apache License 2.0 ---------------------------------------------------------------------------- - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - ---------------------------------------------------------------------------- -End of Apache License 2.0 ---------------------------------------------------------------------------- - - - -=========================================================================== -NOTICE file corresponding to section 4(d) of the Apache License, -Version 2.0, in this case for the grpc-1.29.0 distribution -=========================================================================== - -Copyright 2014 gRPC authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - -=========================================================================== -NOTICE file corresponding to section 4(d) of the Apache License, -Version 2.0, in this case for the yaml-2.27.1 distribution -=========================================================================== - -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -=========================================================================== -END OF APACHE 2.0 NOTICES AND INFORMATION -=========================================================================== - - - -@@@@@@@@@@@@ -=========================================================================== -mergo-0.3.7: The Program includes mergo-0.3.7 software. IBM obtained the -mergo-0.3.7 software under the terms and conditions of the following -license(s): ---------------------------------------------------------------------------- - - - -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -=========================================================================== -END OF mergo-0.3.7 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -logrus-1.6.0: The Program includes logrus-1.6.0 software. 
IBM obtained the -logrus-1.6.0 software under the terms and conditions of the following -license(s): ---------------------------------------------------------------------------- - - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - - -=========================================================================== -END OF logrus-1.6.0 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -sys commit ed371f2: The Program includes sys commit ed371f2 software. IBM -obtained the sys commit ed371f2 software under the terms and conditions of -the following license(s): ---------------------------------------------------------------------------- - - -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -======================================================================= - -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. - - -=========================================================================== -END OF sys commit ed371f2 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -munch-2.3.2: The Program includes munch-2.3.2 software. 
IBM obtained the -munch-2.3.2 software under the terms and conditions of the following -license(s): ---------------------------------------------------------------------------- - - -Copyright (c) 2010 David Schoonover - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -=========================================================================== -END OF munch-2.3.2 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -base58-2.0.0: The Program includes base58-2.0.0 software. 
IBM obtained the -base58-2.0.0 software under the terms and conditions of the following -license(s): ---------------------------------------------------------------------------- - - -Copyright (c) 2015 David Keijser - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -=========================================================================== -END OF base58-2.0.0 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -grpc-1.29.0: The Program includes grpc-1.29.0 software. IBM obtained -portions of the grpc-1.29.0 software under the terms and conditions of the -following license(s): ---------------------------------------------------------------------------- - - - -Copyright (c) 2009-2011, Google Inc. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -* Neither the name of Google Inc. nor the names of any other -contributors may be used to endorse or promote products -derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY GOOGLE INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL GOOGLE INC. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER -IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -==================================================================================== - -Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. 
Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -3. Neither the name of the project nor the names of its contributors -may be used to endorse or promote products derived from this software -without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -SUCH DAMAGE. - -==================================================================================== - -Lunit License -------------- - -Lunit is written by Michael Roth and is licensed -under the terms of the MIT license reproduced below. 
- -======================================================================== - -Copyright (c) 2004-2010 Michael Roth - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - -=========================================================================== -END OF grpc-1.29.0 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -protobuf-3.15.0: The Program includes protobuf-3.15.0 software. IBM -obtained the protobuf-3.15.0 software under the terms and conditions of the -following license(s): ---------------------------------------------------------------------------- - - - -Copyright 2008 Google Inc. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. 
- - - -=========================================================================== -END OF protobuf-3.15.0 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -pyyaml-6: The Program includes pyyaml-6 software. IBM obtained the pyyaml-6 -software under the terms and conditions of the following license(s): ---------------------------------------------------------------------------- - - -Copyright (c) 2017-2021 Ingy döt Net -Copyright (c) 2006-2016 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -=========================================================================== -END OF pyyaml-6 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -setuptools (python)-41.0.1: The Program includes setuptools (python)-41.0.1 -software. IBM obtained the setuptools (python)-41.0.1 software under the -terms and conditions of the following license(s): ---------------------------------------------------------------------------- - - -Copyright (C) 2016 Jason R Coombs - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------------------------- - -Copyright (c) 2003-2018 Paul T. 
McGuire - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ---------------------------------------------------------------------------- - -Copyright (c) 2010-2015 Benjamin Peterson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -=========================================================================== -END OF setuptools (python)-41.0.1 NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -sync commit cd5d95a: The Program includes sync commit cd5d95a software. IBM -obtained the sync commit cd5d95a software under the terms and conditions of -the following license(s): ---------------------------------------------------------------------------- - - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -======================================================================= - -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. 
- - -=========================================================================== -END OF sync commit cd5d95a NOTICES AND INFORMATION -=========================================================================== - - -@@@@@@@@@@@@ -=========================================================================== -yaml-2.2.8: The Program includes yaml-2.2.8 software. IBM obtained portions -of the yaml-2.2.8 software under the terms and conditions of the following -license(s): ---------------------------------------------------------------------------- - - -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -=========================================================================== -END OF yaml-2.2.8 NOTICES AND INFORMATION -=========================================================================== - - -=========================================================================== -END OF NOTICES AND INFORMATION FOR IBM Block Storage CSI -Driver 1.10.0 -=========================================================================== - + +Additional Third Party Software License Agreements and Notices + +This file details additional third party software license agreements +and third party notices and information that are required +to be reproduced for the following programs: + +IBM Block Storage CSI Driver version 1.11.0 + + + +=========================================================================== +Section 1 - TERMS AND CONDITIONS FOR SEPARATELY LICENSED CODE +=========================================================================== + +The "Separately Licensed Code" identified in Section 1 of this +document is provided to Licensee under terms and conditions that +are different from the license agreement for the Program. + +Licensee's use of such components or portions thereof is subject to the +terms of the associated license agreement provided or referenced in this +section and not the terms of the license agreement for the Program. + +The following are Separately Licensed Code: + +Red Hat Universal Base Image 8 +Red Hat Universal Base Image 8 Python-3.8 + + + + + +@@@@@@@@@@@@ +=========================================================================== +THE FOLLOWING TERMS AND CONDITIONS APPLY to Red Hat Universal +Base Image 8 software: +--------------------------------------------------------------------------- + +END USER LICENSE AGREEMENT + +RED HAT UNIVERSAL BASE IMAGE + + +PLEASE READ THIS END USER LICENSE AGREEMENT CAREFULLY BEFORE USING +SOFTWARE FROM RED HAT. 
BY USING RED HAT SOFTWARE, YOU SIGNIFY YOUR +ASSENT TO AND ACCEPTANCE OF THIS END USER LICENSE AGREEMENT AND +ACKNOWLEDGE YOU HAVE READ AND UNDERSTAND THE TERMS. AN INDIVIDUAL ACTING +ON BEHALF OF AN ENTITY REPRESENTS THAT HE OR SHE HAS THE AUTHORITY TO +ENTER INTO THIS END USER LICENSE AGREEMENT ON BEHALF OF THAT ENTITY. IF +YOU DO NOT ACCEPT THE TERMS OF THIS AGREEMENT, THEN YOU MUST NOT USE THE +RED HAT SOFTWARE. THIS END USER LICENSE AGREEMENT DOES NOT PROVIDE ANY +RIGHTS TO RED HAT SERVICES SUCH AS SOFTWARE MAINTENANCE, UPGRADES OR +SUPPORT. PLEASE REVIEW YOUR SERVICE OR SUBSCRIPTION AGREEMENT(S) THAT +YOU MAY HAVE WITH RED HAT OR OTHER AUTHORIZED RED HAT SERVICE PROVIDERS +REGARDING SERVICES AND ASSOCIATED PAYMENTS. + +This end user license agreement (“EULA”) governs the use of Red Hat +Universal Base Image and associated software supporting such +container(s) and any related updates, source code, including the +appearance, structure and organization (the “Programs”), regardless of +the delivery mechanism. If a Red Hat Universal Base Image is included +in another Red Hat product, the EULA terms of such other Red Hat product +will apply and supersede this EULA. If a Red Hat Universal Base Image +is included in a third party work, the terms of this EULA will continue +to govern the Red Hat Universal Base Image. + +1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. +(“Red Hat”) grants to you a perpetual, worldwide license to the Programs +(each of which may include multiple software components). With the +exception of the Red Hat trademark identified in Section 2 below, each +software component is governed by a license that permits you to run, +copy, modify, and redistribute (subject to certain obligations in some +cases) the software components. This EULA pertains solely to the +Programs and does not limit your rights under, or grant you rights that +supersede, the license terms applicable to any particular component. 
+The license terms applicable to each software component are provided in +the source code of that component. + +2. Intellectual Property Rights. The Programs and each of their +components are owned by Red Hat and other licensors and are protected +under copyright law and other laws as applicable. Title to the Programs +and any component shall remain with Red Hat and other licensors, subject +to the applicable license, excluding any independently developed and +licensed work. The “Red Hat” trademark is a registered trademark of Red +Hat and its affiliates in the U.S. and other countries. Subject to Red +Hat’s trademark usage guidelines (set forth at +http://www.redhat.com/about/corporate/trademark/), this EULA permits you +to distribute the Programs that include the Red Hat trademark, provided +you do not make any statements on behalf of Red Hat, including but not +limited to, stating or in any way suggesting (in any public, private +and/or confidential statement (whether written or verbal)) that Red Hat +supports or endorses software built and delivered with a Red Hat +Universal Base Image(s) (such derivative works referred to as a “Red Hat +Based Container Images”); provided if a Red Hat Based Container Image is +Red Hat Certified and deployed on a Red Hat supported configuration as +set forth at https://access.redhat.com/articles/2726611 then you may +state that the Red Hat Universal Base Image is supported by Red Hat. +You agree to include this unmodified EULA in all distributions of +container images sourced, built or otherwise derived from the Programs. +If you modify the Red Hat Universal Base Image(s), you must remove any +Red Hat trademark(s) prior to any subsequent distribution. Any breach +of this Section 2 is a material breach of the EULA and you may no longer +use and/or distribute the Red Hat trademark(s). Modifications to the +software may corrupt the Programs. + +3. Limited Warranty. 
Except as specifically stated in this Section 3, +a separate agreement with Red Hat, or a license for a particular +component, to the maximum extent permitted under applicable law, the +Programs and the components are provided and licensed “as is” without +warranty of any kind, expressed or implied, including the implied +warranties of merchantability, non-infringement or fitness for a +particular purpose. Neither Red Hat nor its affiliates warrant that the +functions contained in the Programs will meet your requirements or that +the operation of the Programs will be entirely error free, appear or +perform precisely as described in the accompanying documentation, or +comply with regulatory requirements. Red Hat warrants that the media on +which the Programs and the components are provided will be free from +defects in materials and manufacture under normal use for a period of 30 +days from the date of delivery to you. This warranty extends only to the +party that purchases subscription services for the supported +configurations from Red Hat and/or its affiliates or a Red Hat +authorized distributor. + +4. Limitation of Remedies and Liability. To the maximum extent +permitted by applicable law, your exclusive remedy under this EULA is to +return any defective media within 30 days of delivery along with a copy +of your payment receipt and Red Hat, at its option, will replace it or +refund the money you paid for the media. To the maximum extent +permitted under applicable law, under no circumstances will Red Hat, +its affiliates, any Red Hat authorized distributor, or the licensor of +any component provided to you under this EULA be liable to you for any +incidental or consequential damages, including lost profits or lost +savings arising out of the use or inability to use the Programs or any +component, even if Red Hat, its affiliates, an authorized distributor, +and/or licensor has been advised of the possibility of such damages. 
In +no event shall Red Hat's or its affiliates’ liability, an authorized +distributor’s liability or the liability of the licensor of a component +provided to you under this EULA exceed the amount that you paid to Red +Hat for the media under this EULA. + +5. Export Control. As required by the laws of the United States and +other countries, you represent and warrant that you: (a) understand that +the Programs and their components may be subject to export controls +under the U.S. Commerce Department’s Export Administration Regulations +(“EAR”); (b) are not located in a prohibited destination country under +the EAR or U.S. sanctions regulations (currently Cuba, Iran, North +Korea, Sudan, Syria, and the Crimea Region of Ukraine, subject to change +as posted by the United States government); (c) will not export, +re-export, or transfer the Programs to any prohibited destination, +persons or entities on the U.S. Bureau of Industry and Security Denied +Parties List or Entity List, or the U.S. 
Office of Foreign Assets +Control list of Specially Designated Nationals and Blocked Persons, or +any similar lists maintained by other countries, without the necessary +export license(s) or authorizations(s); (d) will not use or transfer the +Programs for use in connection with any nuclear, chemical or biological +weapons, missile technology, or military end-uses where prohibited by an +applicable arms embargo, unless authorized by the relevant government +agency by regulation or specific license; (e) understand and agree that +if you are in the United States and export or transfer the Programs to +eligible end users, you will, to the extent required by EAR Section +740.17(e), submit semi-annual reports to the Commerce Department’s +Bureau of Industry and Security, which include the name and address +(including country) of each transferee; and (f) understand that +countries including the United States may restrict the import, use, or +export of encryption products (which may include the Programs and the +components) and agree that you shall be solely responsible for +compliance with any such import, use, or export restrictions. + +6. Third Party Software. The Program may be provided with third party +software programs subject to their own license terms. The license terms +either accompany the third party software programs or, in some +instances, may be viewed at registry.access.redhat.com. If you do not +agree to abide by the applicable license terms for the third party +software programs, then you may not install, distribute or use them. + +7. General. If any provision of this EULA is held to be unenforceable, +the enforceability of the remaining provisions shall not be affected. +Any claim, controversy or dispute arising under or relating to this EULA +shall be governed by the laws of the State of New York and of the United +States, without regard to any conflict of laws provisions. 
The rights +and obligations of the parties to this EULA shall not be governed by the +United Nations Convention on the International Sale of Goods. + +Copyright © 2019 Red Hat, Inc. All rights reserved. “Red Hat,” is a +registered trademark of Red Hat, Inc. All other trademarks are the +property of their respective owners. + +=========================================================================== +END OF TERMS AND CONDITIONS FOR Red Hat Universal Base Image 8 +=========================================================================== + + + + + +@@@@@@@@@@@@ +=========================================================================== +THE FOLLOWING TERMS AND CONDITIONS APPLY to Red Hat Universal +Base Image 8 Python-3.8 software: +--------------------------------------------------------------------------- + + +END USER LICENSE AGREEMENT + +RED HAT UNIVERSAL BASE IMAGE + + +PLEASE READ THIS END USER LICENSE AGREEMENT CAREFULLY BEFORE USING +SOFTWARE FROM RED HAT. BY USING RED HAT SOFTWARE, YOU SIGNIFY YOUR +ASSENT TO AND ACCEPTANCE OF THIS END USER LICENSE AGREEMENT AND +ACKNOWLEDGE YOU HAVE READ AND UNDERSTAND THE TERMS. AN INDIVIDUAL ACTING +ON BEHALF OF AN ENTITY REPRESENTS THAT HE OR SHE HAS THE AUTHORITY TO +ENTER INTO THIS END USER LICENSE AGREEMENT ON BEHALF OF THAT ENTITY. IF +YOU DO NOT ACCEPT THE TERMS OF THIS AGREEMENT, THEN YOU MUST NOT USE THE +RED HAT SOFTWARE. THIS END USER LICENSE AGREEMENT DOES NOT PROVIDE ANY +RIGHTS TO RED HAT SERVICES SUCH AS SOFTWARE MAINTENANCE, UPGRADES OR +SUPPORT. PLEASE REVIEW YOUR SERVICE OR SUBSCRIPTION AGREEMENT(S) THAT +YOU MAY HAVE WITH RED HAT OR OTHER AUTHORIZED RED HAT SERVICE PROVIDERS +REGARDING SERVICES AND ASSOCIATED PAYMENTS. 
+ +This end user license agreement (“EULA”) governs the use of Red Hat +Universal Base Image and associated software supporting such +container(s) and any related updates, source code, including the +appearance, structure and organization (the “Programs”), regardless of +the delivery mechanism. If a Red Hat Universal Base Image is included +in another Red Hat product, the EULA terms of such other Red Hat product +will apply and supersede this EULA. If a Red Hat Universal Base Image +is included in a third party work, the terms of this EULA will continue +to govern the Red Hat Universal Base Image. + +1. License Grant. Subject to the terms of this EULA, Red Hat, Inc. +(“Red Hat”) grants to you a perpetual, worldwide license to the Programs +(each of which may include multiple software components). With the +exception of the Red Hat trademark identified in Section 2 below, each +software component is governed by a license that permits you to run, +copy, modify, and redistribute (subject to certain obligations in some +cases) the software components. This EULA pertains solely to the +Programs and does not limit your rights under, or grant you rights that +supersede, the license terms applicable to any particular component. +The license terms applicable to each software component are provided in +the source code of that component. + +2. Intellectual Property Rights. The Programs and each of their +components are owned by Red Hat and other licensors and are protected +under copyright law and other laws as applicable. Title to the Programs +and any component shall remain with Red Hat and other licensors, subject +to the applicable license, excluding any independently developed and +licensed work. The “Red Hat” trademark is a registered trademark of Red +Hat and its affiliates in the U.S. and other countries. 
Subject to Red +Hat’s trademark usage guidelines (set forth at +http://www.redhat.com/about/corporate/trademark/), this EULA permits you +to distribute the Programs that include the Red Hat trademark, provided +you do not make any statements on behalf of Red Hat, including but not +limited to, stating or in any way suggesting (in any public, private +and/or confidential statement (whether written or verbal)) that Red Hat +supports or endorses software built and delivered with a Red Hat +Universal Base Image(s) (such derivative works referred to as a “Red Hat +Based Container Images”); provided if a Red Hat Based Container Image is +Red Hat Certified and deployed on a Red Hat supported configuration as +set forth at https://access.redhat.com/articles/2726611 then you may +state that the Red Hat Universal Base Image is supported by Red Hat. +You agree to include this unmodified EULA in all distributions of +container images sourced, built or otherwise derived from the Programs. +If you modify the Red Hat Universal Base Image(s), you must remove any +Red Hat trademark(s) prior to any subsequent distribution. Any breach +of this Section 2 is a material breach of the EULA and you may no longer +use and/or distribute the Red Hat trademark(s). Modifications to the +software may corrupt the Programs. + +3. Limited Warranty. Except as specifically stated in this Section 3, +a separate agreement with Red Hat, or a license for a particular +component, to the maximum extent permitted under applicable law, the +Programs and the components are provided and licensed “as is” without +warranty of any kind, expressed or implied, including the implied +warranties of merchantability, non-infringement or fitness for a +particular purpose. 
Neither Red Hat nor its affiliates warrant that the +functions contained in the Programs will meet your requirements or that +the operation of the Programs will be entirely error free, appear or +perform precisely as described in the accompanying documentation, or +comply with regulatory requirements. Red Hat warrants that the media on +which the Programs and the components are provided will be free from +defects in materials and manufacture under normal use for a period of 30 +days from the date of delivery to you. This warranty extends only to the +party that purchases subscription services for the supported +configurations from Red Hat and/or its affiliates or a Red Hat +authorized distributor. + +4. Limitation of Remedies and Liability. To the maximum extent +permitted by applicable law, your exclusive remedy under this EULA is to +return any defective media within 30 days of delivery along with a copy +of your payment receipt and Red Hat, at its option, will replace it or +refund the money you paid for the media. To the maximum extent +permitted under applicable law, under no circumstances will Red Hat, +its affiliates, any Red Hat authorized distributor, or the licensor of +any component provided to you under this EULA be liable to you for any +incidental or consequential damages, including lost profits or lost +savings arising out of the use or inability to use the Programs or any +component, even if Red Hat, its affiliates, an authorized distributor, +and/or licensor has been advised of the possibility of such damages. In +no event shall Red Hat's or its affiliates’ liability, an authorized +distributor’s liability or the liability of the licensor of a component +provided to you under this EULA exceed the amount that you paid to Red +Hat for the media under this EULA. + +5. Export Control. 
As required by the laws of the United States and +other countries, you represent and warrant that you: (a) understand that +the Programs and their components may be subject to export controls +under the U.S. Commerce Department’s Export Administration Regulations +(“EAR”); (b) are not located in a prohibited destination country under +the EAR or U.S. sanctions regulations (currently Cuba, Iran, North +Korea, Sudan, Syria, and the Crimea Region of Ukraine, subject to change +as posted by the United States government); (c) will not export, +re-export, or transfer the Programs to any prohibited destination, +persons or entities on the U.S. Bureau of Industry and Security Denied +Parties List or Entity List, or the U.S. Office of Foreign Assets +Control list of Specially Designated Nationals and Blocked Persons, or +any similar lists maintained by other countries, without the necessary +export license(s) or authorizations(s); (d) will not use or transfer the +Programs for use in connection with any nuclear, chemical or biological +weapons, missile technology, or military end-uses where prohibited by an +applicable arms embargo, unless authorized by the relevant government +agency by regulation or specific license; (e) understand and agree that +if you are in the United States and export or transfer the Programs to +eligible end users, you will, to the extent required by EAR Section +740.17(e), submit semi-annual reports to the Commerce Department’s +Bureau of Industry and Security, which include the name and address +(including country) of each transferee; and (f) understand that +countries including the United States may restrict the import, use, or +export of encryption products (which may include the Programs and the +components) and agree that you shall be solely responsible for +compliance with any such import, use, or export restrictions. + +6. Third Party Software. 
The Program may be provided with third party +software programs subject to their own license terms. The license terms +either accompany the third party software programs or, in some +instances, may be viewed at registry.access.redhat.com. If you do not +agree to abide by the applicable license terms for the third party +software programs, then you may not install, distribute or use them. + +7. General. If any provision of this EULA is held to be unenforceable, +the enforceability of the remaining provisions shall not be affected. +Any claim, controversy or dispute arising under or relating to this EULA +shall be governed by the laws of the State of New York and of the United +States, without regard to any conflict of laws provisions. The rights +and obligations of the parties to this EULA shall not be governed by the +United Nations Convention on the International Sale of Goods. + +Copyright © 2019 Red Hat, Inc. All rights reserved. “Red Hat,” is a +registered trademark of Red Hat, Inc. All other trademarks are the +property of their respective owners. 
+ +=========================================================================== +END OF TERMS AND CONDITIONS FOR Red Hat Universal Base Image 8 Python-3.8 +=========================================================================== + + + + + +@@@@@@@@@@@@ +=========================================================================== +GNU General Public License 2.0: THE FOLLOWING TERMS AND CONDITIONS +APPLY to the listed components below which are licensed under the GNU +General Public License 2.0: + +Portions of Red Hat Universal Base Image 8 +Portions of Red Hat Universal Base Image 8 Python-3.8 + +--------------------------------------------------------------------------- +Start of GNU GPL Version 2.0 License +--------------------------------------------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
+ +--------------------------------------------------------------------------- +End of GNU GPL Version 2.0 License +--------------------------------------------------------------------------- + + + + +@@@@@@@@@@@@ +=========================================================================== +GNU General Public License 3.0: THE FOLLOWING TERMS AND CONDITIONS +APPLY to the listed components below which are licensed under the GNU +General Public License 3.0: + +Portions of Red Hat Universal Base Image 8 +Portions of Red Hat Universal Base Image 8 Python-3.8 + +--------------------------------------------------------------------------- +Start of GNU GPL Version 3.0 License +--------------------------------------------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
+ +--------------------------------------------------------------------------- +End of GNU GPL Version 3.0 License +--------------------------------------------------------------------------- + + + + +=========================================================================== +END OF TERMS AND CONDITIONS FOR SEPARATELY LICENSED CODE for IBM Block +Storage CSI Driver version 1.11.0 +=========================================================================== + + + + +@@@@@@@@@@@@ +=========================================================================== +======================== SOURCE CODE OFFERS =============================== + +GNU GPL and / or LGPL Source Code for: + +IBM Block Storage CSI Driver 1.11.0 + +=========================================================================== + + + + + +@@@@@@@@@@@@ +=========================================================================== +General Public License 2.0: The Program includes some or all of the +following licensed to the licensee as Separately Licensed Code under the +GNU General Public License 2.0. +=========================================================================== + + Red Hat Universal Base Image 8 + Red Hat Universal Base Image 8 Python 3.8 + +Source code to any of the above-listed packages distributed with IBM Block +Storage CSI Driver 1.11.0 is available at the website below, when a URL is +provided, or by sending a request to the following address or email: + + IBM Corporation + Attn: Dept 4XNA / 9032-2, Storage Open Source Management + 9000 S. Rita Road + Tucson, AZ 85744 + +Please identify the name of the IBM product and the GPL or LGPL licensed +program(s) required in the request for source code. 
+ +=========================================================================== +END of GNU General Public License 2.0 Notices and Information +=========================================================================== + + + +@@@@@@@@@@@@ +=========================================================================== +General Public License 3.0: The Program includes some or all of the +following licensed to the licensee as Separately Licensed Code under the +GNU General Public License 3.0. +=========================================================================== + + Red Hat Universal Base Image 8 + Red Hat Universal Base Image 8 Python 3.8 + +Source code to any of the above-listed packages distributed with IBM Block +Storage CSI Driver 1.11.0 is available at the website below, when a URL is +provided, or by sending a request to the following address or email: + + IBM Corporation + Attn: Dept 4XNA / 9032-2, Storage Open Source Management + 9000 S. Rita Road + Tucson, AZ 85744 + +Please identify the name of the IBM product and the GPL or LGPL licensed +program(s) required in the request for source code. + +=========================================================================== +END of GNU General Public License 3.0 Notices and Information +=========================================================================== + + +========================================================================= +Section 2 - NOTICES and INFORMATION +========================================================================= + +Notwithstanding the terms and conditions of any other agreement Licensee +may have with IBM or any of its related or affiliated entities +(collectively "IBM"), the third party code identified below is subject +to the terms and conditions of the license agreement for the +Program and not the license terms that may be contained in the notices +below. The notices are provided for informational purposes. 
+ +IMPORTANT: IBM does not represent or warrant that the information in this +NOTICES file is accurate. Third party websites are independent of IBM and +IBM does not represent or warrant that the information on any third party +website referenced in this NOTICES file is accurate. IBM disclaims any +and all liability for errors and omissions or for any damages accruing +from the use of this NOTICES file or its contents, including without +limitation URLs or references to any third party websites. + +--------------------------------------------------------------------------- +The following are NOTICES and INFORMATION: +--------------------------------------------------------------------------- + + + +@@@@@@@@@@@@ +=========================================================================== +APACHE 2.0 LICENSED CODE: The Program includes all or portions of +the following software which IBM obtained under the terms and conditions +of the Apache License Version 2.0: + +csi-lib-utils-0.9.1 +gophercloud-0.1.0 +k8s.io/klog-1.0.0 +k8s.io/mount-utils-0.20.13 +k8s.io/structured-merge-diff-3.0.0 +k8s.io/utils commit 67b214c +kubernetes-client/python-23.6.0 +pyds8k version 1.4.0 +pysvc version 1.1.1 +pyxcli version 1.2.1 +controller-util-0.3.0 +kubernetes/apimachinery-0.19.0 +k8s.io/client-go-0.19.0 +grpc-1.29.0 +grpcio-1.41.1 +grpcio-tools-1.41.1 +spec 1.5.0 +kubernetes/apimachinery-0.18.0 +kubernetes/utils-4140de9 +mock-1.3.1 +packaging-20.1 +retry version 0.9.2 +yaml-2.27.1 + +--------------------------------------------------------------------------- +Start of Apache License 2.0 +--------------------------------------------------------------------------- + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +--------------------------------------------------------------------------- +End of Apache License 2.0 +--------------------------------------------------------------------------- + + + +=========================================================================== +NOTICE file corresponding to section 4(d) of the Apache License, +Version 2.0, in this case for the grpc-1.29.0 distribution +=========================================================================== + +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +=========================================================================== +NOTICE file corresponding to section 4(d) of the Apache License, +Version 2.0, in this case for the yaml-2.27.1 distribution +=========================================================================== + +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +=========================================================================== +END OF APACHE 2.0 NOTICES AND INFORMATION +=========================================================================== + + + +@@@@@@@@@@@@ +=========================================================================== +mergo-0.3.7: The Program includes mergo-0.3.7 software. IBM obtained the +mergo-0.3.7 software under the terms and conditions of the following +license(s): +--------------------------------------------------------------------------- + + + +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +=========================================================================== +END OF mergo-0.3.7 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +logrus-1.6.0: The Program includes logrus-1.6.0 software. 
IBM obtained the +logrus-1.6.0 software under the terms and conditions of the following +license(s): +--------------------------------------------------------------------------- + + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================================== +END OF logrus-1.6.0 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +sys commit ed371f2: The Program includes sys commit ed371f2 software. IBM +obtained the sys commit ed371f2 software under the terms and conditions of +the following license(s): +--------------------------------------------------------------------------- + + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +======================================================================= + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + +=========================================================================== +END OF sys commit ed371f2 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +munch-2.3.2: The Program includes munch-2.3.2 software. 
IBM obtained the +munch-2.3.2 software under the terms and conditions of the following +license(s): +--------------------------------------------------------------------------- + + +Copyright (c) 2010 David Schoonover + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +=========================================================================== +END OF munch-2.3.2 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +base58-2.0.0: The Program includes base58-2.0.0 software. 
IBM obtained the +base58-2.0.0 software under the terms and conditions of the following +license(s): +--------------------------------------------------------------------------- + + +Copyright (c) 2015 David Keijser + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +=========================================================================== +END OF base58-2.0.0 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +grpc-1.29.0: The Program includes grpc-1.29.0 software. IBM obtained +portions of the grpc-1.29.0 software under the terms and conditions of the +following license(s): +--------------------------------------------------------------------------- + + + +Copyright (c) 2009-2011, Google Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of any other +contributors may be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY GOOGLE INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL GOOGLE INC. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +==================================================================================== + +Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the project nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +==================================================================================== + +Lunit License +------------- + +Lunit is written by Michael Roth and is licensed +under the terms of the MIT license reproduced below. 
+ +======================================================================== + +Copyright (c) 2004-2010 Michael Roth + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + +=========================================================================== +END OF grpc-1.29.0 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +protobuf-3.15.0: The Program includes protobuf-3.15.0 software. IBM +obtained the protobuf-3.15.0 software under the terms and conditions of the +following license(s): +--------------------------------------------------------------------------- + + + +Copyright 2008 Google Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. 
+ + + +=========================================================================== +END OF protobuf-3.15.0 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +pyyaml-6: The Program includes pyyaml-6 software. IBM obtained the pyyaml-6 +software under the terms and conditions of the following license(s): +--------------------------------------------------------------------------- + + +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +=========================================================================== +END OF pyyaml-6 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +setuptools (python)-41.0.1: The Program includes setuptools (python)-41.0.1 +software. IBM obtained the setuptools (python)-41.0.1 software under the +terms and conditions of the following license(s): +--------------------------------------------------------------------------- + + +Copyright (C) 2016 Jason R Coombs + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------------------------------------------------------------- + +Copyright (c) 2003-2018 Paul T. 
McGuire + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +--------------------------------------------------------------------------- + +Copyright (c) 2010-2015 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +=========================================================================== +END OF setuptools (python)-41.0.1 NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +sync commit cd5d95a: The Program includes sync commit cd5d95a software. IBM +obtained the sync commit cd5d95a software under the terms and conditions of +the following license(s): +--------------------------------------------------------------------------- + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +======================================================================= + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
+ + +=========================================================================== +END OF sync commit cd5d95a NOTICES AND INFORMATION +=========================================================================== + + +@@@@@@@@@@@@ +=========================================================================== +yaml-2.2.8: The Program includes yaml-2.2.8 software. IBM obtained portions +of the yaml-2.2.8 software under the terms and conditions of the following +license(s): +--------------------------------------------------------------------------- + + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +=========================================================================== +END OF yaml-2.2.8 NOTICES AND INFORMATION +=========================================================================== + + +=========================================================================== +END OF NOTICES AND INFORMATION FOR IBM Block Storage CSI +Driver 1.11.0 +=========================================================================== + diff --git a/common/config.yaml b/common/config.yaml index a995e00b3..7ec21b97f 100644 --- a/common/config.yaml +++ b/common/config.yaml @@ -1,6 +1,6 @@ identity: name: block.csi.ibm.com - version: 1.11.0 + version: 1.12.0 capabilities: Service: [ CONTROLLER_SERVICE, VOLUME_ACCESSIBILITY_CONSTRAINTS ] VolumeExpansion: ONLINE diff --git a/controllers/array_action/array_action_types.py b/controllers/array_action/array_action_types.py index d7c637f5c..74c1effd2 100644 --- a/controllers/array_action/array_action_types.py +++ b/controllers/array_action/array_action_types.py @@ -4,7 +4,21 @@ @dataclass -class Volume: +class ArrayObject: + name: str + id: str + internal_id: str + array_type: str + + +@dataclass +class ThinVolume(ArrayObject): + capacity_bytes: int + source_id: str = "" + + +@dataclass +class Volume(ArrayObject): capacity_bytes: int id: str internal_id: str @@ -15,6 +29,12 @@ class Volume: pool: str space_efficiency_aliases: set = field(default_factory=set) volume_group_id: str = None + volume_group_name: str = None + + +@dataclass +class VolumeGroup(ArrayObject): + volumes: list = field(default_factory=list) @dataclass @@ -63,3 +83,12 @@ class ObjectIds: def __bool__(self): return bool(self.internal_id or self.uid) + + +@dataclass +class VolumeGroupIds: + internal_id: str = '' + name: str = '' + + def __bool__(self): + return bool(self.internal_id or self.name) diff --git a/controllers/array_action/array_mediator_ds8k.py b/controllers/array_action/array_mediator_ds8k.py index 4bd251af5..a7310a69a 100644 --- 
a/controllers/array_action/array_mediator_ds8k.py +++ b/controllers/array_action/array_mediator_ds8k.py @@ -363,7 +363,7 @@ def _get_api_volume_with_cache(self, name, pool_id): api_volume = self._get_api_volume_by_name(volume_name=name, pool_id=pool_id) return api_volume - def get_volume(self, name, pool, is_virt_snap_func): + def get_volume(self, name, pool, is_virt_snap_func, source_type): logger.debug("getting volume {} in pool {}".format(name, pool)) api_volume = self._get_api_volume_with_cache(name, pool) if api_volume: @@ -726,3 +726,6 @@ def get_host_io_group(self, host_name): def change_host_protocol(self, host_name, protocol): raise NotImplementedError + + def register_plugin(self, unique_key, metadata): + return None diff --git a/controllers/array_action/array_mediator_interface.py b/controllers/array_action/array_mediator_interface.py index d56858303..813f26d2d 100644 --- a/controllers/array_action/array_mediator_interface.py +++ b/controllers/array_action/array_mediator_interface.py @@ -99,7 +99,7 @@ def delete_volume(self, volume_id): raise NotImplementedError @abstractmethod - def get_volume(self, name, pool, is_virt_snap_func): + def get_volume(self, name, pool, is_virt_snap_func, source_type): """ This function return volume info about the volume. @@ -107,6 +107,7 @@ def get_volume(self, name, pool, is_virt_snap_func): name : name of the volume on storage system. pool : pool of the volume to find the volume more efficiently. is_virt_snap_func : indicate if svc's snapshot function feature is enabled + source_type : volume or snapshot Returns: @@ -633,6 +634,23 @@ def change_host_protocol(self, host_name, protocol): """ raise NotImplementedError + @abstractmethod + def register_plugin(self, unique_key, metadata): + """ + This function should register CSI plugin with unique_key and metadata accordingly to the feature it used. 
+ + Args: + unique_key : a unique key that will represent a feature + metadata : a metadata that will add some information + + Returns: + None + + Raises: + None + """ + raise NotImplementedError + @property @abstractmethod def identifier(self): diff --git a/controllers/array_action/array_mediator_svc.py b/controllers/array_action/array_mediator_svc.py index 5a0614365..db97a6e73 100644 --- a/controllers/array_action/array_mediator_svc.py +++ b/controllers/array_action/array_mediator_svc.py @@ -1,4 +1,5 @@ from collections import defaultdict +from datetime import datetime, timedelta from io import StringIO from random import choice @@ -10,14 +11,19 @@ import controllers.array_action.errors as array_errors import controllers.array_action.settings as array_settings -from controllers.array_action import svc_messages import controllers.servers.settings as controller_settings -from controllers.array_action.array_action_types import Volume, Snapshot, Replication, Host +from controllers.array_action import svc_messages +from controllers.array_action.array_action_types import Volume, Snapshot, Replication, Host, VolumeGroup, ThinVolume from controllers.array_action.array_mediator_abstract import ArrayMediatorAbstract +from controllers.array_action.registration_cache import SVC_REGISTRATION_CACHE from controllers.array_action.utils import ClassProperty, convert_scsi_id_to_nguid +from controllers.array_action.volume_group_interface import VolumeGroupInterface from controllers.common import settings as common_settings +from controllers.common.config import config from controllers.common.csi_logger import get_stdout_logger -from controllers.servers.utils import get_connectivity_type_ports, split_string +from controllers.servers.csi.decorators import register_csi_plugin +from controllers.servers.settings import UNIQUE_KEY_KEY +from controllers.servers.utils import get_connectivity_type_ports, split_string, is_call_home_enabled array_connections_dict = {} logger = 
get_stdout_logger() @@ -49,6 +55,7 @@ NOT_VALID_IO_GROUP = 'CMMVC5729E' NOT_SUPPORTED_PARAMETER = 'CMMVC5709E' CANNOT_CHANGE_HOST_PROTOCOL_BECAUSE_OF_MAPPED_PORTS = 'CMMVC9331E' +STR_AS_NUMERIC_FIELD = 'CMMVC5716E' HOST_NQN = 'nqn' HOST_WWPN = 'WWPN' @@ -129,8 +136,9 @@ def build_create_host_kwargs(host_name, connectivity_type, port, io_group): cli_kwargs = _add_port_to_command_kwargs(connectivity_type, port, cli_kwargs) if connectivity_type == array_settings.NVME_OVER_FC_CONNECTIVITY_TYPE: cli_kwargs['protocol'] = 'nvme' - if io_group: - cli_kwargs['iogrp'] = io_group + if not io_group: + io_group = common_settings.FULL_IO_GROUP + cli_kwargs['iogrp'] = io_group return cli_kwargs @@ -191,6 +199,16 @@ def build_change_host_protocol_kwargs(host_name, protocol): } +def build_register_plugin_kwargs(unique_key, metadata): + cli_kwargs = { + UNIQUE_KEY_KEY: unique_key, + array_settings.VERSION_KEY: config.identity.version + } + if metadata: + cli_kwargs[array_settings.METADATA_KEY] = metadata + return cli_kwargs + + def _get_cli_volume_space_efficiency_aliases(cli_volume): space_efficiency_aliases = {common_settings.SPACE_EFFICIENCY_THICK, ''} if cli_volume.se_copy == YES: @@ -207,7 +225,7 @@ def _get_cli_volume_space_efficiency_aliases(cli_volume): return space_efficiency_aliases -class SVCArrayMediator(ArrayMediatorAbstract): +class SVCArrayMediator(ArrayMediatorAbstract, VolumeGroupInterface): ARRAY_ACTIONS = {} BLOCK_SIZE_IN_BYTES = 512 MAX_LUN_NUMBER = 511 @@ -300,10 +318,11 @@ def identifier(self): def is_active(self): return self.client.transport.transport.get_transport().is_active() - def _generate_volume_response(self, cli_volume, is_virt_snap_func=False): + def _generate_volume_response(self, cli_volume, is_virt_snap_func=False, source_type=None): pool = self._get_volume_pool(cli_volume) - source_id = None - if not is_virt_snap_func: + if is_virt_snap_func: + source_id = self._get_source_id_flashcopy2(cli_volume, source_type) + else: source_id = 
self._get_source_volume_wwn_if_exists(cli_volume) space_efficiency = _get_cli_volume_space_efficiency_aliases(cli_volume) return Volume( @@ -316,7 +335,8 @@ def _generate_volume_response(self, cli_volume, is_virt_snap_func=False): source_id=source_id, array_type=self.array_type, space_efficiency_aliases=space_efficiency, - volume_group_id=cli_volume.volume_group_id + volume_group_id=cli_volume.volume_group_id, + volume_group_name=cli_volume.volume_group_name ) def _generate_snapshot_response_from_cli_volume(self, cli_volume, source_id): @@ -339,6 +359,16 @@ def _generate_snapshot_response(self, capacity, name, source_id, internal_id, vd array_type=self.array_type ) + def _get_source_id_flashcopy2(self, cli_volume, source_type): + if source_type == controller_settings.SNAPSHOT_TYPE_NAME: + if hasattr(cli_volume, "source_snapshot"): + cli_snapshot = self._get_cli_snapshot_by_name(cli_volume.source_snapshot) + return cli_snapshot.snapshot_name + elif hasattr(cli_volume, "source_volume_name"): + cli_volume = self._get_cli_volume(cli_volume.source_volume_name) + return cli_volume.vdisk_UID + return None + def _generate_snapshot_response_with_verification(self, cli_object): if not cli_object.FC_id: logger.error("FlashCopy Mapping not found for target volume: {}".format(cli_object.name)) @@ -349,10 +379,22 @@ def _generate_snapshot_response_with_verification(self, cli_object): source_id = self._get_wwn_by_volume_name_if_exists(fcmap.source_vdisk_name) return self._generate_snapshot_response_from_cli_volume(cli_object, source_id) + def _lsvdisk_single_element(self, **kwargs): + lsvdisk_response = self._lsvdisk(**kwargs) + if lsvdisk_response is None: + return None + return lsvdisk_response.as_single_element + + def _lsvdisk_list(self, **kwargs): + lsvdisk_response = self._lsvdisk(**kwargs) + if lsvdisk_response is None: + return None + return lsvdisk_response.as_list + def _lsvdisk(self, **kwargs): kwargs['bytes'] = True try: - return 
self.client.svcinfo.lsvdisk(**kwargs).as_single_element + return self.client.svcinfo.lsvdisk(**kwargs) except (svc_errors.CommandExecutionError, CLIFailureError) as ex: if (OBJ_NOT_FOUND in ex.my_message or NAME_NOT_EXIST_OR_MEET_RULES in ex.my_message): @@ -362,13 +404,15 @@ def _lsvdisk(self, **kwargs): raise array_errors.InvalidArgumentError(ex.my_message) raise ex - def _lsvolumegroup(self, id_or_name): + def _lsvolumegroup(self, id_or_name, not_exist_err=False): try: return self.client.svcinfo.lsvolumegroup(object_id=id_or_name).as_single_element except (svc_errors.CommandExecutionError, CLIFailureError) as ex: if (SPECIFIED_OBJ_NOT_EXIST in ex.my_message or NAME_NOT_EXIST_OR_MEET_RULES in ex.my_message): logger.info("volume group {} was not found".format(id_or_name)) + if not_exist_err: + raise array_errors.ObjectNotFoundError(id_or_name) return None if any(msg_id in ex.my_message for msg_id in (NON_ASCII_CHARS, VALUE_TOO_LONG)): raise array_errors.InvalidArgumentError(ex.my_message) @@ -411,7 +455,7 @@ def _chvolumegroupreplication(self, id_or_name, **cli_kwargs): raise ex def _get_cli_volume(self, volume_name, not_exist_err=True): - cli_volume = self._lsvdisk(object_id=volume_name) + cli_volume = self._lsvdisk_single_element(object_id=volume_name) if not cli_volume and not_exist_err: raise array_errors.ObjectNotFoundError(volume_name) return cli_volume @@ -451,9 +495,9 @@ def _get_volume_pool(self, cli_volume): pools = self._get_volume_pools(cli_volume) return ':'.join(pools) - def get_volume(self, name, pool, is_virt_snap_func): + def get_volume(self, name, pool, is_virt_snap_func, source_type): cli_volume = self._get_cli_volume(name) - return self._generate_volume_response(cli_volume, is_virt_snap_func) + return self._generate_volume_response(cli_volume, is_virt_snap_func, source_type) def _get_object_fcmaps(self, object_name): all_fcmaps = [] @@ -539,7 +583,7 @@ def _get_wwn_by_volume_name_if_exists(self, volume_name): def _lsvdisk_by_uid(self, 
vdisk_uid): filter_value = 'vdisk_UID=' + vdisk_uid - return self._lsvdisk(filtervalue=filter_value) + return self._lsvdisk_single_element(filtervalue=filter_value) def _get_cli_volume_by_wwn(self, volume_id, not_exist_err=False): cli_volume = self._lsvdisk_by_uid(volume_id) @@ -649,6 +693,7 @@ def _create_cli_volume_from_source(self, name, pool, io_group, volume_group, sou def _is_vdisk_support_addsnapshot(self, vdisk_uid): return self._is_addsnapshot_supported() and not self._is_vdisk_has_fcmaps(vdisk_uid) + @register_csi_plugin() def create_volume(self, name, size_in_bytes, space_efficiency, pool, io_group, volume_group, source_ids, source_type, is_virt_snap_func): if is_virt_snap_func and source_ids: @@ -659,7 +704,7 @@ def create_volume(self, name, size_in_bytes, space_efficiency, pool, io_group, v else: self._create_cli_volume(name, size_in_bytes, space_efficiency, pool, io_group, volume_group) cli_volume = self._get_cli_volume(name) - return self._generate_volume_response(cli_volume, is_virt_snap_func) + return self._generate_volume_response(cli_volume, is_virt_snap_func, source_type) def _rmvolume(self, volume_id_or_name, not_exist_err=True): logger.info("deleting volume with name : {0}".format(volume_id_or_name)) @@ -675,6 +720,7 @@ def _rmvolume(self, volume_id_or_name, not_exist_err=True): raise array_errors.ObjectNotFoundError(volume_id_or_name) raise ex + @register_csi_plugin() def delete_volume(self, volume_id): logger.info("Deleting volume with id : {0}".format(volume_id)) self._delete_volume(volume_id) @@ -687,7 +733,7 @@ def get_snapshot(self, volume_id, snapshot_name, pool, is_virt_snap_func): cli_snapshot = self._get_cli_snapshot_by_name(snapshot_name) if not cli_snapshot: return None - source_cli_volume = self._get_cli_volume_by_wwn(volume_id) + source_cli_volume = self._get_cli_volume(cli_snapshot.volume_name) return self._generate_snapshot_response_from_cli_snapshot(cli_snapshot, source_cli_volume) raise 
array_errors.VirtSnapshotFunctionNotSupportedMessage(volume_id) target_cli_volume = self._get_cli_volume_if_exists(snapshot_name) @@ -697,20 +743,22 @@ def get_snapshot(self, volume_id, snapshot_name, pool, is_virt_snap_func): def get_object_by_id(self, object_id, object_type, is_virt_snap_func=False): if is_virt_snap_func and object_type == controller_settings.SNAPSHOT_TYPE_NAME: - cli_snapshot = self._get_cli_snapshot_by_id(object_id) + cli_snapshot = self._get_cli_snapshot(object_id) if not cli_snapshot: return None source_cli_volume = self._get_cli_volume(cli_snapshot.volume_name) if not source_cli_volume: return None return self._generate_snapshot_response_from_cli_snapshot(cli_snapshot, source_cli_volume) + if object_type == controller_settings.VOLUME_GROUP_TYPE_NAME: + return self.get_volume_group(object_id) cli_volume = self._get_cli_volume_by_wwn(object_id) if not cli_volume: return None if object_type is controller_settings.SNAPSHOT_TYPE_NAME: return self._generate_snapshot_response_with_verification(cli_volume) cli_volume = self._get_cli_volume(cli_volume.name) - return self._generate_volume_response(cli_volume) + return self._generate_volume_response(cli_volume, is_virt_snap_func, object_type) def _create_similar_volume(self, source_cli_volume, target_volume_name, space_efficiency, pool): logger.info("creating target cli volume '{0}' from source volume '{1}'".format(target_volume_name, @@ -885,6 +933,7 @@ def _get_cli_volume_in_pool_site(self, volume_name, pool_name): return other_cli_volume raise RuntimeError('could not find a volume for {} in site {}'.format(volume_name, pool_site_name)) + @register_csi_plugin() def create_snapshot(self, volume_id, snapshot_name, space_efficiency, pool, is_virt_snap_func): logger.info("creating snapshot '{0}' from volume '{1}'".format(snapshot_name, volume_id)) source_volume_name = self._get_volume_name_by_wwn(volume_id) @@ -914,11 +963,20 @@ def _rmsnapshot(self, internal_snapshot_id): raise 
array_errors.ObjectNotFoundError(internal_snapshot_id) raise ex + def _handle_delete_snapshot(self, snapshot_name, internal_snapshot_id): + if self._is_addsnapshot_supported(): + snapshot_id = snapshot_name if snapshot_name else internal_snapshot_id + cli_snapshot = self._get_cli_snapshot(snapshot_id) + if cli_snapshot: + self._rmsnapshot(cli_snapshot.snapshot_id) + return True + return False + + @register_csi_plugin() def delete_snapshot(self, snapshot_id, internal_snapshot_id): logger.info("Deleting snapshot with id : {0}".format(snapshot_id)) - if self._is_addsnapshot_supported() and not snapshot_id: - self._rmsnapshot(internal_snapshot_id) - else: + is_handled = self._handle_delete_snapshot(snapshot_id, internal_snapshot_id) + if not is_handled: self._delete_volume(snapshot_id, is_snapshot=True) logger.info("Finished snapshot deletion. id : {0}".format(snapshot_id)) @@ -1129,6 +1187,7 @@ def _raise_error_when_host_not_exist_or_not_meet_the_rules(self, host_name, erro if NAME_NOT_EXIST_OR_MEET_RULES in error_message: raise array_errors.HostNotFoundError(host_name) + @register_csi_plugin() def map_volume(self, volume_id, host_name, connectivity_type): logger.debug("mapping volume : {0} to host : " "{1}".format(volume_id, host_name)) @@ -1162,6 +1221,7 @@ def map_volume(self, volume_id, host_name, connectivity_type): return str(lun) + @register_csi_plugin() def unmap_volume(self, volume_id, host_name): logger.debug("unmapping volume : {0} from host : " "{1}".format(volume_id, host_name)) @@ -1392,12 +1452,10 @@ def _get_ear_replication(self, replication_request): logger.info("EAR replication is not supported on the existing storage") return None - # for phase 1 - find volume by id and get volume group id from result volume - cli_volume = self._get_cli_volume(replication_request.volume_internal_id) - volume_group_id = cli_volume.volume_group_id - if volume_group_id == "": + volume_group_id = replication_request.volume_internal_id + replication_policy = 
self._get_replication_policy(volume_group_id) + if replication_policy != replication_request.replication_policy: return None - replication_mode = self._get_replication_mode(volume_group_id) if not replication_mode: return None @@ -1415,6 +1473,12 @@ def _get_replication_mode(self, volume_group_id): return getattr(volume_group_replication, location_attr_name, None) + def _get_replication_policy(self, volume_group_id): + volume_group_replication = self._lsvolumegroupreplication(volume_group_id) + if not volume_group_replication: + return None + return volume_group_replication.replication_policy_name + def _is_earreplication_supported(self): return hasattr(self.client.svctask, "chvolumereplicationinternals") @@ -1458,6 +1522,7 @@ def _start_rcrelationship(self, rcrelationship_id, primary_endpoint_type=None, f else: logger.warning("failed to start rcrelationship '{}': {}".format(rcrelationship_id, ex)) + @register_csi_plugin() def create_replication(self, replication_request): if replication_request.replication_type == array_settings.REPLICATION_TYPE_MIRROR: self._create_replication(replication_request) @@ -1477,14 +1542,9 @@ def _create_ear_replication(self, replication_request): return replication_policy = replication_request.replication_policy + volume_group_id = replication_request.volume_internal_id - volume_internal_id = replication_request.volume_internal_id - cli_volume = self._get_cli_volume(volume_internal_id) - - volume_group_name = cli_volume.name + common_settings.VOLUME_GROUP_NAME_SUFFIX - volume_group_id = self._create_volume_group(volume_group_name) - self._change_volume_group(volume_internal_id, volume_group_name) - + self._change_volume_group_policy(volume_group_id) self._change_volume_group_policy(volume_group_id, replication_policy) def _stop_rcrelationship(self, rcrelationship_id, add_access_to_secondary=False): @@ -1513,6 +1573,7 @@ def _delete_rcrelationship(self, rcrelationship_id): else: logger.warning("failed to delete rcrelationship '{0}': 
{1}".format(rcrelationship_id, ex)) + @register_csi_plugin() def delete_replication(self, replication): if replication.replication_type == array_settings.REPLICATION_TYPE_MIRROR: self._delete_replication(replication.name) @@ -1534,10 +1595,6 @@ def _delete_ear_replication(self, volume_group_id): self._change_volume_group_policy(volume_group_id) - cli_volume_id = self._get_cli_volume_id_from_volume_group("volume_group_id", volume_group_id) - self._change_volume_group(cli_volume_id) - self._rmvolumegroup(volume_group_id) - def _promote_replication_endpoint(self, endpoint_type, replication_name): logger.info("making '{}' primary for remote copy relationship {}".format(endpoint_type, replication_name)) try: @@ -1567,6 +1624,7 @@ def _ensure_endpoint_is_primary(self, rcrelationship, endpoint_type): self._start_rcrelationship(rcrelationship.id, primary_endpoint_type=other_endpoint_type, force=True) self._promote_replication_endpoint(endpoint_type, rcrelationship.name) + @register_csi_plugin() def promote_replication_volume(self, replication): if replication.replication_type == array_settings.REPLICATION_TYPE_MIRROR: self._promote_replication_volume(replication.name) @@ -1598,6 +1656,7 @@ def _promote_ear_replication_volume(self, volume_group_id): else: logger.info("Can't be promoted because the local volume group is not an independent copy") + @register_csi_plugin() def demote_replication_volume(self, replication): if replication.replication_type == array_settings.REPLICATION_TYPE_MIRROR: self._demote_replication_volume(replication.name) @@ -1660,6 +1719,14 @@ def _lsvolumesnapshot(self, **kwargs): raise ex return None + def _get_cli_snapshot(self, snapshot_id_or_name): + try: + return self._get_cli_snapshot_by_id(snapshot_id_or_name) + except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + if STR_AS_NUMERIC_FIELD in ex.my_message: + return self._get_cli_snapshot_by_name(snapshot_id_or_name) + raise ex + def _get_cli_snapshot_by_id(self, snapshot_id): return 
self._lsvolumesnapshot(object_id=snapshot_id) @@ -1699,7 +1766,7 @@ def _mkvolumegroup(self, name, **cli_kwargs): def _get_cli_volume_id_from_volume_group(self, filter, filter_parameter): filter_value = '{}={}'.format(filter, filter_parameter) - cli_volume = self._lsvdisk(filtervalue=filter_value) + cli_volume = self._lsvdisk_single_element(filtervalue=filter_value) return cli_volume.id def _rollback_create_volume_from_snapshot(self, cli_volume_id, volume_group_name): @@ -1759,7 +1826,7 @@ def _is_vdisk_has_fcmaps(self, vdisk_uid): cli_volume = self._get_cli_volume_by_wwn(vdisk_uid, not_exist_err=False) if self._is_earreplication_supported(): return cli_volume and cli_volume.replication_mode and cli_volume.fc_map_count != \ - common_settings.EAR_VOLUME_FC_MAP_COUNT + common_settings.EAR_VOLUME_FC_MAP_COUNT return cli_volume and cli_volume.FC_id def _raise_invalid_io_group(self, io_group, error_message): @@ -1786,6 +1853,7 @@ def _mkhost(self, host_name, connectivity_type, port, io_group): logger.warning("exception encountered during host {} creation : {}".format(host_name, ex.my_message)) raise ex + @register_csi_plugin() def create_host(self, host_name, initiators, connectivity_type, io_group): ports = get_connectivity_type_ports(initiators, connectivity_type) for port in ports: @@ -1807,6 +1875,7 @@ def _rmhost(self, host_name): return raise ex + @register_csi_plugin() def delete_host(self, host_name): logger.info(svc_messages.DELETE_HOST.format(host_name)) self._rmhost(host_name) @@ -1817,17 +1886,23 @@ def _raise_error_when_no_ports_added(self, host_name): self.delete_host(host_name) raise array_errors.NoPortIsValid(host_name) + def _raise_error_when_host_not_found(self, host_name, error_message): + if OBJ_NOT_FOUND in error_message: + raise array_errors.HostNotFoundError(host_name) + def _addhostport(self, host_name, connectivity_type, port): cli_kwargs = build_host_port_command_kwargs(host_name, connectivity_type, port) try: 
self.client.svctask.addhostport(**cli_kwargs) except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + self._raise_error_when_host_not_found(host_name, ex.my_message) if not self._is_port_invalid(ex.my_message): if is_warning_message(ex.my_message): logger.warning("exception encountered during adding port {} to host {} : {}".format( port, host_name, ex.my_message)) raise ex + @register_csi_plugin() def add_ports_to_host(self, host_name, initiators, connectivity_type): ports = get_connectivity_type_ports(initiators, connectivity_type) for port in ports: @@ -1840,12 +1915,14 @@ def _rmhostport(self, host_name, connectivity_type, port): try: self.client.svctask.rmhostport(**cli_kwargs) except (svc_errors.CommandExecutionError, CLIFailureError) as ex: + self._raise_error_when_host_not_found(host_name, ex.my_message) if not self._is_port_invalid(ex.my_message): if is_warning_message(ex.my_message): logger.warning("exception encountered during removing port {} from host {} : {}".format( port, host_name, ex.my_message)) raise ex + @register_csi_plugin() def remove_ports_from_host(self, host_name, ports, connectivity_type): for port in ports: logger.info(svc_messages.REMOVE_HOST_PORT.format(port, host_name)) @@ -1936,10 +2013,6 @@ def get_host_io_group(self, host_name): logger.info(svc_messages.HOST_IO_GROUP_IDS.format(host_name, io_group.id)) return io_group - def _raise_error_when_host_not_found(self, host_name, error_message): - if OBJ_NOT_FOUND in error_message: - raise array_errors.HostNotFoundError(host_name) - def _raise_unsupported_parameter_error(self, error_message, parameter): if NOT_SUPPORTED_PARAMETER in error_message: raise array_errors.UnSupportedParameter(parameter) @@ -1964,3 +2037,97 @@ def _chhost(self, host_name, protocol): def change_host_protocol(self, host_name, protocol): self._chhost(host_name, protocol) logger.info(svc_messages.CHANGE_HOST_PROTOCOL.format(host_name, protocol)) + + def _generate_thin_volume_response(self, cli_volume): + 
return ThinVolume( + capacity_bytes=int(cli_volume.capacity), + id=cli_volume.vdisk_UID, + internal_id=cli_volume.id, + name=cli_volume.name, + array_type=self.array_type + ) + + def _get_cli_volumes_from_volume_group(self, volume_group_name): + filter_value = 'volume_group_name={}'.format(volume_group_name) + cli_volumes = self._lsvdisk_list(filtervalue=filter_value) + return [self._generate_thin_volume_response(cli_volume) for cli_volume in cli_volumes] + + def _generate_volume_group_response(self, cli_volume_group): + volumes = [] + if int(cli_volume_group.volume_count) > 0: + volumes = self._get_cli_volumes_from_volume_group(cli_volume_group.name) + return VolumeGroup(name=cli_volume_group.name, + array_type=self.array_type, + id=cli_volume_group.uid if hasattr(cli_volume_group, "uid") else cli_volume_group.id, + internal_id=cli_volume_group.id, + volumes=volumes) + + @register_csi_plugin() + def create_volume_group(self, name): + volume_group_id = self._create_volume_group(name) + cli_volume_group = self._lsvolumegroup(volume_group_id) + return self._generate_volume_group_response(cli_volume_group) + + def get_volume_group(self, volume_group_id): + cli_volume_group = self._lsvolumegroup(volume_group_id) + if cli_volume_group is None: + raise array_errors.ObjectNotFoundError(volume_group_id) + return self._generate_volume_group_response(cli_volume_group) + + @register_csi_plugin() + def delete_volume_group(self, volume_group_id): + self._lsvolumegroup(volume_group_id, not_exist_err=True) + self._rmvolumegroup(volume_group_id) + + @register_csi_plugin() + def add_volume_to_volume_group(self, volume_group_id, volume_id): + volume_name = self._get_volume_name_by_wwn(volume_id) + cli_volume = self._get_cli_volume(volume_name) + if cli_volume.volume_group_name and cli_volume.volume_group_name != volume_group_id: + raise array_errors.VolumeAlreadyInVolumeGroup(volume_id, cli_volume.volume_group_name) + self._change_volume_group(cli_volume.id, volume_group_id) + + 
@register_csi_plugin() + def remove_volume_from_volume_group(self, volume_id): + cli_volume = self._get_cli_volume_by_wwn(volume_id, not_exist_err=True) + self._change_volume_group(cli_volume.id, None) + + def register_plugin(self, unique_key, metadata): + if is_call_home_enabled() and self._is_registerplugin_supported() and \ + self._is_plugin_needs_to_be_registered(unique_key): + self._register_plugin(unique_key, metadata) + + def _is_registerplugin_supported(self): + return hasattr(self.client.svctask, "registerplugin") + + def _is_plugin_needs_to_be_registered(self, unique_key): + current_time = datetime.now() + endpoint_cache = SVC_REGISTRATION_CACHE.get(self.endpoint) + if not endpoint_cache: + return True + last_registration_time = endpoint_cache.get(unique_key) + if last_registration_time: + time_difference = current_time - last_registration_time + return time_difference >= timedelta(hours=array_settings.MINIMUM_HOURS_BETWEEN_REGISTRATIONS) + return True + + def _register_plugin(self, unique_key, metadata): + self._update_registration_cache(unique_key) + self._registerplugin(unique_key, metadata) + + def _update_registration_cache(self, unique_key): + endpoint_cache = SVC_REGISTRATION_CACHE.get(self.endpoint) + if not endpoint_cache: + SVC_REGISTRATION_CACHE[self.endpoint] = {} + SVC_REGISTRATION_CACHE[self.endpoint][unique_key] = datetime.now() + + def _registerplugin(self, unique_key, metadata): + logger.info("Registering {} plugin, using {} unique key with [{}] metadata".format( + array_settings.REGISTRATION_PLUGIN, unique_key, metadata)) + cli_kwargs = build_register_plugin_kwargs(unique_key, metadata) + try: + self.client.svctask.registerplugin(name='{}'.format(array_settings.REGISTRATION_PLUGIN), **cli_kwargs) + except Exception as ex: + logger.error("exception encountered during" + "registering {} plugin using {} unique key with [{}] metadata: {}".format( + array_settings.REGISTRATION_PLUGIN, unique_key, metadata, ex)) diff --git 
a/controllers/array_action/array_mediator_xiv.py b/controllers/array_action/array_mediator_xiv.py index f785ec854..915704f34 100644 --- a/controllers/array_action/array_mediator_xiv.py +++ b/controllers/array_action/array_mediator_xiv.py @@ -161,7 +161,7 @@ def _get_cli_object_by_name(self, volume_name): logger.exception(ex) raise array_errors.InvalidArgumentError(ex.status) - def get_volume(self, name, pool, is_virt_snap_func): + def get_volume(self, name, pool, is_virt_snap_func, source_type): logger.debug("Get volume : {}".format(name)) cli_volume = self._get_cli_object_by_name(name) @@ -558,3 +558,6 @@ def get_host_io_group(self, host_name): def change_host_protocol(self, host_name, protocol): raise NotImplementedError + + def register_plugin(self, unique_key, metadata): + return None diff --git a/controllers/array_action/ds8k_volume_cache.py b/controllers/array_action/ds8k_volume_cache.py index 4bfac8de3..32c8f4e87 100644 --- a/controllers/array_action/ds8k_volume_cache.py +++ b/controllers/array_action/ds8k_volume_cache.py @@ -1,58 +1,58 @@ -from collections import defaultdict -from threading import RLock - -from controllers.common.csi_logger import get_stdout_logger - -logger = get_stdout_logger() - - -class VolumeCacheByAddress: - def __init__(self): - logger.debug("creating a new cache") - self._volume_cache_by_address = defaultdict(dict) - self._cache_lock = RLock() - - def add(self, address, key, value): - logger.debug("adding {} to cache".format(key)) - with self._cache_lock: - self._volume_cache_by_address[address][key] = value - - def remove(self, address, key): - logger.debug("removing {} from cache".format(key)) - with self._cache_lock: - if self._volume_cache_by_address[address].get(key) is not None: - del self._volume_cache_by_address[address][key] - - def get(self, address, key): - logger.debug("getting {} from cache".format(key)) - with self._cache_lock: - return self._volume_cache_by_address[address].get(key) - - def add_or_delete(self, 
address, key, value): - with self._cache_lock: - if self._volume_cache_by_address[address].get(key) is None: - logger.debug("adding {} to cache".format(key)) - self._volume_cache_by_address[address][key] = value - else: - logger.debug("removing {} from cache".format(key)) - del self._volume_cache_by_address[address][key] - - -volume_cache_by_address = VolumeCacheByAddress() - - -class VolumeCache: - def __init__(self, service_address): - self._service_address = service_address - - def add(self, key, value): - volume_cache_by_address.add(self._service_address, key, value) - - def remove(self, key): - volume_cache_by_address.remove(self._service_address, key) - - def get(self, key): - return volume_cache_by_address.get(self._service_address, key) - - def add_or_delete(self, key, value): - volume_cache_by_address.add_or_delete(self._service_address, key, value) +from collections import defaultdict +from threading import RLock + +from controllers.common.csi_logger import get_stdout_logger + +logger = get_stdout_logger() + + +class VolumeCacheByAddress: + def __init__(self): + logger.debug("creating a new cache") + self._volume_cache_by_address = defaultdict(dict) + self._cache_lock = RLock() + + def add(self, address, key, value): + logger.debug("adding {} to cache".format(key)) + with self._cache_lock: + self._volume_cache_by_address[address][key] = value + + def remove(self, address, key): + logger.debug("removing {} from cache".format(key)) + with self._cache_lock: + if self._volume_cache_by_address[address].get(key) is not None: + del self._volume_cache_by_address[address][key] + + def get(self, address, key): + logger.debug("getting {} from cache".format(key)) + with self._cache_lock: + return self._volume_cache_by_address[address].get(key) + + def add_or_delete(self, address, key, value): + with self._cache_lock: + if self._volume_cache_by_address[address].get(key) is None: + logger.debug("adding {} to cache".format(key)) + 
self._volume_cache_by_address[address][key] = value + else: + logger.debug("removing {} from cache".format(key)) + del self._volume_cache_by_address[address][key] + + +volume_cache_by_address = VolumeCacheByAddress() + + +class VolumeCache: + def __init__(self, service_address): + self._service_address = service_address + + def add(self, key, value): + volume_cache_by_address.add(self._service_address, key, value) + + def remove(self, key): + volume_cache_by_address.remove(self._service_address, key) + + def get(self, key): + return volume_cache_by_address.get(self._service_address, key) + + def add_or_delete(self, key, value): + volume_cache_by_address.add_or_delete(self._service_address, key, value) diff --git a/controllers/array_action/errors.py b/controllers/array_action/errors.py index 7edcb190e..d8ef90d9a 100644 --- a/controllers/array_action/errors.py +++ b/controllers/array_action/errors.py @@ -1,4 +1,6 @@ import controllers.array_action.messages as messages +from controllers.tests.common.test_settings import VOLUME_OBJECT_TYPE, SNAPSHOT_OBJECT_TYPE, HOST_OBJECT_TYPE, \ + VOLUME_GROUP_OBJECT_TYPE class BaseArrayActionException(Exception): @@ -56,6 +58,13 @@ def __init__(self, name): self.message = messages.OBJECT_NOT_FOUND_ERROR_MESSAGE.format(name) +class ObjectAlreadyExistError(BaseArrayActionException): + + def __init__(self, name, field_name, actual_value, expected_value): + super().__init__() + self.message = messages.VOLUME_NOT_MATCH_REQUEST.format(name, field_name, actual_value, expected_value) + + class VolumeNameBelongsToSnapshotError(BaseArrayActionException): def __init__(self, volume, array): @@ -77,6 +86,13 @@ def __init__(self, volume_id): self.message = messages.VOLUME_DELETION_ERROR_MESSAGE.format(volume_id) +class VolumeAlreadyInVolumeGroup(BaseArrayActionException): + + def __init__(self, volume_id, volume_group_name): + super().__init__() + self.message = messages.VOLUME_ALREADY_IN_VOLUME_GROUP_ERROR_MESSAGE.format(volume_id, 
volume_group_name) + + class PoolDoesNotMatchSpaceEfficiency(InvalidArgumentError): def __init__(self, pool, space_efficiency, error): @@ -104,7 +120,7 @@ class VolumeAlreadyExists(BaseArrayActionException): def __init__(self, volume_name, array): super().__init__() - self.message = messages.VOLUME_ALREADY_EXISTS_MESSAGE.format(volume_name, array) + self.message = messages.OBJECT_ALREADY_EXISTS_MESSAGE.format(VOLUME_OBJECT_TYPE, volume_name, array) class PoolDoesNotExist(InvalidArgumentError): @@ -216,14 +232,21 @@ class SnapshotAlreadyExists(BaseArrayActionException): def __init__(self, snapshot_id_or_name, array): super().__init__() - self.message = messages.SNAPSHOT_ALREADY_EXISTS_ERROR_MESSAGE.format(snapshot_id_or_name, array) + self.message = messages.OBJECT_ALREADY_EXISTS_MESSAGE.format(SNAPSHOT_OBJECT_TYPE, snapshot_id_or_name, array) class HostAlreadyExists(BaseArrayActionException): def __init__(self, host_name, array): super().__init__() - self.message = messages.HOST_ALREADY_EXISTS_ERROR_MESSAGE.format(host_name, array) + self.message = messages.OBJECT_ALREADY_EXISTS_MESSAGE.format(HOST_OBJECT_TYPE, host_name, array) + + +class VolumeGroupAlreadyExists(BaseArrayActionException): + + def __init__(self, volume_group_name, array): + super().__init__() + self.message = messages.OBJECT_ALREADY_EXISTS_MESSAGE.format(VOLUME_GROUP_OBJECT_TYPE, volume_group_name, array) class NoPortFoundByConnectivityType(BaseArrayActionException): diff --git a/controllers/array_action/messages.py b/controllers/array_action/messages.py index 21f9522ab..a3d599456 100644 --- a/controllers/array_action/messages.py +++ b/controllers/array_action/messages.py @@ -9,6 +9,8 @@ VOLUME_NAME_BELONGS_TO_SNAPSHOT_ERROR_MESSAGE = "Volume not found. Snapshot with the same id exists. 
\ Name : {0} , array : {1}" +VOLUME_ALREADY_IN_VOLUME_GROUP_ERROR_MESSAGE = "Volume {0} is already in a volume group {1}" + POOL_DOES_NOT_MATCH_SPACE_EFFICIENCY_MESSAGE = "Pool : {0} does not match the following space efficiency : {1} . " \ "error : {2}" @@ -19,7 +21,7 @@ VIRT_SNAPSHOT_FUNCTION_NOT_SUPPORTED_MESSAGE = "Snapshot function is enabled but not supported with object : {0} " -VOLUME_ALREADY_EXISTS_MESSAGE = "Volume already exists : {0} , array : {1}" +OBJECT_ALREADY_EXISTS_MESSAGE = "{0} already exists : {1} , array : {2}" POOL_DOES_NOT_EXIST_MESSAGE = "Pool does not exist: {0} , array : {1}" @@ -58,10 +60,6 @@ SNAPSHOT_NOT_FOUND_ERROR_MESSAGE = "Snapshot was not found : {0} " -SNAPSHOT_ALREADY_EXISTS_ERROR_MESSAGE = "Snapshot already exists : {0} , array : {1}" - -HOST_ALREADY_EXISTS_ERROR_MESSAGE = "Host already exists : {0} , array : {1}" - EXPECTED_SNAPSHOT_BUT_FOUND_VOLUME_ERROR_MESSAGE = "Could not find info about the source of: {0}, array: {1}" SNAPSHOT_WRONG_VOLUME_ERROR_MESSAGE = "Snapshot {0} exists but it is of Volume {1} and not {2}" @@ -85,5 +83,7 @@ UNSUPPORTED_PARAMETER = "The parameter {} is not a supported parameter" -CANNOT_CHANGE_HOST_PROTOCOL_BECAUSE_OF_MAPPED_PORTS = "The host protocol cannot be changed"\ - " because the host {} has ports configured." +CANNOT_CHANGE_HOST_PROTOCOL_BECAUSE_OF_MAPPED_PORTS = "The host protocol cannot be changed" \ + " because the host {} has ports configured." + +VOLUME_NOT_MATCH_REQUEST = "Volume: {0} was already created with different {1}. 
volume {1}: {2}, requested {1}: {3}" diff --git a/controllers/array_action/registration_cache.py b/controllers/array_action/registration_cache.py new file mode 100644 index 000000000..80f1da495 --- /dev/null +++ b/controllers/array_action/registration_cache.py @@ -0,0 +1 @@ +SVC_REGISTRATION_CACHE = {} diff --git a/controllers/array_action/registration_maps.py b/controllers/array_action/registration_maps.py new file mode 100644 index 000000000..cdf677161 --- /dev/null +++ b/controllers/array_action/registration_maps.py @@ -0,0 +1,38 @@ +from controllers.array_action.settings import METADATA_KEY +from controllers.servers.settings import UNIQUE_KEY_KEY + + +def _generate_plugin_type(unique_key, metadata=''): + return { + UNIQUE_KEY_KEY: unique_key, + METADATA_KEY: metadata + } + + +basic_plugin_type = _generate_plugin_type('basic') +replication_plugin_type = _generate_plugin_type('replication') +volume_group_plugin_type = _generate_plugin_type('volume_group') +snapshot_plugin_type = _generate_plugin_type('snapshot') +host_definition_plugin_type = _generate_plugin_type('host_definition') + + +REGISTRATION_MAP = { + 'create_volume': basic_plugin_type, + 'delete_volume': basic_plugin_type, + 'map_volume': basic_plugin_type, + 'unmap_volume': basic_plugin_type, + 'create_replication': replication_plugin_type, + 'delete_replication': replication_plugin_type, + 'promote_replication_volume': replication_plugin_type, + 'demote_replication_volume': replication_plugin_type, + 'create_volume_group': volume_group_plugin_type, + 'delete_volume_group': volume_group_plugin_type, + 'add_volume_to_volume_group': volume_group_plugin_type, + 'remove_volume_from_volume_group': volume_group_plugin_type, + 'create_snapshot': snapshot_plugin_type, + 'delete_snapshot': snapshot_plugin_type, + 'create_host': host_definition_plugin_type, + 'delete_host': host_definition_plugin_type, + 'add_ports_to_host': host_definition_plugin_type, + 'remove_ports_from_host': host_definition_plugin_type, +} 
diff --git a/controllers/array_action/settings.py b/controllers/array_action/settings.py index 5e7383100..2d84aa4c3 100644 --- a/controllers/array_action/settings.py +++ b/controllers/array_action/settings.py @@ -20,4 +20,11 @@ CONTEXT_POOL = "pool" WWN_OUI_END = 7 +NGUID_OUI_END = 22 WWN_VENDOR_IDENTIFIER_END = 16 +VENDOR_IDENTIFIER_LENGTH = 9 + +METADATA_KEY = 'metadata' +VERSION_KEY = 'version' +REGISTRATION_PLUGIN = 'block.csi.ibm.com' +MINIMUM_HOURS_BETWEEN_REGISTRATIONS = 2 diff --git a/controllers/array_action/utils.py b/controllers/array_action/utils.py index 7b480ab61..8b648a057 100644 --- a/controllers/array_action/utils.py +++ b/controllers/array_action/utils.py @@ -1,27 +1,38 @@ -import encodings - -from controllers.array_action.settings import WWN_OUI_END, WWN_VENDOR_IDENTIFIER_END -from controllers.common.csi_logger import get_stdout_logger - -UTF_8 = encodings.utf_8.getregentry().name - -logger = get_stdout_logger() - - -def convert_scsi_id_to_nguid(volume_id): - logger.debug("Converting scsi uuid : {} to nguid".format(volume_id)) - oui = volume_id[1:WWN_OUI_END] - vendor_identifier = volume_id[WWN_OUI_END:WWN_VENDOR_IDENTIFIER_END] - vendor_identifier_extension = volume_id[WWN_VENDOR_IDENTIFIER_END:] - final_nguid = ''.join((vendor_identifier_extension, oui, '0', vendor_identifier)) - logger.debug("Nguid is : {}".format(final_nguid)) - return final_nguid - - -class ClassProperty: - - def __init__(self, function): - self._function = function - - def __get__(self, instance, owner): - return self._function(owner) +import encodings + +from controllers.array_action.settings import WWN_OUI_END, WWN_VENDOR_IDENTIFIER_END, VENDOR_IDENTIFIER_LENGTH, \ + NGUID_OUI_END +from controllers.common.csi_logger import get_stdout_logger + +UTF_8 = encodings.utf_8.getregentry().name + +logger = get_stdout_logger() + + +def convert_scsi_id_to_nguid(volume_id): + logger.debug("Converting scsi uuid : {} to nguid".format(volume_id)) + oui = volume_id[1:WWN_OUI_END] + 
vendor_identifier = volume_id[WWN_OUI_END:WWN_VENDOR_IDENTIFIER_END] + vendor_identifier_extension = volume_id[WWN_VENDOR_IDENTIFIER_END:] + final_nguid = ''.join((vendor_identifier_extension, oui, '0', vendor_identifier)) + logger.debug("Nguid is : {}".format(final_nguid)) + return final_nguid + + +def convert_nguid_to_scsi_id(volume_id): + logger.debug("Converting nguid : {} to scsi uuid".format(volume_id)) + oui = volume_id[WWN_VENDOR_IDENTIFIER_END:NGUID_OUI_END] + vendor_identifier = volume_id[-VENDOR_IDENTIFIER_LENGTH:] + vendor_identifier_extension = volume_id[:WWN_VENDOR_IDENTIFIER_END] + final_scsi_id = ''.join((oui, vendor_identifier, vendor_identifier_extension)) + logger.debug("scsi uuid is : {}".format(final_scsi_id)) + return final_scsi_id + + +class ClassProperty: + + def __init__(self, function): + self._function = function + + def __get__(self, instance, owner): + return self._function(owner) diff --git a/controllers/array_action/volume_group_interface.py b/controllers/array_action/volume_group_interface.py new file mode 100644 index 000000000..1291aa069 --- /dev/null +++ b/controllers/array_action/volume_group_interface.py @@ -0,0 +1,97 @@ +from abc import ABC, abstractmethod + + +class VolumeGroupInterface(ABC): + + @abstractmethod + def create_volume_group(self, name): + """ + This function should create a volume group in the storage system. + + Args: + name : name of the volume group to be created in the storage system + + Returns: + VolumeGroup + + Raises: + VolumeGroupAlreadyExists + InvalidArgument + PermissionDenied + """ + raise NotImplementedError + + @abstractmethod + def get_volume_group(self, volume_group_id): + """ + This function return volume group info about the volume. + + Args: + volume_group_id : id of the volume group on storage system. 
+ + Returns: + VolumeGroup + + Raises: + ObjectNotFound + InvalidArgument + PermissionDenied + """ + raise NotImplementedError + + @abstractmethod + def delete_volume_group(self, volume_group_id): + """ + This function should delete a volume group in the storage system. + + Args: + volume_group_id : id of the volume group to delete. + + Returns: + None + + Raises: + ObjectNotFound + InvalidArgument + PermissionDenied + ObjectIsStillInUse + """ + raise NotImplementedError + + def add_volume_to_volume_group(self, volume_group_id, volume_id): + """ + This function should add a volume to a volume group in the storage system. + + Args: + volume_group_id : id of the volume group on storage system. + volume_id : id of the volume on storage system. + + Returns: + None + + Raises: + ObjectNotFound + InvalidArgument + PermissionDenied + ObjectIsStillInUse + """ + raise NotImplementedError + + def remove_volume_from_volume_group(self, volume_id): + """ + This function should remove a volume from a volume group in the storage system. + + Args: + volume_group_id : id of the volume group on storage system. + volume_id : id of the volume on storage system. 
+ + Returns: + None + + Raises: + ObjectNotFound + InvalidArgument + PermissionDenied + ObjectIsStillInUse + """ + raise NotImplementedError diff --git a/controllers/common/config.py b/controllers/common/config.py index c587b14d2..a41cb1c63 100644 --- a/controllers/common/config.py +++ b/controllers/common/config.py @@ -1,11 +1,11 @@ -import os.path - -import yaml -from munch import DefaultMunch - -_this_file_path = os.path.abspath(os.path.dirname(__file__)) -_config_path = os.path.join(_this_file_path, "../../common/config.yaml") - -with open(_config_path, 'r', encoding="utf-8") as yamlfile: - _cfg = yaml.safe_load(yamlfile) -config = DefaultMunch.fromDict(_cfg) +import os.path + +import yaml +from munch import DefaultMunch + +_this_file_path = os.path.abspath(os.path.dirname(__file__)) +_config_path = os.path.join(_this_file_path, "../../common/config.yaml") + +with open(_config_path, 'r', encoding="utf-8") as yamlfile: + _cfg = yaml.safe_load(yamlfile) +config = DefaultMunch.fromDict(_cfg) diff --git a/controllers/common/settings.py b/controllers/common/settings.py index 6bec7057a..b93551c6a 100644 --- a/controllers/common/settings.py +++ b/controllers/common/settings.py @@ -3,8 +3,6 @@ NAME_PREFIX_SEPARATOR = "_" ENDPOINTS_SEPARATOR = ", " -VOLUME_GROUP_NAME_SUFFIX = "_vg" - CSI_CONTROLLER_SERVER_WORKERS = 10 # array types @@ -22,6 +20,7 @@ SPACE_EFFICIENCY_THICK = "thick" SPACE_EFFICIENCY_NONE = "none" +# hostDefiner HOST_DEFINITION_PLURAL = 'hostdefinitions' CSI_IBM_GROUP = 'csi.ibm.com' VERSION = 'v1' @@ -30,8 +29,64 @@ NAMESPACE_FIELD = 'namespace' IO_GROUP_DELIMITER = ':' IO_GROUP_LABEL_PREFIX = 'hostdefiner.block.csi.ibm.com/io-group-' +RESOURCE_VERSION_FIELD = 'resource_version' +ITEMS_FIELD = 'items' +CSI_PROVISIONER_NAME = 'block.csi.ibm.com' +CSI_IBM_API_VERSION = 'csi.ibm.com/v1' +HOST_DEFINITION_KIND = 'HostDefinition' +MANAGE_NODE_LABEL = 'hostdefiner.block.csi.ibm.com/manage-node' +FORBID_DELETION_LABEL = 
'hostdefiner.block.csi.ibm.com/do-not-delete-definition' +TRUE_STRING = 'true' +PREFIX_ENV_VAR = 'PREFIX' +CONNECTIVITY_ENV_VAR = 'CONNECTIVITY_TYPE' +DYNAMIC_NODE_LABELING_ENV_VAR = 'DYNAMIC_NODE_LABELING' +ALLOW_DELETE_ENV_VAR = 'ALLOW_DELETE' +DEFINE_ACTION = 'Define' +UNDEFINE_ACTION = 'Undefine' +FAILED_MESSAGE_TYPE = 'Failed' +NORMAL_EVENT_TYPE = 'Normal' +WARNING_EVENT_TYPE = 'Warning' +CSI_IBM_FINALIZER = HOST_DEFINITION_PLURAL + '.' + CSI_IBM_GROUP +TOPOLOGY_IBM_BLOCK_PREFIX = 'topology.block.csi.ibm.com' +ADDED_EVENT_TYPE = 'ADDED' +MODIFIED_EVENT_TYPE = 'MODIFIED' +DELETED_EVENT_TYPE = 'DELETED' +PENDING_CREATION_PHASE = 'PendingCreation' +PENDING_DELETION_PHASE = 'PendingDeletion' +DRIVER_PRODUCT_LABEL = 'product=ibm-block-csi-driver' +CONNECTIVITY_TYPE_LABEL = '{}/connectivity-type'.format(CSI_PROVISIONER_NAME) +SUCCESSFUL_MESSAGE_TYPE = 'Successful' +HOST_DEFINER = 'hostDefiner' +STORAGE_API_GROUP = 'storage.k8s.io' +CSI_PARAMETER_PREFIX = "csi.{}/".format(STORAGE_API_GROUP) +SECRET_NAME_SUFFIX = 'secret-name' +READY_PHASE = 'Ready' +ERROR_PHASE = 'Error' +DEFAULT_NAMESPACE = 'default' + +# hostDefiner fields +API_VERSION_FIELD = 'apiVersion' +KIND_FIELD = 'kind' +SPEC_FIELD = 'spec' +METADATA_FIELD = 'metadata' +STATUS_FIELD = 'status' +HOST_DEFINITION_NODE_NAME_FIELD = 'nodeName' +SECRET_NAME_FIELD = 'secretName' +SECRET_NAMESPACE_FIELD = 'secretNamespace' +CONNECTIVITY_TYPE_FIELD = 'connectivityType' +PORTS_FIELD = 'ports' +NODE_NAME_ON_STORAGE_FIELD = 'nodeNameOnStorage' +IO_GROUP_FIELD = 'ioGroups' +MANAGEMENT_ADDRESS_FIELD = 'managementAddress' +STATUS_PHASE_FIELD = 'phase' +LABELS_FIELD = 'labels' +FINALIZERS_FIELD = 'finalizers' +SECRET_CONFIG_FIELD = 'config' +HOST_DEFINITION_FIELD = 'hostDefinition' EAR_VOLUME_FC_MAP_COUNT = "2" SCSI_PROTOCOL = 'scsi' NVME_PROTOCOL = 'nvme' + +FULL_IO_GROUP = '0:1:2:3' diff --git a/controllers/scripts/csi_general/csi_pb2.sh b/controllers/scripts/csi_general/csi_pb2.sh index c21d51b0c..d485fabc4 100755 --- 
a/controllers/scripts/csi_general/csi_pb2.sh +++ b/controllers/scripts/csi_general/csi_pb2.sh @@ -3,14 +3,16 @@ set -x CSI_VERSION="v1.5.0" ADDONS_VERSION="v0.1.1" +VG_VERSION="v0.9.0" PB2_DIR="csi_general" mkdir -p ./proto/${PB2_DIR} cd ./proto/${PB2_DIR} curl -O https://raw.githubusercontent.com/container-storage-interface/spec/${CSI_VERSION}/csi.proto -curl -O https://raw.githubusercontent.com/csi-addons/spec/${ADDONS_VERSION}/replication.proto - +curl -O https://raw.githubusercontent.com/IBM/csi-volume-group/${VG_VERSION}/volumegroup/volumegroup.proto +curl -O https://raw.githubusercontent.com/csi-addons/spec/v0.2.0/replication/replication.proto +sed -i 's|github.com/container-storage-interface/spec/lib/go/csi/csi.proto|csi_general/csi.proto|g' replication.proto cd - python -m grpc_tools.protoc --proto_path=proto \ diff --git a/controllers/scripts/csi_general/setup.py b/controllers/scripts/csi_general/setup.py index 56b50203d..949481baf 100644 --- a/controllers/scripts/csi_general/setup.py +++ b/controllers/scripts/csi_general/setup.py @@ -1,3 +1,3 @@ -import setuptools - -setuptools.setup(name='csi_general', packages=['csi_general']) +import setuptools + +setuptools.setup(name='csi_general', packages=['csi_general']) diff --git a/controllers/servers/csi/addons_server.py b/controllers/servers/csi/addons_server.py index 5e88e28bb..55211b2bb 100644 --- a/controllers/servers/csi/addons_server.py +++ b/controllers/servers/csi/addons_server.py @@ -8,7 +8,7 @@ from controllers.array_action.storage_agent import get_agent from controllers.common.csi_logger import get_stdout_logger from controllers.servers import utils -from controllers.servers.csi.decorators import csi_method +from controllers.servers.csi.decorators import csi_replication_method from controllers.servers.csi.exception_handler import build_error_response logger = get_stdout_logger() @@ -16,51 +16,62 @@ class ReplicationControllerServicer(pb2_grpc.ControllerServicer): - 
@csi_method(error_response_type=pb2.EnableVolumeReplicationResponse, lock_request_attribute="volume_id") + @csi_replication_method(error_response_type=pb2.EnableVolumeReplicationResponse) def EnableVolumeReplication(self, request, context): replication_type = utils.get_addons_replication_type(request) utils.validate_addons_request(request, replication_type) - volume_id_info = utils.get_volume_id_info(request.volume_id) - volume_id = volume_id_info.ids.uid - replication_request = utils.generate_addons_replication_request(request, replication_type) + object_type, object_id_info = utils.get_replication_object_type_and_id_info(request) + object_id = object_id_info.ids.internal_id + + error_message = self._validate_replication_object(object_type, replication_type) + if error_message: + return build_error_response(error_message, context, grpc.StatusCode.FAILED_PRECONDITION, + pb2.EnableVolumeReplicationResponse) + + replication_request = utils.generate_addons_replication_request(request, replication_type, object_id) connection_info = utils.get_array_connection_info_from_secrets(request.secrets) - with get_agent(connection_info, volume_id_info.array_type).get_mediator() as mediator: - volume = mediator.get_object_by_id(volume_id, servers_settings.VOLUME_TYPE_NAME) - if not volume: - raise array_errors.ObjectNotFoundError(volume_id) + with get_agent(connection_info, object_id_info.array_type).get_mediator() as mediator: + object_uid = object_id_info.ids.uid if object_type == servers_settings.VOLUME_TYPE_NAME else \ + object_id_info.ids.internal_id + replication_object = mediator.get_object_by_id(object_uid, object_type) + if not replication_object: + raise array_errors.ObjectNotFoundError(object_uid) replication = mediator.get_replication(replication_request) if replication: - error_message = self._ensure_replication_idempotency(replication_request, replication, volume) + error_message = self._ensure_replication_idempotency(replication_request, replication) if 
error_message: return build_error_response(error_message, context, grpc.StatusCode.ALREADY_EXISTS, pb2.EnableVolumeReplicationResponse) logger.info("idempotent case. replication already exists " - "for volume {} with system: {}".format(volume.name, + "for volume {} with system: {}".format(replication_object.name, replication_request.other_system_id)) return pb2.EnableVolumeReplicationResponse() - error_message = self._validate_volume(volume) - if error_message: - return build_error_response(error_message, context, grpc.StatusCode.FAILED_PRECONDITION, - pb2.EnableVolumeReplicationResponse) logger.info("creating replication for volume {} with system: {}" - .format(volume.name, replication_request.other_system_id)) + .format(replication_object.name, replication_request.other_system_id)) mediator.create_replication(replication_request) return pb2.EnableVolumeReplicationResponse() - @csi_method(error_response_type=pb2.DisableVolumeReplicationResponse, lock_request_attribute="volume_id") + @csi_replication_method(error_response_type=pb2.DisableVolumeReplicationResponse) def DisableVolumeReplication(self, request, context): replication_type = utils.get_addons_replication_type(request) utils.validate_addons_request(request, replication_type) - volume_id_info = utils.get_volume_id_info(request.volume_id) - replication_request = utils.generate_addons_replication_request(request, replication_type) + object_type, object_id_info = utils.get_replication_object_type_and_id_info(request) + object_id = object_id_info.ids.internal_id + + error_message = self._validate_replication_object(object_type, replication_type) + if error_message: + return build_error_response(error_message, context, grpc.StatusCode.FAILED_PRECONDITION, + pb2.EnableVolumeReplicationResponse) + + replication_request = utils.generate_addons_replication_request(request, replication_type, object_id) connection_info = utils.get_array_connection_info_from_secrets(request.secrets) - with get_agent(connection_info, 
volume_id_info.array_type).get_mediator() as mediator: + with get_agent(connection_info, object_id_info.array_type).get_mediator() as mediator: replication = mediator.get_replication(replication_request) if replication: logger.info("deleting replication {} with system {}".format(replication.name, @@ -93,11 +104,18 @@ def _ensure_volume_role(self, request, context, is_to_promote, response_type): replication_type = utils.get_addons_replication_type(request) utils.validate_addons_request(request, replication_type) - volume_id_info = utils.get_volume_id_info(request.volume_id) - replication_request = utils.generate_addons_replication_request(request, replication_type) + object_type, object_id_info = utils.get_replication_object_type_and_id_info(request) + object_id = object_id_info.ids.internal_id + + error_message = self._validate_replication_object(object_type, replication_type) + if error_message: + return build_error_response(error_message, context, grpc.StatusCode.FAILED_PRECONDITION, + pb2.EnableVolumeReplicationResponse) + + replication_request = utils.generate_addons_replication_request(request, replication_type, object_id) connection_info = utils.get_array_connection_info_from_secrets(request.secrets) - with get_agent(connection_info, volume_id_info.array_type).get_mediator() as mediator: + with get_agent(connection_info, object_id_info.array_type).get_mediator() as mediator: replication = mediator.get_replication(replication_request) if not replication: message = "could not find replication for volume internal id: {} with " \ @@ -112,24 +130,31 @@ def _ensure_volume_role(self, request, context, is_to_promote, response_type): logger.info("finished {}".format(method_name)) return response_type() - @csi_method(error_response_type=pb2.PromoteVolumeResponse, lock_request_attribute="volume_id") + @csi_replication_method(error_response_type=pb2.PromoteVolumeResponse) def PromoteVolume(self, request, context): return self._ensure_volume_role(request, context, 
is_to_promote=True, response_type=pb2.PromoteVolumeResponse) - @csi_method(error_response_type=pb2.DemoteVolumeResponse, lock_request_attribute="volume_id") + @csi_replication_method(error_response_type=pb2.DemoteVolumeResponse) def DemoteVolume(self, request, context): return self._ensure_volume_role(request, context, is_to_promote=False, response_type=pb2.DemoteVolumeResponse) - @csi_method(error_response_type=pb2.ResyncVolumeResponse, lock_request_attribute="volume_id") + @csi_replication_method(error_response_type=pb2.ResyncVolumeResponse) def ResyncVolume(self, request, context): replication_type = utils.get_addons_replication_type(request) utils.validate_addons_request(request, replication_type) - volume_id_info = utils.get_volume_id_info(request.volume_id) - replication_request = utils.generate_addons_replication_request(request, replication_type) + object_type, object_id_info = utils.get_replication_object_type_and_id_info(request) + object_id = object_id_info.ids.internal_id + + error_message = self._validate_replication_object(object_type, replication_type) + if error_message: + return build_error_response(error_message, context, grpc.StatusCode.FAILED_PRECONDITION, + pb2.EnableVolumeReplicationResponse) + + replication_request = utils.generate_addons_replication_request(request, replication_type, object_id) connection_info = utils.get_array_connection_info_from_secrets(request.secrets) - with get_agent(connection_info, volume_id_info.array_type).get_mediator() as mediator: + with get_agent(connection_info, object_id_info.array_type).get_mediator() as mediator: replication = mediator.get_replication(replication_request) if not replication: message = "could not find replication for volume internal id: {} with " \ @@ -143,28 +168,19 @@ def ResyncVolume(self, request, context): return pb2.ResyncVolumeResponse(ready=replication.is_ready) @staticmethod - def _ensure_replication_idempotency(replication_request, replication, volume): + def 
_ensure_replication_idempotency(replication_request, replication): if replication_request.replication_type == array_settings.REPLICATION_TYPE_MIRROR and \ replication.copy_type != replication_request.copy_type: error_message = "replication already exists " \ - "but has copy type of {} and not {}".format(replication.copy_type, - replication_request.copy_type) + "but has copy type of {} and not {}".format(replication.copy_type, + replication_request.copy_type) return error_message - elif replication.replication_type == array_settings.REPLICATION_TYPE_EAR: - if replication.name != replication_request.replication_policy: - error_message = "replication already exists, " \ - "but volume {} uses another replication policy {}".format(volume.name, replication.name) - return error_message - if replication.volume_group_id != volume.volume_group_id: - error_message = "replication already exists, " \ - "but volume {} belongs to another group {}".format(volume.name, volume.volume_group_name) - return error_message return None @staticmethod - def _validate_volume(volume): - if volume.volume_group_id: - error_message = "could not enable replication, " \ - "volume {} already belongs to volume group {}".format(volume.name, volume.volume_group_id) + def _validate_replication_object(object_type, replication_type): + if object_type == servers_settings.VOLUME_TYPE_NAME and \ + replication_type == array_settings.REPLICATION_TYPE_EAR: + error_message = "EAR replication is supported only on volume group level" return error_message return None diff --git a/controllers/servers/csi/controller_server_manager.py b/controllers/servers/csi/controller_server_manager.py index 38a5683a8..e5458403a 100644 --- a/controllers/servers/csi/controller_server_manager.py +++ b/controllers/servers/csi/controller_server_manager.py @@ -3,14 +3,14 @@ from concurrent import futures import grpc -from controllers.common.settings import CSI_CONTROLLER_SERVER_WORKERS -from csi_general import csi_pb2_grpc -from 
csi_general import replication_pb2_grpc +from csi_general import csi_pb2_grpc, replication_pb2_grpc, volumegroup_pb2_grpc from controllers.common.config import config from controllers.common.csi_logger import get_stdout_logger +from controllers.common.settings import CSI_CONTROLLER_SERVER_WORKERS from controllers.servers.csi.addons_server import ReplicationControllerServicer from controllers.servers.csi.csi_controller_server import CSIControllerServicer +from controllers.servers.csi.volume_group_server import VolumeGroupControllerServicer logger = get_stdout_logger() @@ -25,6 +25,7 @@ def __init__(self, array_endpoint): self.endpoint = array_endpoint self.csi_servicer = CSIControllerServicer() self.replication_servicer = ReplicationControllerServicer() + self.volume_group_servicer = VolumeGroupControllerServicer() def start_server(self): max_workers = get_max_workers_count() @@ -33,6 +34,7 @@ def start_server(self): csi_pb2_grpc.add_ControllerServicer_to_server(self.csi_servicer, controller_server) csi_pb2_grpc.add_IdentityServicer_to_server(self.csi_servicer, controller_server) replication_pb2_grpc.add_ControllerServicer_to_server(self.replication_servicer, controller_server) + volumegroup_pb2_grpc.add_ControllerServicer_to_server(self.volume_group_servicer, controller_server) # bind the server to the port defined above # controller_server.add_insecure_port('[::]:{}'.format(self.server_port)) diff --git a/controllers/servers/csi/controller_types.py b/controllers/servers/csi/controller_types.py index 5bd3957b0..3cd11dd83 100644 --- a/controllers/servers/csi/controller_types.py +++ b/controllers/servers/csi/controller_types.py @@ -1,33 +1,51 @@ -from dataclasses import dataclass, field, InitVar - -from controllers.array_action.array_action_types import ObjectIds - - -@dataclass -class ArrayConnectionInfo: - array_addresses: list - user: str - password: str - system_id: str = None - - -@dataclass -class ObjectIdInfo: - array_type: str - system_id: str - internal_id: 
InitVar[str] - uid: InitVar[str] - ids: ObjectIds = field(init=False) - - def __post_init__(self, internal_id, uid): - self.ids = ObjectIds(internal_id=internal_id, uid=uid) - - -@dataclass -class ObjectParameters: - pool: str - space_efficiency: str - prefix: str - io_group: str - volume_group: str - virt_snap_func: bool +from dataclasses import dataclass, field, InitVar + +from controllers.array_action.array_action_types import ObjectIds, VolumeGroupIds + + +@dataclass +class ArrayConnectionInfo: + array_addresses: list + user: str + password: str + system_id: str = None + + +@dataclass +class CommonIdInfo: + array_type: str + internal_id: InitVar[str] + + +@dataclass +class ObjectIdInfo(CommonIdInfo): + system_id: str + uid: InitVar[str] + ids: ObjectIds = field(init=False) + + def __post_init__(self, internal_id, uid): + self.ids = ObjectIds(internal_id=internal_id, uid=uid) + + +@dataclass +class VolumeGroupIdInfo(CommonIdInfo): + name: InitVar[str] + ids: VolumeGroupIds = field(init=False) + + def __post_init__(self, internal_id, name): + self.ids = VolumeGroupIds(internal_id=internal_id, name=name) + + +@dataclass +class ObjectParameters: + pool: str + space_efficiency: str + prefix: str + io_group: str + volume_group: str + virt_snap_func: bool + + +@dataclass +class VolumeGroupParameters: + prefix: str diff --git a/controllers/servers/csi/csi_controller_server.py b/controllers/servers/csi/csi_controller_server.py index c97b4ae6b..193c0771c 100755 --- a/controllers/servers/csi/csi_controller_server.py +++ b/controllers/servers/csi/csi_controller_server.py @@ -93,25 +93,21 @@ def CreateVolume(self, request, context): utils.validate_parameters_match_source_volume(space_efficiency, required_bytes, source_volume) try: - volume = array_mediator.get_volume(volume_final_name, pool, is_virt_snap_func) + volume = array_mediator.get_volume(volume_final_name, pool, is_virt_snap_func, source_type) except array_errors.ObjectNotFoundError: logger.debug( "volume was not 
found. creating a new volume with parameters: {0}".format(request.parameters)) array_mediator.validate_supported_space_efficiency(space_efficiency) + if topologies: + array_mediator.register_plugin('topology', '') volume = array_mediator.create_volume(volume_final_name, required_bytes, space_efficiency, pool, volume_parameters.io_group, volume_parameters.volume_group, source_ids, source_type, is_virt_snap_func) else: logger.debug("volume found : {}".format(volume)) - volume_capacity_bytes = volume.capacity_bytes - if not source_id and volume_capacity_bytes < required_bytes: - message = "Volume was already created with different size." \ - " volume size: {}, requested size: {}".format(volume_capacity_bytes, - required_bytes) - return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS, - csi_pb2.CreateVolumeResponse) + utils.validate_volume_idempotency(volume, required_bytes, source_id) if not is_virt_snap_func: response = self._get_create_volume_response_for_existing_volume_source(volume, @@ -448,21 +444,20 @@ def ControllerExpandVolume(self, request, context): return handle_exception(ex, context, grpc.StatusCode.INTERNAL, csi_pb2.ControllerExpandVolumeResponse) - def ControllerGetCapabilities(self, request, context): - logger.info("ControllerGetCapabilities") + def _get_controller_service_capability(self, capability_name): types = csi_pb2.ControllerServiceCapability.RPC.Type + capability_enum_value = types.Value(capability_name) + return csi_pb2.ControllerServiceCapability( + rpc=csi_pb2.ControllerServiceCapability.RPC(type=capability_enum_value)) + def ControllerGetCapabilities(self, request, context): + logger.info("ControllerGetCapabilities") response = csi_pb2.ControllerGetCapabilitiesResponse( - capabilities=[csi_pb2.ControllerServiceCapability( - rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("CREATE_DELETE_VOLUME"))), - csi_pb2.ControllerServiceCapability( - 
rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("CREATE_DELETE_SNAPSHOT"))), - csi_pb2.ControllerServiceCapability( - rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("PUBLISH_UNPUBLISH_VOLUME"))), - csi_pb2.ControllerServiceCapability( - rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("CLONE_VOLUME"))), - csi_pb2.ControllerServiceCapability( - rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("EXPAND_VOLUME")))]) + capabilities=[self._get_controller_service_capability("CREATE_DELETE_VOLUME"), + self._get_controller_service_capability("CREATE_DELETE_SNAPSHOT"), + self._get_controller_service_capability("PUBLISH_UNPUBLISH_VOLUME"), + self._get_controller_service_capability("CLONE_VOLUME"), + self._get_controller_service_capability("EXPAND_VOLUME")]) logger.info("finished ControllerGetCapabilities") return response @@ -479,33 +474,12 @@ def GetPluginInfo(self, _, context): # pylint: disable=invalid-name return csi_pb2.GetPluginInfoResponse(name=name, vendor_version=version) def _get_volume_final_name(self, volume_parameters, name, array_mediator): - return self._get_object_final_name(volume_parameters, name, array_mediator, + return utils.get_object_final_name(volume_parameters, name, array_mediator, servers_settings.VOLUME_TYPE_NAME) def _get_snapshot_final_name(self, volume_parameters, name, array_mediator): - name = self._get_object_final_name(volume_parameters, name, array_mediator, + return utils.get_object_final_name(volume_parameters, name, array_mediator, servers_settings.SNAPSHOT_TYPE_NAME) - return name - - def _get_object_final_name(self, volume_parameters, name, array_mediator, object_type): - prefix = "" - if volume_parameters.prefix: - prefix = volume_parameters.prefix - if len(prefix) > array_mediator.max_object_prefix_length: - raise array_errors.InvalidArgumentError( - "The {} name prefix '{}' is too long, max allowed length is {}".format( - object_type, - prefix, - 
array_mediator.max_object_prefix_length - ) - ) - if not prefix: - prefix = array_mediator.default_object_prefix - full_name = utils.join_object_prefix_with_name(prefix, name) - if len(full_name) > array_mediator.max_object_name_length: - hashed_name = utils.hash_string(name) - full_name = utils.join_object_prefix_with_name(prefix, hashed_name) - return full_name[:array_mediator.max_object_name_length] def GetPluginCapabilities(self, _, __): # pylint: disable=invalid-name logger.info("GetPluginCapabilities") diff --git a/controllers/servers/csi/decorators.py b/controllers/servers/csi/decorators.py index 4c3151161..3935c4367 100644 --- a/controllers/servers/csi/decorators.py +++ b/controllers/servers/csi/decorators.py @@ -1,28 +1,66 @@ -import grpc -from decorator import decorator - -from controllers.common.csi_logger import get_stdout_logger -from controllers.common.utils import set_current_thread_name -from controllers.servers.errors import ObjectAlreadyProcessingError -from controllers.servers.csi.exception_handler import handle_exception, handle_common_exceptions -from controllers.servers.csi.sync_lock import SyncLock - -logger = get_stdout_logger() - - -def csi_method(error_response_type, lock_request_attribute=''): - @decorator - def call_csi_method(controller_method, servicer, request, context): - lock_id = getattr(request, lock_request_attribute, None) - set_current_thread_name(lock_id) - controller_method_name = controller_method.__name__ - logger.info(controller_method_name) - try: - with SyncLock(lock_request_attribute, lock_id, controller_method_name): - response = handle_common_exceptions(controller_method, servicer, request, context, error_response_type) - except ObjectAlreadyProcessingError as ex: - return handle_exception(ex, context, grpc.StatusCode.ABORTED, error_response_type) - logger.info("finished {}".format(controller_method_name)) - return response - - return call_csi_method +import grpc +from decorator import decorator + +from 
controllers.common.csi_logger import get_stdout_logger +from controllers.common.utils import set_current_thread_name +from controllers.servers.errors import ObjectAlreadyProcessingError +from controllers.servers.settings import (VOLUME_TYPE_NAME, VOLUME_GROUP_TYPE_NAME, + LOCK_REPLICATION_REQUEST_ATTR, UNIQUE_KEY_KEY) +from controllers.array_action.settings import METADATA_KEY +from controllers.array_action.registration_maps import REGISTRATION_MAP +from controllers.servers.csi.exception_handler import handle_exception, handle_common_exceptions +from controllers.servers.csi.sync_lock import SyncLock + +logger = get_stdout_logger() + + +def csi_method(error_response_type, lock_request_attribute=''): + @decorator + def call_csi_method(controller_method, servicer, request, context): + lock_id = getattr(request, lock_request_attribute, None) + return _set_sync_lock(lock_id, lock_request_attribute, error_response_type, + controller_method, servicer, request, context) + + return call_csi_method + + +def csi_replication_method(error_response_type): + @decorator + def call_csi_method(controller_method, servicer, request, context): + replication_id = getattr(request, LOCK_REPLICATION_REQUEST_ATTR, None) + if replication_id: + if replication_id.HasField(VOLUME_GROUP_TYPE_NAME): + lock_id = replication_id.volumegroup.volume_group_id + elif replication_id.HasField(VOLUME_TYPE_NAME): + lock_id = replication_id.volume.volume_id + else: + lock_id = None + return _set_sync_lock(lock_id, LOCK_REPLICATION_REQUEST_ATTR, error_response_type, + controller_method, servicer, request, context) + + return call_csi_method + + +def _set_sync_lock(lock_id, lock_request_attribute, error_response_type, + controller_method, servicer, request, context): + set_current_thread_name(lock_id) + controller_method_name = controller_method.__name__ + logger.info(controller_method_name) + try: + with SyncLock(lock_request_attribute, lock_id, controller_method_name): + response = 
handle_common_exceptions(controller_method, servicer, request, context, error_response_type) + except ObjectAlreadyProcessingError as ex: + return handle_exception(ex, context, grpc.StatusCode.ABORTED, error_response_type) + logger.info("finished {}".format(controller_method_name)) + return response + + +def register_csi_plugin(): + @decorator + def call_csi_plugin_registration(mediator_method, mediator_class, *args): + plugin_fields = REGISTRATION_MAP.get(mediator_method.__name__, {}) + if plugin_fields: + mediator_class.register_plugin(plugin_fields[UNIQUE_KEY_KEY], plugin_fields[METADATA_KEY]) + return mediator_method(mediator_class, *args) + + return call_csi_plugin_registration diff --git a/controllers/servers/csi/exception_handler.py b/controllers/servers/csi/exception_handler.py index 7f54a45b8..dec0827e6 100644 --- a/controllers/servers/csi/exception_handler.py +++ b/controllers/servers/csi/exception_handler.py @@ -16,7 +16,8 @@ array_errors.HostNotFoundError: grpc.StatusCode.NOT_FOUND, array_errors.PermissionDeniedError: grpc.StatusCode.PERMISSION_DENIED, array_errors.ObjectIsStillInUseError: grpc.StatusCode.FAILED_PRECONDITION, - array_errors.CredentialsError: grpc.StatusCode.UNAUTHENTICATED + array_errors.CredentialsError: grpc.StatusCode.UNAUTHENTICATED, + array_errors.ObjectAlreadyExistError: grpc.StatusCode.ALREADY_EXISTS, } diff --git a/controllers/servers/csi/requirements.txt b/controllers/servers/csi/requirements.txt index 28e7bbef8..0f1c50ee3 100644 --- a/controllers/servers/csi/requirements.txt +++ b/controllers/servers/csi/requirements.txt @@ -1,9 +1,9 @@ -grpcio==1.41.1 -grpcio-tools==1.41.1 -protobuf==3.15.0 -pyyaml==6 +grpcio==1.58.0 +grpcio-tools==1.58.0 +protobuf==4.21.6 +pyyaml==6.0.1 munch==2.3.2 -retry==0.9.2 +retry2==0.9.5 packaging==20.1 base58==2.0.0 @@ -12,4 +12,4 @@ pyxcli==1.2.1 # SVC python client pysvc==1.1.1 # DS8K python client -pyds8k==1.4.0 \ No newline at end of file +pyds8k==1.4.0 diff --git 
a/controllers/servers/csi/sync_lock.py b/controllers/servers/csi/sync_lock.py index e02b1c74a..9c369d73c 100644 --- a/controllers/servers/csi/sync_lock.py +++ b/controllers/servers/csi/sync_lock.py @@ -1,58 +1,58 @@ -import threading -from collections import defaultdict - -from controllers.common.csi_logger import get_stdout_logger -from controllers.servers.errors import ObjectAlreadyProcessingError - -logger = get_stdout_logger() - -ids_in_use = defaultdict(set) -ids_in_use_lock = threading.Lock() - - -def _add_to_ids_in_use(lock_key, object_id): - ids_in_use[lock_key].add(object_id) - - -def _remove_from_ids_in_use(lock_key, object_id): - if object_id in ids_in_use[lock_key]: - ids_in_use[lock_key].remove(object_id) - else: - logger.error("could not find lock to release for {}: {}".format(lock_key, object_id)) - - -class SyncLock: - def __init__(self, lock_key, object_id, action_name): - self.lock_key = lock_key - self.object_id = object_id - self.action_name = action_name - - def __enter__(self): - if self.lock_key: - self._add_object_lock() - - def __exit__(self, exc_type, exc_val, exc_tb): - if self.lock_key: - self._remove_object_lock() - - def _add_object_lock(self): - logger.debug( - ("trying to acquire lock for action {} with {}: {}".format(self.action_name, self.lock_key, - self.object_id))) - with ids_in_use_lock: - if self.object_id in ids_in_use[self.lock_key]: - logger.error( - "lock for action {} with {}: {} is already in use by another thread".format(self.action_name, - self.lock_key, - self.object_id)) - raise ObjectAlreadyProcessingError(self.object_id) - _add_to_ids_in_use(self.lock_key, self.object_id) - logger.debug( - "succeed to acquire lock for action {} with {}: {}".format(self.action_name, - self.lock_key, - self.object_id)) - - def _remove_object_lock(self): - logger.debug("release lock for action {} with {}: {}".format(self.action_name, self.lock_key, self.object_id)) - with ids_in_use_lock: - _remove_from_ids_in_use(self.lock_key, 
self.object_id) +import threading +from collections import defaultdict + +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.errors import ObjectAlreadyProcessingError + +logger = get_stdout_logger() + +ids_in_use = defaultdict(set) +ids_in_use_lock = threading.Lock() + + +def _add_to_ids_in_use(lock_key, object_id): + ids_in_use[lock_key].add(object_id) + + +def _remove_from_ids_in_use(lock_key, object_id): + if object_id in ids_in_use[lock_key]: + ids_in_use[lock_key].remove(object_id) + else: + logger.error("could not find lock to release for {}: {}".format(lock_key, object_id)) + + +class SyncLock: + def __init__(self, lock_key, object_id, action_name): + self.lock_key = lock_key + self.object_id = object_id + self.action_name = action_name + + def __enter__(self): + if self.lock_key: + self._add_object_lock() + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.lock_key: + self._remove_object_lock() + + def _add_object_lock(self): + logger.debug( + ("trying to acquire lock for action {} with {}: {}".format(self.action_name, self.lock_key, + self.object_id))) + with ids_in_use_lock: + if self.object_id in ids_in_use[self.lock_key]: + logger.error( + "lock for action {} with {}: {} is already in use by another thread".format(self.action_name, + self.lock_key, + self.object_id)) + raise ObjectAlreadyProcessingError(self.object_id) + _add_to_ids_in_use(self.lock_key, self.object_id) + logger.debug( + "succeed to acquire lock for action {} with {}: {}".format(self.action_name, + self.lock_key, + self.object_id)) + + def _remove_object_lock(self): + logger.debug("release lock for action {} with {}: {}".format(self.action_name, self.lock_key, self.object_id)) + with ids_in_use_lock: + _remove_from_ids_in_use(self.lock_key, self.object_id) diff --git a/controllers/servers/csi/volume_group_server.py b/controllers/servers/csi/volume_group_server.py new file mode 100644 index 000000000..20470d6e1 --- /dev/null +++ 
b/controllers/servers/csi/volume_group_server.py @@ -0,0 +1,155 @@ +import grpc +from csi_general import volumegroup_pb2_grpc, volumegroup_pb2 + +import controllers.array_action.errors as array_errors +import controllers.servers.settings as servers_settings +import controllers.servers.utils as utils +from controllers.array_action.storage_agent import get_agent, detect_array_type +from controllers.array_action.utils import convert_scsi_id_to_nguid, convert_nguid_to_scsi_id +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.csi.decorators import csi_method +from controllers.servers.csi.exception_handler import handle_exception, \ + build_error_response +from controllers.servers.errors import ObjectIdError + +logger = get_stdout_logger() + + +class VolumeGroupControllerServicer(volumegroup_pb2_grpc.ControllerServicer): + @csi_method(error_response_type=volumegroup_pb2.CreateVolumeGroupResponse, lock_request_attribute="name") + def CreateVolumeGroup(self, request, context): + utils.validate_create_volume_group_request(request) + + logger.debug("volume group name : {}".format(request.name)) + try: + array_connection_info = utils.get_array_connection_info_from_secrets(request.secrets) + volume_group_parameters = utils.get_volume_group_parameters(parameters=request.parameters) + + # TODO : pass multiple array addresses + array_type = detect_array_type(array_connection_info.array_addresses) + with get_agent(array_connection_info, array_type).get_mediator() as array_mediator: + logger.debug(array_mediator) + volume_group_final_name = self._get_volume_group_final_name(volume_group_parameters, request.name, + array_mediator) + + try: + volume_group = array_mediator.get_volume_group(volume_group_final_name) + except array_errors.ObjectNotFoundError: + logger.debug( + "volume group was not found. 
creating a new volume group") + volume_group = array_mediator.create_volume_group(volume_group_final_name) + else: + logger.debug("volume group found : {}".format(volume_group)) + + if len(volume_group.volumes) > 0: + message = "Volume group {} is not empty".format(volume_group.name) + return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS, + volumegroup_pb2.CreateVolumeGroupResponse) + + response = utils.generate_csi_create_volume_group_response(volume_group) + return response + except array_errors.VolumeGroupAlreadyExists as ex: + return handle_exception(ex, context, grpc.StatusCode.ALREADY_EXISTS, + volumegroup_pb2.CreateVolumeGroupResponse) + + @csi_method(error_response_type=volumegroup_pb2.DeleteVolumeGroupResponse, lock_request_attribute="volume_group_id") + def DeleteVolumeGroup(self, request, _): + secrets = request.secrets + utils.validate_delete_volume_group_request(request) + + try: + volume_group_id_info = utils.get_volume_group_id_info(request.volume_group_id) + except ObjectIdError as ex: + logger.warning("volume group id is invalid. 
error : {}".format(ex)) + return volumegroup_pb2.DeleteVolumeGroupResponse() + + array_type = volume_group_id_info.array_type + volume_group_name = volume_group_id_info.ids.name + array_connection_info = utils.get_array_connection_info_from_secrets(secrets) + + with get_agent(array_connection_info, array_type).get_mediator() as array_mediator: + logger.debug(array_mediator) + + try: + logger.debug("Deleting volume group {}".format(volume_group_name)) + array_mediator.delete_volume_group(volume_group_name) + + except array_errors.ObjectNotFoundError as ex: + logger.debug("volume group was not found during deletion: {0}".format(ex)) + + return volumegroup_pb2.DeleteVolumeGroupResponse() + + def _add_volumes_missing_from_group(self, array_mediator, volume_ids_in_request, volume_ids_in_volume_group, + volume_group_id): + for volume_id in volume_ids_in_request: + if not self._is_volume_id_in_volume_group(volume_id, volume_ids_in_volume_group): + array_mediator.add_volume_to_volume_group(volume_group_id, volume_id) + + def _is_volume_id_in_volume_group(self, volume_id, volume_ids_in_volume_group): + return volume_id in volume_ids_in_volume_group \ + or convert_scsi_id_to_nguid(volume_id) in volume_ids_in_volume_group + + def _remove_volumes_missing_from_request(self, array_mediator, volume_ids_in_request, volume_ids_in_volume_group): + for volume_id in volume_ids_in_volume_group: + is_volume_id_in_request = False + for volume_id_in_request in volume_ids_in_request: + if self._is_volume_id_in_request(volume_id, volume_id_in_request): + is_volume_id_in_request = True + if not is_volume_id_in_request: + array_mediator.remove_volume_from_volume_group(volume_id) + + def _is_volume_id_in_request(self, volume_id, volume_id_in_request): + return volume_id == volume_id_in_request or volume_id_in_request.find(convert_nguid_to_scsi_id(volume_id)) >= 0 + + def _get_volume_group(self, array_mediator, volume_group_name): + try: + return 
array_mediator.get_volume_group(volume_group_name) + except array_errors.ObjectNotFoundError: + raise array_errors.ObjectNotFoundError(volume_group_name) + + def _get_volume_ids_from_request(self, volume_ids): + volume_ids_in_request = [] + for volume_id in volume_ids: + volume_id_info = utils.get_volume_id_info(volume_id) + volume_ids_in_request.append(volume_id_info.ids.uid) + return volume_ids_in_request + + def _get_volume_ids_from_volume_group(self, volumes): + return [volume.id for volume in volumes] + + @csi_method(error_response_type=volumegroup_pb2.ModifyVolumeGroupMembershipResponse, + lock_request_attribute="volume_group_id") + def ModifyVolumeGroupMembership(self, request, context): + secrets = request.secrets + utils.validate_modify_volume_group_request(request) + + try: + volume_group_id_info = utils.get_volume_group_id_info(request.volume_group_id) + except ObjectIdError as ex: + return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, + volumegroup_pb2.ModifyVolumeGroupMembershipResponse) + + array_type = volume_group_id_info.array_type + volume_group_name = volume_group_id_info.ids.name + array_connection_info = utils.get_array_connection_info_from_secrets(secrets) + + with get_agent(array_connection_info, array_type).get_mediator() as array_mediator: + logger.debug(array_mediator) + + volume_group = self._get_volume_group(array_mediator, volume_group_name) + + volume_ids_in_volume_group = self._get_volume_ids_from_volume_group(volume_group.volumes) + volume_ids_in_request = self._get_volume_ids_from_request(request.volume_ids) + + self._add_volumes_missing_from_group(array_mediator, volume_ids_in_request, volume_ids_in_volume_group, + volume_group_name) + self._remove_volumes_missing_from_request(array_mediator, volume_ids_in_request, volume_ids_in_volume_group) + + volume_group = self._get_volume_group(array_mediator, volume_group_name) + + response = utils.generate_csi_modify_volume_group_response(volume_group) + return response + 
+ def _get_volume_group_final_name(self, volume_parameters, name, array_mediator): + return utils.get_object_final_name(volume_parameters, name, array_mediator, + servers_settings.VOLUME_GROUP_TYPE_NAME) diff --git a/controllers/servers/host_definer/kubernetes_manager/__init__.py b/controllers/servers/host_definer/definition_manager/__init__.py similarity index 100% rename from controllers/servers/host_definer/kubernetes_manager/__init__.py rename to controllers/servers/host_definer/definition_manager/__init__.py diff --git a/controllers/servers/host_definer/definition_manager/definition.py b/controllers/servers/host_definer/definition_manager/definition.py new file mode 100644 index 000000000..bee0160c3 --- /dev/null +++ b/controllers/servers/host_definer/definition_manager/definition.py @@ -0,0 +1,125 @@ +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.host_definer.globals import NODES, MANAGED_SECRETS +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer.utils import manifest_utils +from controllers.servers.host_definer.types import DefineHostResponse +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.servers.host_definer.resource_manager.secret import SecretManager +from controllers.servers.host_definer.resource_manager.host_definition import HostDefinitionManager +from controllers.servers.host_definer.definition_manager.request import RequestManager +from controllers.servers.host_definer.storage_manager.host_definer_server import HostDefinerServicer + +logger = get_stdout_logger() + + +class DefinitionManager: + def __init__(self): + self.k8s_api = K8SApi() + self.secret_manager = SecretManager() + self.request_manager = RequestManager() + self.host_definition_manager = HostDefinitionManager() + self.storage_host_servicer = HostDefinerServicer() + + def define_node_on_all_storages(self, node_name): + 
logger.info(messages.DEFINE_NODE_ON_ALL_MANAGED_SECRETS.format(node_name)) + for secret_info in MANAGED_SECRETS: + if secret_info.managed_storage_classes == 0: + continue + host_definition_info = self.host_definition_manager.get_host_definition_info_from_secret_and_node_name( + node_name, secret_info) + self.create_definition(host_definition_info) + + def delete_definition(self, host_definition_info): + response = DefineHostResponse() + if self.secret_manager.is_node_should_be_managed_on_secret( + host_definition_info.node_name, host_definition_info.secret_name, + host_definition_info.secret_namespace): + response = self.undefine_host(host_definition_info) + self.host_definition_manager.handle_k8s_host_definition_after_undefine_action(host_definition_info, + response) + + def undefine_node_definitions(self, node_name): + for secret_info in MANAGED_SECRETS: + host_definition_info = self.host_definition_manager.get_host_definition_info_from_secret_and_node_name( + node_name, secret_info) + self.delete_definition(host_definition_info) + + def undefine_host_after_pending(self, host_definition_info): + response = DefineHostResponse() + if self.secret_manager.is_node_should_be_managed_on_secret( + host_definition_info.node_name, host_definition_info.secret_name, + host_definition_info.secret_namespace): + response = self.undefine_host(host_definition_info) + return response + + def undefine_host(self, host_definition_info): + logger.info(messages.UNDEFINED_HOST.format(host_definition_info.node_name, + host_definition_info.secret_name, host_definition_info.secret_namespace)) + return self._ensure_definition_state(host_definition_info, self.storage_host_servicer.undefine_host) + + def define_host_after_pending(self, host_definition_info): + response = DefineHostResponse() + if self.secret_manager.is_node_should_be_managed_on_secret( + host_definition_info.node_name, host_definition_info.secret_name, + host_definition_info.secret_namespace): + response = 
self.define_host(host_definition_info) + self._update_host_definition_from_storage_response(host_definition_info.name, response) + else: + self.host_definition_manager.delete_host_definition(host_definition_info.name) + return response + + def _update_host_definition_from_storage_response(self, host_definition_name, response): + logger.info(messages.UPDATE_HOST_DEFINITION_FIELDS_FROM_STORAGE.format(host_definition_name, response)) + host_definition_manifest = manifest_utils.generate_host_definition_response_fields_manifest( + host_definition_name, response) + self.k8s_api.patch_host_definition(host_definition_manifest) + + def define_nodes_when_new_secret(self, secret_info): + managed_secret_info, index = self.secret_manager.get_matching_managed_secret_info(secret_info) + secret_info.managed_storage_classes = 1 + if index == -1: + MANAGED_SECRETS.append(secret_info) + self._define_nodes_from_secret_info(secret_info) + elif managed_secret_info.managed_storage_classes == 0: + MANAGED_SECRETS[index] = secret_info + self._define_nodes_from_secret_info(secret_info) + else: + secret_info.managed_storage_classes = managed_secret_info.managed_storage_classes + 1 + MANAGED_SECRETS[index] = secret_info + + def _define_nodes_from_secret_info(self, secret_info): + logger.info(messages.NEW_MANAGED_SECRET.format(secret_info.name, secret_info.namespace)) + host_definition_info = self.host_definition_manager.get_host_definition_info_from_secret(secret_info) + self.define_nodes(host_definition_info) + + def define_nodes(self, host_definition_info): + for node_name, _ in NODES.items(): + host_definition_info = self.host_definition_manager.add_name_to_host_definition_info( + node_name, host_definition_info) + self.create_definition(host_definition_info) + + def create_definition(self, host_definition_info): + if not self.secret_manager.is_node_should_be_managed_on_secret( + host_definition_info.node_name, host_definition_info.secret_name, + host_definition_info.secret_namespace): + 
return + host_definition_info = self.host_definition_manager.update_host_definition_info(host_definition_info) + response = self.define_host(host_definition_info) + current_host_definition_info_on_cluster = self.host_definition_manager.create_host_definition_if_not_exist( + host_definition_info, response) + self.host_definition_manager.set_status_to_host_definition_after_definition( + response.error_message, current_host_definition_info_on_cluster) + + def define_host(self, host_definition_info): + logger.info(messages.DEFINE_NODE_ON_SECRET.format(host_definition_info.node_name, + host_definition_info.secret_name, host_definition_info.secret_namespace)) + return self._ensure_definition_state(host_definition_info, self.storage_host_servicer.define_host) + + def _ensure_definition_state(self, host_definition_info, define_function): + request = self.request_manager.generate_request(host_definition_info) + if not request: + response = DefineHostResponse() + response.error_message = messages.FAILED_TO_GET_SECRET_EVENT.format( + host_definition_info.secret_name, host_definition_info.secret_namespace) + return response + return define_function(request) diff --git a/controllers/servers/host_definer/definition_manager/request.py b/controllers/servers/host_definer/definition_manager/request.py new file mode 100644 index 000000000..49ce0d604 --- /dev/null +++ b/controllers/servers/host_definer/definition_manager/request.py @@ -0,0 +1,59 @@ +import controllers.common.settings as common_settings +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.host_definer.globals import NODES +from controllers.servers.host_definer import settings +from controllers.servers.host_definer.utils import utils +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer.types import DefineHostRequest +from controllers.servers.host_definer.resource_manager.secret import SecretManager +from 
controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager + +logger = get_stdout_logger() + + +class RequestManager: + def __init__(self): + self.secret_manager = SecretManager() + self.resource_info_manager = ResourceInfoManager() + + def generate_request(self, host_definition_info): + node_name = host_definition_info.node_name + logger.info(messages.GENERATE_REQUEST_FOR_NODE.format(node_name)) + node_info = self.resource_info_manager.get_node_info(node_name) + request = self._get_new_request(node_info.labels) + request = self._add_array_connectivity_info_to_request( + request, host_definition_info.secret_name, host_definition_info.secret_namespace, node_info.labels) + if request: + request.node_id_from_host_definition = host_definition_info.node_id + request.node_id_from_csi_node = self._get_node_id_by_node(host_definition_info) + request.io_group = self._get_io_group_by_node(host_definition_info.node_name) + return request + + def _get_new_request(self, labels): + request = DefineHostRequest() + connectivity_type_label_on_node = self._get_label_value(labels, common_settings.CONNECTIVITY_TYPE_LABEL) + request.prefix = utils.get_prefix() + request.connectivity_type_from_user = utils.get_connectivity_type_from_user(connectivity_type_label_on_node) + return request + + def _get_label_value(self, labels, label): + return labels.get(label) + + def _add_array_connectivity_info_to_request(self, request, secret_name, secret_namespace, labels): + request.array_connection_info = self.secret_manager.get_array_connection_info( + secret_name, secret_namespace, labels) + if request.array_connection_info: + return request + return None + + def _get_node_id_by_node(self, host_definition_info): + try: + return NODES[host_definition_info.node_name].node_id + except Exception: + return host_definition_info.node_id + + def _get_io_group_by_node(self, node_name): + try: + return NODES[node_name].io_group + except Exception: + return '' diff --git 
a/controllers/servers/host_definer/globals.py b/controllers/servers/host_definer/globals.py new file mode 100644 index 000000000..c65bf409b --- /dev/null +++ b/controllers/servers/host_definer/globals.py @@ -0,0 +1,2 @@ +MANAGED_SECRETS = [] +NODES = {} diff --git a/controllers/servers/host_definer/k8s/__init__.py b/controllers/servers/host_definer/k8s/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/servers/host_definer/k8s/api.py b/controllers/servers/host_definer/k8s/api.py new file mode 100644 index 000000000..d4db2d87c --- /dev/null +++ b/controllers/servers/host_definer/k8s/api.py @@ -0,0 +1,192 @@ +from kubernetes import client, config, dynamic, watch +from kubernetes.client import api_client +from kubernetes.client.rest import ApiException +from munch import Munch + +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.host_definer import settings +from controllers.servers.host_definer.utils import utils +import controllers.common.settings as common_settings +import controllers.servers.host_definer.messages as messages + +logger = get_stdout_logger() + + +class K8SApi(): + def __init__(self): + self._load_cluster_configuration() + self.dynamic_client = self._get_dynamic_client() + self.storage_api = client.StorageV1Api() + self.core_api = client.CoreV1Api() + self.custom_object_api = client.CustomObjectsApi() + self.apps_api = client.AppsV1Api() + self.csi_nodes_api = self._get_csi_nodes_api() + self.host_definitions_api = self._get_host_definitions_api() + + def _get_dynamic_client(self): + return dynamic.DynamicClient(api_client.ApiClient(configuration=self._load_cluster_configuration())) + + def _load_cluster_configuration(self): + return config.load_incluster_config() + + def _get_csi_nodes_api(self): + return self.dynamic_client.resources.get(api_version=settings.STORAGE_API_VERSION, + kind=settings.CSINODE_KIND) + + def _get_host_definitions_api(self): + return 
self.dynamic_client.resources.get(api_version=common_settings.CSI_IBM_API_VERSION, + kind=common_settings.HOST_DEFINITION_KIND) + + def get_csi_node(self, node_name): + try: + return self.csi_nodes_api.get(name=node_name) + except ApiException as ex: + if ex.status == 404: + logger.error(messages.CSI_NODE_DOES_NOT_EXIST.format(node_name)) + else: + logger.error(messages.FAILED_TO_GET_CSI_NODE.format(node_name, ex.body)) + return None + + def list_host_definition(self): + try: + return self.host_definitions_api.get() + except ApiException as ex: + logger.error(messages.FAILED_TO_GET_LIST_OF_HOST_DEFINITIONS.format(ex.body)) + return self._get_empty_k8s_list() + + def create_host_definition(self, host_definition_manifest): + try: + return self.host_definitions_api.create(body=host_definition_manifest) + except ApiException as ex: + if ex != 404: + logger.error(messages.FAILED_TO_CREATE_HOST_DEFINITION.format( + host_definition_manifest[common_settings.METADATA_FIELD][common_settings.NAME_FIELD], ex.body)) + return None + + def patch_cluster_custom_object_status(self, group, version, plural, name, status): + try: + self.custom_object_api.patch_cluster_custom_object_status(group, version, plural, name, status) + except ApiException as ex: + if ex.status == 404: + logger.error(messages.HOST_DEFINITION_DOES_NOT_EXIST.format(name)) + else: + logger.error(messages.FAILED_TO_SET_HOST_DEFINITION_STATUS.format(name, ex.body)) + + def create_event(self, namespace, k8s_event): + try: + self.core_api.create_namespaced_event(namespace, k8s_event) + except ApiException as ex: + logger.error(messages.FAILED_TO_CREATE_EVENT_FOR_HOST_DEFINITION.format( + k8s_event.involved_object.name, ex.body)) + + def delete_host_definition(self, host_definition_name): + try: + return self.host_definitions_api.delete(name=host_definition_name, body={}) + except ApiException as ex: + if ex.status != 404: + logger.error(messages.FAILED_TO_DELETE_HOST_DEFINITION.format(host_definition_name, ex.body)) 
+ return None + + def patch_host_definition(self, host_definition_manifest): + host_definition_name = host_definition_manifest[common_settings.METADATA_FIELD][common_settings.NAME_FIELD] + logger.info(messages.PATCHING_HOST_DEFINITION.format(host_definition_name)) + try: + self.host_definitions_api.patch(body=host_definition_manifest, name=host_definition_name, + content_type='application/merge-patch+json') + return 200 + except ApiException as ex: + if ex.status == 404: + logger.error(messages.HOST_DEFINITION_DOES_NOT_EXIST.format(host_definition_name)) + else: + logger.error(messages.FAILED_TO_PATCH_HOST_DEFINITION.format(host_definition_name, ex.body)) + return ex.status + + def patch_node(self, node_name, body): + try: + self.core_api.patch_node(node_name, body) + except ApiException as ex: + logger.error(messages.FAILED_TO_UPDATE_NODE_LABEL.format( + node_name, common_settings.MANAGE_NODE_LABEL, ex.body)) + + def get_secret_data(self, secret_name, secret_namespace): + try: + return self.core_api.read_namespaced_secret(name=secret_name, namespace=secret_namespace).data + except ApiException as ex: + if ex.status == 404: + logger.error(messages.SECRET_DOES_NOT_EXIST.format(secret_name, secret_namespace)) + else: + logger.error(messages.FAILED_TO_GET_SECRET.format(secret_name, secret_namespace, ex.body)) + return {} + + def read_node(self, node_name): + try: + logger.info(messages.READ_NODE.format(node_name)) + return self.core_api.read_node(name=node_name) + except ApiException as ex: + logger.error(messages.FAILED_TO_GET_NODE.format(node_name, ex.body)) + return None + + def list_daemon_set_for_all_namespaces(self, label_selector): + try: + return self.apps_api.list_daemon_set_for_all_namespaces(label_selector=label_selector) + except ApiException as ex: + logger.error(messages.FAILED_TO_LIST_DAEMON_SETS.format(ex.body)) + return None + + def list_pod_for_all_namespaces(self, label_selector): + try: + return 
self.core_api.list_pod_for_all_namespaces(label_selector=label_selector) + except ApiException as ex: + logger.error(messages.FAILED_TO_LIST_PODS.format(ex.body)) + return None + + def get_storage_class_stream(self): + return self._get_basic_resource_stream(self.storage_api.list_storage_class, 5) + + def list_storage_class(self): + try: + return self.storage_api.list_storage_class() + except ApiException as ex: + logger.error(messages.FAILED_TO_GET_STORAGE_CLASSES.format(ex.body)) + return self._get_empty_k8s_list() + + def get_node_stream(self): + return self._get_basic_resource_stream(self.core_api.list_node, 5) + + def list_node(self): + try: + return self.core_api.list_node() + except ApiException as ex: + logger.error(messages.FAILED_TO_GET_NODES.format(ex.body)) + return self._get_empty_k8s_list() + + def get_secret_stream(self): + return self._get_basic_resource_stream(self.core_api.list_secret_for_all_namespaces, 5) + + def _get_basic_resource_stream(self, list_function, timeout): + resource_version = utils.get_k8s_object_resource_version(list_function()) + return watch.Watch().stream(list_function, resource_version=resource_version, timeout_seconds=timeout) + + def get_host_definition_stream(self, resource_version, timeout): + return self.host_definitions_api.watch(resource_version=resource_version, timeout=timeout) + + def get_csi_node_stream(self): + resource_version = utils.get_k8s_object_resource_version(self.list_csi_node()) + return self.csi_nodes_api.watch(resource_version=resource_version, timeout=5) + + def list_csi_node(self): + try: + return self.csi_nodes_api.get() + except ApiException as ex: + logger.error(messages.FAILED_TO_GET_CSI_NODES.format(ex.body)) + return self._get_empty_k8s_list() + + def _get_empty_k8s_list(self): + much_object = Munch.fromDict({ + common_settings.ITEMS_FIELD: [], + common_settings.METADATA_FIELD: { + common_settings.RESOURCE_VERSION_FIELD + } + }) + much_object.items = [] + return much_object diff --git 
a/controllers/servers/host_definer/kubernetes_manager/manager.py b/controllers/servers/host_definer/kubernetes_manager/manager.py deleted file mode 100644 index 7a657b1db..000000000 --- a/controllers/servers/host_definer/kubernetes_manager/manager.py +++ /dev/null @@ -1,367 +0,0 @@ -import datetime -import base64 - -from kubernetes import client, config, dynamic -from kubernetes.client import api_client -from kubernetes.client.rest import ApiException - -from controllers.common.csi_logger import get_stdout_logger -import controllers.servers.host_definer.messages as messages -from controllers.servers.host_definer import settings -import controllers.common.settings as common_settings -from controllers.servers.host_definer.types import ( - CsiNodeInfo, PodInfo, NodeInfo, StorageClassInfo, HostDefinitionInfo) - -logger = get_stdout_logger() - - -class KubernetesManager(): - def __init__(self): - self._load_cluster_configuration() - self.dynamic_client = self._get_dynamic_client() - self.storage_api = client.StorageV1Api() - self.core_api = client.CoreV1Api() - self.custom_object_api = client.CustomObjectsApi() - self.apps_api = client.AppsV1Api() - self.csi_nodes_api = self._get_csi_nodes_api() - self.host_definitions_api = self._get_host_definitions_api() - - def _get_dynamic_client(self): - return dynamic.DynamicClient(api_client.ApiClient(configuration=self._load_cluster_configuration())) - - def _load_cluster_configuration(self): - return config.load_incluster_config() - - def _get_csi_nodes_api(self): - return self.dynamic_client.resources.get(api_version=settings.STORAGE_API_VERSION, - kind=settings.CSINODE_KIND) - - def _get_host_definitions_api(self): - return self.dynamic_client.resources.get(api_version=settings.CSI_IBM_API_VERSION, - kind=settings.HOST_DEFINITION_KIND) - - def _get_csi_nodes_info_with_driver(self): - csi_nodes_info_with_driver = [] - k8s_csi_nodes = self._get_k8s_csi_nodes() - for k8s_csi_node in k8s_csi_nodes: - if 
self._is_k8s_csi_node_has_driver(k8s_csi_node): - csi_nodes_info_with_driver.append(self._generate_csi_node_info(k8s_csi_node)) - logger.info(messages.CSI_NODES_WITH_IBM_BLOCK_CSI_DRIVER.format(csi_nodes_info_with_driver)) - return csi_nodes_info_with_driver - - def _get_k8s_csi_nodes(self): - try: - return self.csi_nodes_api.get().items - except ApiException as ex: - logger.error(messages.FAILED_TO_GET_CSI_NODES.format(ex.body)) - return [] - - def _get_nodes_info(self): - try: - nodes_info = [] - for k8s_node in self.core_api.list_node().items: - k8s_node = self._generate_node_info(k8s_node) - nodes_info.append(k8s_node) - return nodes_info - except ApiException as ex: - logger.error(messages.FAILED_TO_GET_NODES.format(ex.body)) - return [] - - def _get_storage_classes_info(self): - try: - storage_classes_info = [] - for k8s_storage_class in self.storage_api.list_storage_class().items: - storage_class_info = self._generate_storage_class_info(k8s_storage_class) - storage_classes_info.append(storage_class_info) - return storage_classes_info - - except ApiException as ex: - logger.error(messages.FAILED_TO_GET_STORAGE_CLASSES.format(ex.body)) - return [] - - def _generate_storage_class_info(self, k8s_storage_class): - storage_class_info = StorageClassInfo() - storage_class_info.name = k8s_storage_class.metadata.name - storage_class_info.provisioner = k8s_storage_class.provisioner - storage_class_info.parameters = k8s_storage_class.parameters - return storage_class_info - - def _is_k8s_csi_node_has_driver(self, k8s_csi_node): - if k8s_csi_node.spec.drivers: - for driver in k8s_csi_node.spec.drivers: - if driver.name == settings.CSI_PROVISIONER_NAME: - return True - return False - - def _get_csi_node_info(self, node_name): - try: - k8s_csi_node = self.csi_nodes_api.get(name=node_name) - return self._generate_csi_node_info(k8s_csi_node) - except ApiException as ex: - if ex.status == 404: - logger.error(messages.CSI_NODE_DOES_NOT_EXIST.format(node_name)) - else: - 
logger.error(messages.FAILED_TO_GET_CSI_NODE.format(node_name, ex.body)) - return CsiNodeInfo() - - def _generate_csi_node_info(self, k8s_csi_node): - csi_node_info = CsiNodeInfo() - csi_node_info.name = k8s_csi_node.metadata.name - csi_node_info.node_id = self._get_node_id_from_k8s_csi_node(k8s_csi_node) - return csi_node_info - - def _get_node_id_from_k8s_csi_node(self, k8s_csi_node): - if k8s_csi_node.spec.drivers: - for driver in k8s_csi_node.spec.drivers: - if driver.name == settings.CSI_PROVISIONER_NAME: - return driver.nodeID - return '' - - def _get_matching_host_definition_info(self, node_name, secret_name, secret_namespace): - k8s_host_definitions = self._get_k8s_host_definitions() - for k8s_host_definition in k8s_host_definitions: - host_definition_info = self._generate_host_definition_info(k8s_host_definition) - if self._is_host_definition_matches(host_definition_info, node_name, secret_name, secret_namespace): - return host_definition_info - return None - - def _get_k8s_host_definitions(self): - try: - return self.host_definitions_api.get().items - - except ApiException as ex: - logger.error(messages.FAILED_TO_GET_LIST_OF_HOST_DEFINITIONS.format(ex.body)) - return [] - - def _generate_host_definition_info(self, k8s_host_definition): - host_definition_info = HostDefinitionInfo() - host_definition_info.name = k8s_host_definition.metadata.name - host_definition_info.resource_version = self._get_k8s_object_resource_version(k8s_host_definition) - host_definition_info.uid = k8s_host_definition.metadata.uid - host_definition_info.phase = self._get_host_definition_phase(k8s_host_definition) - host_definition_info.secret_name = self._get_attr_from_host_definition( - k8s_host_definition, settings.SECRET_NAME_FIELD) - host_definition_info.secret_namespace = self._get_attr_from_host_definition( - k8s_host_definition, settings.SECRET_NAMESPACE_FIELD) - host_definition_info.node_name = self._get_attr_from_host_definition( - k8s_host_definition, 
settings.NODE_NAME_FIELD) - host_definition_info.node_id = self._get_attr_from_host_definition( - k8s_host_definition, common_settings.HOST_DEFINITION_NODE_ID_FIELD) - host_definition_info.connectivity_type = self._get_attr_from_host_definition( - k8s_host_definition, settings.CONNECTIVITY_TYPE_FIELD) - return host_definition_info - - def _get_k8s_object_resource_version(self, k8s_object): - if k8s_object.metadata.resource_version: - return k8s_object.metadata.resource_version - return k8s_object.metadata.resourceVersion - - def _get_host_definition_phase(self, k8s_host_definition): - if k8s_host_definition.status: - return k8s_host_definition.status.phase - return '' - - def _get_attr_from_host_definition(self, k8s_host_definition, attribute): - if hasattr(k8s_host_definition.spec.hostDefinition, attribute): - return getattr(k8s_host_definition.spec.hostDefinition, attribute) - return '' - - def _is_host_definition_matches(self, host_definition_info, node_name, secret_name, secret_namespace): - return host_definition_info.node_name == node_name and \ - host_definition_info.secret_name == secret_name and \ - host_definition_info.secret_namespace == secret_namespace - - def _create_host_definition(self, host_definition_manifest): - try: - k8s_host_definition = self.host_definitions_api.create(body=host_definition_manifest) - logger.info(messages.CREATED_HOST_DEFINITION.format(k8s_host_definition.metadata.name)) - self._add_finalizer(k8s_host_definition.metadata.name) - return self._generate_host_definition_info(k8s_host_definition) - except ApiException as ex: - if ex != 404: - logger.error(messages.FAILED_TO_CREATE_HOST_DEFINITION.format( - host_definition_manifest[settings.METADATA][common_settings.NAME_FIELD], ex.body)) - - def _add_finalizer(self, host_definition_name): - logger.info(messages.ADD_FINALIZER_TO_HOST_DEFINITION.format(host_definition_name)) - self._update_finalizer(host_definition_name, [settings.CSI_IBM_FINALIZER, ]) - - def 
_set_host_definition_status(self, host_definition_name, host_definition_phase): - logger.info(messages.SET_HOST_DEFINITION_STATUS.format(host_definition_name, host_definition_phase)) - status = self._get_status_manifest(host_definition_phase) - try: - self.custom_object_api.patch_cluster_custom_object_status( - common_settings.CSI_IBM_GROUP, common_settings.VERSION, common_settings.HOST_DEFINITION_PLURAL, - host_definition_name, status) - except ApiException as ex: - if ex.status == 404: - logger.error(messages.HOST_DEFINITION_DOES_NOT_EXIST.format(host_definition_name)) - else: - logger.error(messages.FAILED_TO_SET_HOST_DEFINITION_STATUS.format(host_definition_name, ex.body)) - - def _get_status_manifest(self, host_definition_phase): - return { - settings.STATUS: { - settings.PHASE: host_definition_phase, - } - } - - def _generate_k8s_event(self, host_definition_info, message, action, message_type): - return client.CoreV1Event( - metadata=client.V1ObjectMeta(generate_name='{}.'.format(host_definition_info.name),), - reporting_component=settings.HOST_DEFINER, reporting_instance=settings.HOST_DEFINER, action=action, - type=self._get_event_type(message_type), reason=message_type+action, message=str(message), - event_time=datetime.datetime.utcnow().isoformat(timespec='microseconds') + 'Z', - involved_object=client.V1ObjectReference( - api_version=settings.CSI_IBM_API_VERSION, kind=settings.HOST_DEFINITION_KIND, - name=host_definition_info.name, resource_version=host_definition_info.resource_version, - uid=host_definition_info.uid,)) - - def _get_event_type(self, message_type): - if message_type != settings.SUCCESSFUL_MESSAGE_TYPE: - return settings.WARNING_EVENT_TYPE - return settings.NORMAL_EVENT_TYPE - - def _create_k8s_event(self, namespace, k8s_event): - try: - self.core_api.create_namespaced_event(namespace, k8s_event) - except ApiException as ex: - logger.error(messages.FAILED_TO_CREATE_EVENT_FOR_HOST_DEFINITION.format( - k8s_event.involved_object.name, 
ex.body)) - - def _delete_host_definition(self, host_definition_name): - logger.info(messages.DELETE_HOST_DEFINITION.format(host_definition_name)) - try: - remove_finalizer_status_code = self._remove_finalizer(host_definition_name) - if remove_finalizer_status_code == 200: - self.host_definitions_api.delete(name=host_definition_name, body={}) - else: - logger.error(messages.FAILED_TO_DELETE_HOST_DEFINITION.format( - host_definition_name, messages.FAILED_TO_REMOVE_FINALIZER)) - except ApiException as ex: - if ex.status != 404: - logger.error(messages.FAILED_TO_DELETE_HOST_DEFINITION.format(host_definition_name, ex.body)) - - def _remove_finalizer(self, host_definition_name): - logger.info(messages.REMOVE_FINALIZER_TO_HOST_DEFINITION.format(host_definition_name)) - return self._update_finalizer(host_definition_name, []) - - def _update_finalizer(self, host_definition_name, finalizers): - finalizer_manifest = { - settings.METADATA: { - common_settings.NAME_FIELD: host_definition_name, - settings.FINALIZERS: finalizers, - } - } - return self._patch_host_definition(finalizer_manifest) - - def _patch_host_definition(self, host_definition_manifest): - host_definition_name = host_definition_manifest[settings.METADATA][common_settings.NAME_FIELD] - logger.info(messages.PATCHING_HOST_DEFINITION.format(host_definition_name)) - try: - self.host_definitions_api.patch(body=host_definition_manifest, name=host_definition_name, - content_type='application/merge-patch+json') - return 200 - except ApiException as ex: - if ex.status == 404: - logger.error(messages.HOST_DEFINITION_DOES_NOT_EXIST.format(host_definition_name)) - else: - logger.error(messages.FAILED_TO_PATCH_HOST_DEFINITION.format(host_definition_name, ex.body)) - return ex.status - - def _update_manage_node_label(self, node_name, label_value): - body = self._get_body_for_labels(label_value) - try: - self.core_api.patch_node(node_name, body) - except ApiException as ex: - 
logger.error(messages.FAILED_TO_UPDATE_NODE_LABEL.format( - node_name, settings.MANAGE_NODE_LABEL, ex.body)) - - def _get_body_for_labels(self, label_value): - body = { - settings.METADATA: { - settings.LABELS: { - settings.MANAGE_NODE_LABEL: label_value} - } - } - - return body - - def _get_secret_data(self, secret_name, secret_namespace): - try: - logger.info(messages.READ_SECRET.format(secret_name, secret_namespace)) - secret_data = self.core_api.read_namespaced_secret(name=secret_name, namespace=secret_namespace).data - return self._change_decode_base64_secret_config(secret_data) - except ApiException as ex: - if ex.status == 404: - logger.error(messages.SECRET_DOES_NOT_EXIST.format(secret_name, secret_namespace)) - else: - logger.error(messages.FAILED_TO_GET_SECRET.format(secret_name, secret_namespace, ex.body)) - return {} - - def _change_decode_base64_secret_config(self, secret_data): - if settings.SECRET_CONFIG_FIELD in secret_data.keys(): - secret_data[settings.SECRET_CONFIG_FIELD] = self._decode_base64_to_dict( - secret_data[settings.SECRET_CONFIG_FIELD]) - return secret_data - - def _decode_base64_to_dict(self, content_with_base64): - decoded_string_content = self._decode_base64_to_string(content_with_base64) - encoded_dict = str(decoded_string_content).encode('utf-8') - base64_dict = base64.b64encode(encoded_dict) - my_dict_again = eval(base64.b64decode(base64_dict)) - return my_dict_again - - def _decode_base64_to_string(self, content_with_base64): - try: - base64_bytes = content_with_base64.encode('ascii') - decoded_string_in_bytes = base64.b64decode(base64_bytes) - return decoded_string_in_bytes.decode('ascii') - except Exception: - return content_with_base64 - - def _get_node_info(self, node_name): - k8s_node = self._read_node(node_name) - if k8s_node: - return self._generate_node_info(k8s_node) - return NodeInfo('', {}) - - def _read_node(self, node_name): - try: - logger.info(messages.READ_NODE.format(node_name)) - return 
self.core_api.read_node(name=node_name) - except ApiException as ex: - logger.error(messages.FAILED_TO_GET_NODE.format(node_name, ex.body)) - return None - - def _generate_node_info(self, k8s_node): - return NodeInfo(k8s_node.metadata.name, k8s_node.metadata.labels) - - def _get_csi_daemon_set(self): - try: - daemon_sets = self.apps_api.list_daemon_set_for_all_namespaces(label_selector=settings.DRIVER_PRODUCT_LABEL) - if daemon_sets.items: - return daemon_sets.items[0] - return None - except ApiException as ex: - logger.error(messages.FAILED_TO_LIST_DAEMON_SETS.format(ex.body)) - return None - - def _get_csi_pods_info(self): - try: - pods_info = [] - k8s_pods = self.core_api.list_pod_for_all_namespaces(label_selector=settings.DRIVER_PRODUCT_LABEL) - for k8s_pod in k8s_pods.items: - pod_info = self._generate_pod_info(k8s_pod) - pods_info.append(pod_info) - return pods_info - - except ApiException as ex: - logger.error(messages.FAILED_TO_LIST_PODS.format(ex.body)) - return [] - - def _generate_pod_info(self, k8s_pod): - pod_info = PodInfo() - pod_info.name = k8s_pod.metadata.name - pod_info.node_name = k8s_pod.spec.node_name - return pod_info diff --git a/controllers/servers/host_definer/messages.py b/controllers/servers/host_definer/messages.py index a4e3e8beb..79c91fd1e 100644 --- a/controllers/servers/host_definer/messages.py +++ b/controllers/servers/host_definer/messages.py @@ -1,4 +1,4 @@ -from controllers.servers.host_definer import settings +import controllers.common.settings as common_settings SECRET_DOES_NOT_EXIST = 'Secret {} in namespace {} does not exist' FAILED_TO_GET_SECRET = 'Failed to get Secret {} in namespace {}, go this error: {}' @@ -37,7 +37,7 @@ DELETE_HOST_DEFINITION = 'Deleting host definition {}' ADD_FINALIZER_TO_HOST_DEFINITION = 'Adding finalizer to host definition {}' REMOVE_FINALIZER_TO_HOST_DEFINITION = 'Removing finalizer from host definition {}' -FAILED_TO_REMOVE_FINALIZER = 'Failed to remove {} finalizer from 
node'.format(settings.CSI_IBM_FINALIZER) +FAILED_TO_REMOVE_FINALIZER = 'Failed to remove {} finalizer from node'.format(common_settings.CSI_IBM_FINALIZER) NODE_ID_WAS_CHANGED = "NodeId was changed for {} node, updating his ports in his definitions,"\ "old NodeId [{}], new NodeId [{}]" UPDATE_HOST_DEFINITION_FIELDS_FROM_STORAGE = 'Update host definition {} host from storage fields with {}' diff --git a/controllers/servers/host_definer/resource_manager/__init__.py b/controllers/servers/host_definer/resource_manager/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/servers/host_definer/resource_manager/csi_node.py b/controllers/servers/host_definer/resource_manager/csi_node.py new file mode 100644 index 000000000..d85d2c6ae --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/csi_node.py @@ -0,0 +1,51 @@ +from controllers.common.csi_logger import get_stdout_logger +import controllers.common.settings as common_settings +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager +from controllers.servers.host_definer.resource_manager.daemon_set import DaemonSetManager + +logger = get_stdout_logger() + + +class CSINodeManager: + def __init__(self): + self.k8s_api = K8SApi() + self.resource_info_manager = ResourceInfoManager() + self.daemon_set_manager = DaemonSetManager() + + def get_csi_nodes_info_with_driver(self): + csi_nodes_info_with_driver = [] + k8s_csi_nodes = self.k8s_api.list_csi_node().items + for k8s_csi_node in k8s_csi_nodes: + if self._is_k8s_csi_node_has_driver(k8s_csi_node): + csi_nodes_info_with_driver.append(self.resource_info_manager.generate_csi_node_info(k8s_csi_node)) + logger.info(messages.CSI_NODES_WITH_IBM_BLOCK_CSI_DRIVER.format(csi_nodes_info_with_driver)) + return csi_nodes_info_with_driver + + def _is_k8s_csi_node_has_driver(self, 
k8s_csi_node): + if k8s_csi_node.spec.drivers: + for driver in k8s_csi_node.spec.drivers: + if driver.name == common_settings.CSI_PROVISIONER_NAME: + return True + return False + + def is_host_part_of_update(self, worker): + logger.info(messages.CHECK_IF_NODE_IS_PART_OF_UPDATE.format(worker)) + daemon_set_name = self.daemon_set_manager.wait_until_all_daemon_set_pods_are_up_to_date() + if daemon_set_name: + return self._is_csi_node_pod_running_on_worker(worker, daemon_set_name) + return False + + def _is_csi_node_pod_running_on_worker(self, worker, daemon_set_name): + logger.info(messages.CHECK_IF_CSI_NODE_POD_IS_RUNNING.format(worker)) + csi_pods_info = self.resource_info_manager.get_csi_pods_info() + for pod_info in csi_pods_info: + if (pod_info.node_name == worker) and (daemon_set_name in pod_info.name): + return True + return False + + def is_node_id_changed(self, host_definition_node_id, csi_node_node_id): + if host_definition_node_id != csi_node_node_id and host_definition_node_id and csi_node_node_id: + return True + return False diff --git a/controllers/servers/host_definer/resource_manager/daemon_set.py b/controllers/servers/host_definer/resource_manager/daemon_set.py new file mode 100644 index 000000000..be97ca252 --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/daemon_set.py @@ -0,0 +1,37 @@ +import time + +import controllers.common.settings as common_settings +from controllers.common.csi_logger import get_stdout_logger +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer import settings +from controllers.servers.host_definer.k8s.api import K8SApi + +logger = get_stdout_logger() + + +class DaemonSetManager(): + def __init__(self): + self.k8s_api = K8SApi() + + def wait_until_all_daemon_set_pods_are_up_to_date(self): + csi_daemon_set = self._get_csi_daemon_set() + if not csi_daemon_set: + return None + status = csi_daemon_set.status + while status.updated_number_scheduled != 
status.desired_number_scheduled: + logger.info(messages.UPDATED_CSI_NODE_VS_DESIRED.format( + status.updated_number_scheduled, status.desired_number_scheduled)) + if status.desired_number_scheduled == 0: + return None + csi_daemon_set = self._get_csi_daemon_set() + if not csi_daemon_set: + return None + status = csi_daemon_set.status + time.sleep(0.5) + return csi_daemon_set.metadata.name + + def _get_csi_daemon_set(self): + daemon_sets = self.k8s_api.list_daemon_set_for_all_namespaces(common_settings.DRIVER_PRODUCT_LABEL) + if daemon_sets and daemon_sets.items: + return daemon_sets.items[0] + return None diff --git a/controllers/servers/host_definer/resource_manager/event.py b/controllers/servers/host_definer/resource_manager/event.py new file mode 100644 index 000000000..652ee6675 --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/event.py @@ -0,0 +1,29 @@ +import datetime +from kubernetes import client + +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.host_definer import settings +import controllers.common.settings as common_settings + +logger = get_stdout_logger() + + +class EventManager(): + def generate_k8s_event(self, host_definition_info, message, action, message_type): + return client.CoreV1Event(metadata=client.V1ObjectMeta( + generate_name='{}.'.format(host_definition_info.name),), + reporting_component=common_settings.HOST_DEFINER, + reporting_instance=common_settings.HOST_DEFINER, action=action, + type=self._get_event_type(message_type), + reason=message_type + action, message=str(message), + event_time=datetime.datetime.utcnow().isoformat(timespec='microseconds') + 'Z', + involved_object=client.V1ObjectReference( + api_version=common_settings.CSI_IBM_API_VERSION, + kind=common_settings.HOST_DEFINITION_KIND, name=host_definition_info.name, + resource_version=host_definition_info.resource_version, + uid=host_definition_info.uid,)) + + def _get_event_type(self, message_type): + if message_type != 
common_settings.SUCCESSFUL_MESSAGE_TYPE: + return common_settings.WARNING_EVENT_TYPE + return common_settings.NORMAL_EVENT_TYPE diff --git a/controllers/servers/host_definer/resource_manager/host_definition.py b/controllers/servers/host_definer/resource_manager/host_definition.py new file mode 100644 index 000000000..61d14a85f --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/host_definition.py @@ -0,0 +1,171 @@ +from controllers.common.csi_logger import get_stdout_logger +import controllers.common.settings as common_settings +from controllers.servers.host_definer import settings +from controllers.servers.host_definer.globals import NODES +from controllers.servers.host_definer.utils import utils +from controllers.servers.host_definer.utils import manifest_utils +from controllers.servers.host_definer.types import HostDefinitionInfo +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.servers.host_definer.resource_manager.event import EventManager +from controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager + +logger = get_stdout_logger() + + +class HostDefinitionManager: + def __init__(self): + self.k8s_api = K8SApi() + self.event_manager = EventManager() + self.resource_info_manager = ResourceInfoManager() + + def get_host_definition_info_from_secret_and_node_name(self, node_name, secret_info): + host_definition_info = self.get_host_definition_info_from_secret(secret_info) + host_definition_info = self.add_name_to_host_definition_info(node_name, host_definition_info) + return host_definition_info + + def get_host_definition_info_from_secret(self, secret_info): + host_definition_info = HostDefinitionInfo() + host_definition_info.secret_name = secret_info.name + host_definition_info.secret_namespace = secret_info.namespace + return host_definition_info + + def add_name_to_host_definition_info(self, node_name, 
host_definition_info): + host_definition_info.node_name = node_name + host_definition_info.node_id = NODES[node_name].node_id + host_definition_info.name = self._get_host_definition_name(node_name) + return host_definition_info + + def _get_host_definition_name(self, node_name): + return '{0}-{1}'.format(node_name, utils.get_random_string()).replace('_', '.') + + def update_host_definition_info(self, host_definition_info): + host_definition_info_on_cluster = self.get_matching_host_definition_info( + host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) + if host_definition_info_on_cluster: + host_definition_info.connectivity_type = host_definition_info_on_cluster.connectivity_type + host_definition_info.node_id = host_definition_info_on_cluster.node_id + return host_definition_info + + def create_host_definition_if_not_exist(self, host_definition_info, response): + node_id = NODES[host_definition_info.node_name].node_id + host_definition_manifest = manifest_utils.get_host_definition_manifest(host_definition_info, response, node_id) + current_host_definition_info_on_cluster = self.get_matching_host_definition_info( + host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) + if current_host_definition_info_on_cluster: + host_definition_manifest[common_settings.METADATA_FIELD][ + common_settings.NAME_FIELD] = current_host_definition_info_on_cluster.name + self.k8s_api.patch_host_definition(host_definition_manifest) + return current_host_definition_info_on_cluster + else: + logger.info(messages.CREATING_NEW_HOST_DEFINITION.format(host_definition_info.name)) + return self.create_host_definition(host_definition_manifest) + + def create_host_definition(self, host_definition_manifest): + k8s_host_definition = self.k8s_api.create_host_definition(host_definition_manifest) + if k8s_host_definition: + 
logger.info(messages.CREATED_HOST_DEFINITION.format(k8s_host_definition.metadata.name)) + self._add_finalizer(k8s_host_definition.metadata.name) + return self.resource_info_manager.generate_host_definition_info(k8s_host_definition) + return HostDefinitionInfo() + + def _add_finalizer(self, host_definition_name): + logger.info(messages.ADD_FINALIZER_TO_HOST_DEFINITION.format(host_definition_name)) + self._update_finalizer(host_definition_name, [common_settings.CSI_IBM_FINALIZER, ]) + + def set_status_to_host_definition_after_definition(self, message_from_storage, host_definition_info): + if message_from_storage and host_definition_info: + self.set_host_definition_status(host_definition_info.name, + common_settings.PENDING_CREATION_PHASE) + self.create_k8s_event_for_host_definition( + host_definition_info, message_from_storage, common_settings.DEFINE_ACTION, + common_settings.FAILED_MESSAGE_TYPE) + elif host_definition_info: + self.set_host_definition_status_to_ready(host_definition_info) + + def set_host_definition_status_to_ready(self, host_definition): + self.set_host_definition_status(host_definition.name, common_settings.READY_PHASE) + self.create_k8s_event_for_host_definition( + host_definition, settings.SUCCESS_MESSAGE, common_settings.DEFINE_ACTION, + common_settings.SUCCESSFUL_MESSAGE_TYPE) + + def handle_k8s_host_definition_after_undefine_action(self, host_definition_info, response): + current_host_definition_info_on_cluster = self.get_matching_host_definition_info( + host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) + if current_host_definition_info_on_cluster: + self._handle_existing_k8s_host_definition_after_undefine_action( + response.error_message, current_host_definition_info_on_cluster) + + def _handle_existing_k8s_host_definition_after_undefine_action(self, message_from_storage, host_definition_info): + if message_from_storage and host_definition_info: + 
self.set_host_definition_status(host_definition_info.name, + common_settings.PENDING_DELETION_PHASE) + self.create_k8s_event_for_host_definition( + host_definition_info, message_from_storage, + common_settings.UNDEFINE_ACTION, common_settings.FAILED_MESSAGE_TYPE) + elif host_definition_info: + self.delete_host_definition(host_definition_info.name) + + def create_k8s_event_for_host_definition(self, host_definition_info, message, action, message_type): + logger.info(messages.CREATE_EVENT_FOR_HOST_DEFINITION.format(message, host_definition_info.name)) + k8s_event = self.event_manager.generate_k8s_event(host_definition_info, message, action, message_type) + self.k8s_api.create_event(common_settings.DEFAULT_NAMESPACE, k8s_event) + + def delete_host_definition(self, host_definition_name): + logger.info(messages.DELETE_HOST_DEFINITION.format(host_definition_name)) + remove_finalizer_status_code = self._remove_finalizer(host_definition_name) + if remove_finalizer_status_code == 200: + self.k8s_api.delete_host_definition(host_definition_name) + else: + logger.error(messages.FAILED_TO_DELETE_HOST_DEFINITION.format( + host_definition_name, messages.FAILED_TO_REMOVE_FINALIZER)) + + def _remove_finalizer(self, host_definition_name): + logger.info(messages.REMOVE_FINALIZER_TO_HOST_DEFINITION.format(host_definition_name)) + return self._update_finalizer(host_definition_name, []) + + def _update_finalizer(self, host_definition_name, finalizers): + finalizer_manifest = manifest_utils.get_finalizer_manifest(host_definition_name, finalizers) + return self.k8s_api.patch_host_definition(finalizer_manifest) + + def is_host_definition_in_pending_phase(self, phase): + return phase.startswith(settings.PENDING_PREFIX) + + def set_host_definition_phase_to_error(self, host_definition_info): + logger.info(messages.SET_HOST_DEFINITION_PHASE_TO_ERROR.format(host_definition_info.name)) + self.set_host_definition_status(host_definition_info.name, common_settings.ERROR_PHASE) + + def 
set_host_definition_status(self, host_definition_name, host_definition_phase): + logger.info(messages.SET_HOST_DEFINITION_STATUS.format(host_definition_name, host_definition_phase)) + status = manifest_utils.get_host_definition_status_manifest(host_definition_phase) + self.k8s_api.patch_cluster_custom_object_status( + common_settings.CSI_IBM_GROUP, common_settings.VERSION, common_settings.HOST_DEFINITION_PLURAL, + host_definition_name, status) + + def is_host_definition_not_pending(self, host_definition_info): + current_host_definition_info_on_cluster = self.get_matching_host_definition_info( + host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) + return not current_host_definition_info_on_cluster or \ + current_host_definition_info_on_cluster.phase == common_settings.READY_PHASE + + def get_matching_host_definition_info(self, node_name, secret_name, secret_namespace): + k8s_host_definitions = self.k8s_api.list_host_definition().items + for k8s_host_definition in k8s_host_definitions: + host_definition_info = self.resource_info_manager.generate_host_definition_info(k8s_host_definition) + if self._is_host_definition_matches(host_definition_info, node_name, secret_name, secret_namespace): + return host_definition_info + return None + + def _is_host_definition_matches(self, host_definition_info, node_name, secret_name, secret_namespace): + return host_definition_info.node_name == node_name and \ + host_definition_info.secret_name == secret_name and \ + host_definition_info.secret_namespace == secret_namespace + + def get_all_host_definitions_info_of_the_node(self, node_name): + node_host_definitions_info = [] + k8s_host_definitions = self.k8s_api.list_host_definition().items + for k8s_host_definition in k8s_host_definitions: + host_definition_info = self.resource_info_manager.generate_host_definition_info(k8s_host_definition) + if host_definition_info.node_name == node_name: + 
node_host_definitions_info.append(host_definition_info) + return node_host_definitions_info diff --git a/controllers/servers/host_definer/resource_manager/node.py b/controllers/servers/host_definer/resource_manager/node.py new file mode 100644 index 000000000..ab7dbb28c --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/node.py @@ -0,0 +1,151 @@ +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.utils import get_system_info_for_topologies +from controllers.servers.errors import ValidationException +import controllers.common.settings as common_settings +from controllers.servers.host_definer.globals import NODES, MANAGED_SECRETS +from controllers.servers.host_definer.types import ManagedNode +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer.utils import manifest_utils +from controllers.servers.host_definer.utils import utils +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.servers.host_definer.resource_manager.secret import SecretManager +from controllers.servers.host_definer.resource_manager.host_definition import HostDefinitionManager +from controllers.servers.host_definer.definition_manager.definition import DefinitionManager +from controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager + +logger = get_stdout_logger() + + +class NodeManager: + def __init__(self): + self.k8s_api = K8SApi() + self.secret_manager = SecretManager() + self.host_definition_manager = HostDefinitionManager() + self.definition_manager = DefinitionManager() + self.resource_info_manager = ResourceInfoManager() + + def is_node_can_be_defined(self, node_name): + return utils.is_dynamic_node_labeling_allowed() or self.is_node_has_manage_node_label(node_name) + + def is_node_can_be_undefined(self, node_name): + return utils.is_host_definer_can_delete_hosts() and \ + self.is_node_has_manage_node_label(node_name) and \ + not 
self.is_node_has_forbid_deletion_label(node_name) + + def is_node_has_forbid_deletion_label(self, node_name): + return self._is_node_has_label_in_true(node_name, common_settings.FORBID_DELETION_LABEL) + + def add_node_to_nodes(self, csi_node_info): + logger.info(messages.NEW_KUBERNETES_NODE.format(csi_node_info.name)) + self._add_manage_node_label_to_node(csi_node_info.name) + NODES[csi_node_info.name] = self.generate_managed_node(csi_node_info) + + def _add_manage_node_label_to_node(self, node_name): + if self.is_node_has_manage_node_label(node_name): + return + logger.info(messages.ADD_LABEL_TO_NODE.format(common_settings.MANAGE_NODE_LABEL, node_name)) + self._update_manage_node_label(node_name, common_settings.TRUE_STRING) + + def generate_managed_node(self, csi_node_info): + node_info = self.resource_info_manager.get_node_info(csi_node_info.name) + return ManagedNode(csi_node_info, node_info.labels) + + def remove_manage_node_label(self, node_name): + if self._is_node_should_be_removed(node_name): + logger.info(messages.REMOVE_LABEL_FROM_NODE.format(common_settings.MANAGE_NODE_LABEL, node_name)) + self._update_manage_node_label(node_name, None) + + def _is_node_should_be_removed(self, node_name): + return utils.is_dynamic_node_labeling_allowed() and \ + not self._is_node_has_ibm_block_csi(node_name) and \ + not self.is_node_has_host_definitions(node_name) + + def _is_node_has_ibm_block_csi(self, node_name): + csi_node_info = self.resource_info_manager.get_csi_node_info(node_name) + return csi_node_info.node_id != '' + + def is_node_has_host_definitions(self, node_name): + host_definitions_info = self.host_definition_manager.get_all_host_definitions_info_of_the_node(node_name) + return host_definitions_info != [] + + def _update_manage_node_label(self, node_name, label_value): + body = manifest_utils.get_body_manifest_for_labels(label_value) + self.k8s_api.patch_node(node_name, body) + + def generate_nodes_with_system_id(self, secret_data): + 
nodes_with_system_id = {} + secret_config = utils.get_secret_config(secret_data) + nodes_info = self.get_nodes_info() + for node_info in nodes_info: + nodes_with_system_id[node_info.name] = self._get_system_id_for_node(node_info, secret_config) + return nodes_with_system_id + + def get_nodes_info(self): + nodes_info = [] + for k8s_node in self.k8s_api.list_node().items: + k8s_node = self.resource_info_manager.generate_node_info(k8s_node) + nodes_info.append(k8s_node) + return nodes_info + + def _get_system_id_for_node(self, node_info, secret_config): + node_topology_labels = self.secret_manager.get_topology_labels(node_info.labels) + try: + _, system_id = get_system_info_for_topologies(secret_config, node_topology_labels) + except ValidationException: + return '' + return system_id + + def is_node_has_new_manage_node_label(self, csi_node_info, unmanaged_csi_nodes_with_driver): + return not utils.is_dynamic_node_labeling_allowed() and \ + self.is_node_has_manage_node_label(csi_node_info.name) and \ + self._is_node_is_unmanaged_and_with_csi_node(csi_node_info, unmanaged_csi_nodes_with_driver) + + def is_node_has_manage_node_label(self, node_name): + return self._is_node_has_label_in_true(node_name, common_settings.MANAGE_NODE_LABEL) + + def _is_node_has_label_in_true(self, node_name, label): + node_info = self.resource_info_manager.get_node_info(node_name) + return node_info.labels.get(label) == common_settings.TRUE_STRING + + def _is_node_is_unmanaged_and_with_csi_node(self, csi_node_info, unmanaged_csi_nodes_with_driver): + if csi_node_info.name not in NODES and csi_node_info.node_id and \ + csi_node_info.name in unmanaged_csi_nodes_with_driver: + return True + return False + + def handle_node_topologies(self, node_info, watch_event_type): + if node_info.name not in NODES or watch_event_type != common_settings.MODIFIED_EVENT_TYPE: + return + for index, managed_secret_info in enumerate(MANAGED_SECRETS): + if not managed_secret_info.system_ids_topologies: + continue 
+ if self.secret_manager.is_node_should_managed_on_secret_info(node_info.name, managed_secret_info): + self._remove_node_if_topology_not_match(node_info, index, managed_secret_info) + elif self.secret_manager.is_node_labels_in_system_ids_topologies(managed_secret_info.system_ids_topologies, + node_info.labels): + self._define_host_with_new_topology(node_info, index, managed_secret_info) + + def _remove_node_if_topology_not_match(self, node_info, index, managed_secret_info): + if not self.secret_manager.is_node_labels_in_system_ids_topologies(managed_secret_info.system_ids_topologies, + node_info.labels): + managed_secret_info.nodes_with_system_id.pop(node_info.name, None) + MANAGED_SECRETS[index] = managed_secret_info + + def _define_host_with_new_topology(self, node_info, index, managed_secret_info): + node_name = node_info.name + system_id = self.secret_manager.get_system_id_for_node_labels( + managed_secret_info.system_ids_topologies, node_info.labels) + managed_secret_info.nodes_with_system_id[node_name] = system_id + MANAGED_SECRETS[index] = managed_secret_info + self.definition_manager.define_node_on_all_storages(node_name) + + def update_node_io_group(self, node_info): + io_group = utils.generate_io_group_from_labels(node_info.labels) + node_name = node_info.name + try: + if io_group != NODES[node_name].io_group: + logger.info(messages.IO_GROUP_CHANGED.format(node_name, io_group, NODES[node_name].io_group)) + NODES[node_name].io_group = io_group + self.definition_manager.define_node_on_all_storages(node_name) + except KeyError: + pass diff --git a/controllers/servers/host_definer/resource_manager/resource_info.py b/controllers/servers/host_definer/resource_manager/resource_info.py new file mode 100644 index 000000000..8d999e58a --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/resource_info.py @@ -0,0 +1,108 @@ +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.host_definer import settings +import 
controllers.common.settings as common_settings +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.servers.host_definer.utils import utils +from controllers.servers.host_definer.types import ( + NodeInfo, CsiNodeInfo, StorageClassInfo, PodInfo, HostDefinitionInfo, SecretInfo) + + +logger = get_stdout_logger() + + +class ResourceInfoManager: + def __init__(self): + self.k8s_api = K8SApi() + + def get_node_info(self, node_name): + k8s_node = self.k8s_api.read_node(node_name) + if k8s_node: + return self.generate_node_info(k8s_node) + return NodeInfo('', {}) + + def generate_node_info(self, k8s_node): + return NodeInfo(k8s_node.metadata.name, k8s_node.metadata.labels) + + def get_csi_node_info(self, node_name): + k8s_csi_node = self.k8s_api.get_csi_node(node_name) + if k8s_csi_node: + return self.generate_csi_node_info(k8s_csi_node) + return CsiNodeInfo() + + def generate_csi_node_info(self, k8s_csi_node): + csi_node_info = CsiNodeInfo() + csi_node_info.name = k8s_csi_node.metadata.name + csi_node_info.node_id = self._get_node_id_from_k8s_csi_node(k8s_csi_node) + return csi_node_info + + def _get_node_id_from_k8s_csi_node(self, k8s_csi_node): + if k8s_csi_node.spec.drivers: + for driver in k8s_csi_node.spec.drivers: + if driver.name == common_settings.CSI_PROVISIONER_NAME: + return driver.nodeID + return '' + + def get_storage_classes_info(self): + storage_classes_info = [] + for k8s_storage_class in self.k8s_api.list_storage_class().items: + storage_class_info = self.generate_storage_class_info(k8s_storage_class) + storage_classes_info.append(storage_class_info) + return storage_classes_info + + def generate_storage_class_info(self, k8s_storage_class): + storage_class_info = StorageClassInfo() + storage_class_info.name = k8s_storage_class.metadata.name + storage_class_info.provisioner = k8s_storage_class.provisioner + storage_class_info.parameters = k8s_storage_class.parameters + return storage_class_info + + def get_csi_pods_info(self): + 
pods_info = [] + k8s_pods = self.k8s_api.list_pod_for_all_namespaces(common_settings.DRIVER_PRODUCT_LABEL) + if not k8s_pods: + return pods_info + for k8s_pod in k8s_pods.items: + pod_info = self._generate_pod_info(k8s_pod) + pods_info.append(pod_info) + return pods_info + + def _generate_pod_info(self, k8s_pod): + pod_info = PodInfo() + pod_info.name = k8s_pod.metadata.name + pod_info.node_name = k8s_pod.spec.node_name + return pod_info + + def generate_host_definition_info(self, k8s_host_definition): + host_definition_info = HostDefinitionInfo() + host_definition_info.name = k8s_host_definition.metadata.name + host_definition_info.resource_version = utils.get_k8s_object_resource_version(k8s_host_definition) + host_definition_info.uid = k8s_host_definition.metadata.uid + host_definition_info.phase = self._get_host_definition_phase(k8s_host_definition) + host_definition_info.secret_name = self._get_attr_from_host_definition( + k8s_host_definition, common_settings.SECRET_NAME_FIELD) + host_definition_info.secret_namespace = self._get_attr_from_host_definition( + k8s_host_definition, common_settings.SECRET_NAMESPACE_FIELD) + host_definition_info.node_name = self._get_attr_from_host_definition( + k8s_host_definition, common_settings.HOST_DEFINITION_NODE_NAME_FIELD) + host_definition_info.node_id = self._get_attr_from_host_definition( + k8s_host_definition, common_settings.HOST_DEFINITION_NODE_ID_FIELD) + host_definition_info.connectivity_type = self._get_attr_from_host_definition( + k8s_host_definition, common_settings.CONNECTIVITY_TYPE_FIELD) + return host_definition_info + + def _get_host_definition_phase(self, k8s_host_definition): + if k8s_host_definition.status: + return k8s_host_definition.status.phase + return '' + + def _get_attr_from_host_definition(self, k8s_host_definition, attribute): + if hasattr(k8s_host_definition.spec.hostDefinition, attribute): + return getattr(k8s_host_definition.spec.hostDefinition, attribute) + return '' + + def 
generate_k8s_secret_to_secret_info(self, k8s_secret, nodes_with_system_id={}, system_ids_topologies={}): + return SecretInfo( + k8s_secret.metadata.name, k8s_secret.metadata.namespace, nodes_with_system_id, system_ids_topologies) + + def generate_secret_info(self, secret_name, secret_namespace, nodes_with_system_id={}, system_ids_topologies={}): + return SecretInfo(secret_name, secret_namespace, nodes_with_system_id, system_ids_topologies) diff --git a/controllers/servers/host_definer/resource_manager/secret.py b/controllers/servers/host_definer/resource_manager/secret.py new file mode 100644 index 000000000..15285f57c --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/secret.py @@ -0,0 +1,126 @@ +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.settings import SECRET_SUPPORTED_TOPOLOGIES_PARAMETER +from controllers.servers.utils import is_topology_match +import controllers.common.settings as common_settings +from controllers.servers.host_definer.globals import MANAGED_SECRETS +from controllers.servers.host_definer import settings +from controllers.servers.host_definer.utils import utils +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager + +logger = get_stdout_logger() + + +class SecretManager: + def __init__(self): + self.k8s_api = K8SApi() + self.resource_info_manager = ResourceInfoManager() + + def is_node_should_be_managed_on_secret(self, node_name, secret_name, secret_namespace): + logger.info(messages.CHECK_NODE_SHOULD_BE_MANAGED_BY_SECRET.format(node_name, secret_name, secret_namespace)) + secret_data = self.get_secret_data(secret_name, secret_namespace) + utils.validate_secret(secret_data) + managed_secret_info, _ = self._get_managed_secret_by_name_and_namespace(secret_name, secret_namespace) + if 
self.is_node_should_managed_on_secret_info(node_name, managed_secret_info): + logger.info(messages.NODE_SHOULD_BE_MANAGED_ON_SECRET.format(node_name, secret_name, secret_namespace)) + return True + logger.info(messages.NODE_SHOULD_NOT_BE_MANAGED_ON_SECRET.format(node_name, secret_name, secret_namespace)) + return False + + def _get_managed_secret_by_name_and_namespace(self, secret_name, secret_namespace): + secret_info = self.resource_info_manager.generate_secret_info(secret_name, secret_namespace) + managed_secret_info, index = self.get_matching_managed_secret_info(secret_info) + return managed_secret_info, index + + def is_node_should_managed_on_secret_info(self, node_name, secret_info): + if secret_info: + nodes_with_system_id = secret_info.nodes_with_system_id + if nodes_with_system_id and nodes_with_system_id.get(node_name): + return True + if nodes_with_system_id: + return False + return True + return False + + def is_node_labels_in_system_ids_topologies(self, system_ids_topologies, node_labels): + return self.get_system_id_for_node_labels(system_ids_topologies, node_labels) != '' + + def get_system_id_for_node_labels(self, system_ids_topologies, node_labels): + node_topology_labels = self.get_topology_labels(node_labels) + for system_id, system_topologies in system_ids_topologies.items(): + if is_topology_match(system_topologies, node_topology_labels): + return system_id + return '' + + def is_topology_secret(self, secret_data): + utils.validate_secret(secret_data) + if utils.get_secret_config(secret_data): + return True + return False + + def generate_secret_system_ids_topologies(self, secret_data): + system_ids_topologies = {} + secret_config = utils.get_secret_config(secret_data) + for system_id, system_info in secret_config.items(): + try: + system_ids_topologies[system_id] = (system_info.get(SECRET_SUPPORTED_TOPOLOGIES_PARAMETER)) + except AttributeError: + system_ids_topologies[system_id] = None + return system_ids_topologies + + def is_secret(self, 
parameter_name): + return parameter_name.endswith(common_settings.SECRET_NAME_SUFFIX) and \ + parameter_name.startswith(common_settings.CSI_PARAMETER_PREFIX) + + def get_secret_name_and_namespace(self, storage_class_info, parameter_name): + secret_name_suffix = common_settings.SECRET_NAME_SUFFIX + prefix = parameter_name.split(secret_name_suffix)[0] + return (storage_class_info.parameters[parameter_name], + storage_class_info.parameters[prefix + secret_name_suffix.replace( + common_settings.NAME_FIELD, common_settings.NAMESPACE_FIELD)]) + + def add_unique_secret_info_to_list(self, secret_info, secrets_info_list): + for secret_info_in_list in secrets_info_list: + if secret_info_in_list.name == secret_info.name and \ + secret_info_in_list.namespace == secret_info.namespace: + return secrets_info_list + secrets_info_list.append(secret_info) + return secrets_info_list + + def is_secret_can_be_changed(self, secret_info, watch_event_type): + return self._is_secret_managed(secret_info) and \ + not utils.is_watch_object_type_is_delete(watch_event_type) + + def _is_secret_managed(self, secret_info): + _, index = self.get_matching_managed_secret_info(secret_info) + if index != -1: + return True + return False + + def get_matching_managed_secret_info(self, secret_info): + for index, managed_secret_info in enumerate(MANAGED_SECRETS): + if managed_secret_info.name == secret_info.name and managed_secret_info.namespace == secret_info.namespace: + return managed_secret_info, index + return secret_info, -1 + + def get_array_connection_info(self, secret_name, secret_namespace, labels): + secret_data = self.get_secret_data(secret_name, secret_namespace) + if secret_data: + node_topology_labels = self.get_topology_labels(labels) + return utils.get_array_connection_info_from_secret_data(secret_data, node_topology_labels) + return {} + + def get_secret_data(self, secret_name, secret_namespace): + logger.info(messages.READ_SECRET.format(secret_name, secret_namespace)) + secret_data = 
self.k8s_api.get_secret_data(secret_name, secret_namespace) + if secret_data: + return utils.change_decode_base64_secret_config(secret_data) + return {} + + def get_topology_labels(self, labels): + topology_labels = {} + for label in labels: + if utils.is_topology_label(label): + topology_labels[label] = labels[label] + return topology_labels diff --git a/controllers/servers/host_definer/resource_manager/storage_class.py b/controllers/servers/host_definer/resource_manager/storage_class.py new file mode 100644 index 000000000..31d6386d2 --- /dev/null +++ b/controllers/servers/host_definer/resource_manager/storage_class.py @@ -0,0 +1,9 @@ +from controllers.common.csi_logger import get_stdout_logger +import controllers.common.settings as common_settings + +logger = get_stdout_logger() + + +class StorageClassManager: + def is_storage_class_has_csi_as_a_provisioner(self, storage_class_info): + return storage_class_info.provisioner == common_settings.CSI_PROVISIONER_NAME diff --git a/controllers/servers/host_definer/settings.py b/controllers/servers/host_definer/settings.py index fd8fd07e6..4ebdce727 100644 --- a/controllers/servers/host_definer/settings.py +++ b/controllers/servers/host_definer/settings.py @@ -1,63 +1,14 @@ -from controllers.common.settings import HOST_DEFINITION_PLURAL, CSI_IBM_GROUP +from controllers.common.settings import TOPOLOGY_IBM_BLOCK_PREFIX, STORAGE_API_GROUP import controllers.array_action.settings as array_config -STORAGE_API_VERSION = 'storage.k8s.io/v1' -CSI_PARAMETER_PREFIX = "csi.storage.k8s.io/" +STORAGE_API_VERSION = '{}/v1'.format(STORAGE_API_GROUP) CSINODE_KIND = 'CSINode' -CSI_IBM_API_VERSION = 'csi.ibm.com/v1' -HOST_DEFINITION_KIND = 'HostDefinition' -SECRET_NAME_SUFFIX = 'secret-name' -CSI_PROVISIONER_NAME = 'block.csi.ibm.com' -ADDED_EVENT = 'ADDED' -DELETED_EVENT = 'DELETED' -MODIFIED_EVENT = 'MODIFIED' PENDING_PREFIX = 'Pending' -PENDING_CREATION_PHASE = 'PendingCreation' -PENDING_DELETION_PHASE = 'PendingDeletion' -ERROR_PHASE 
= 'Error' -READY_PHASE = 'Ready' -DRIVER_PRODUCT_LABEL = 'product=ibm-block-csi-driver' -DEFAULT_NAMESPACE = 'default' -HOST_DEFINER = 'hostDefiner' -MANAGE_NODE_LABEL = 'hostdefiner.block.csi.ibm.com/manage-node' -FORBID_DELETION_LABEL = 'hostdefiner.block.csi.ibm.com/do-not-delete-definition' -CONNECTIVITY_TYPE_LABEL = 'block.csi.ibm.com/connectivity-type' SUPPORTED_CONNECTIVITY_TYPES = [array_config.ISCSI_CONNECTIVITY_TYPE, array_config.FC_CONNECTIVITY_TYPE, array_config.NVME_OVER_FC_CONNECTIVITY_TYPE] -NODE_NAME_FIELD = 'nodeName' -SECRET_NAME_FIELD = 'secretName' -SECRET_NAMESPACE_FIELD = 'secretNamespace' -CONNECTIVITY_TYPE_FIELD = 'connectivityType' -PORTS_FIELD = 'ports' -NODE_NAME_ON_STORAGE_FIELD = 'nodeNameOnStorage' -IO_GROUP_FIELD = 'ioGroups' -MANAGEMENT_ADDRESS_FIELD = 'managementAddress' -API_VERSION = 'apiVersion' -KIND = 'kind' -METADATA = 'metadata' -SPEC = 'spec' -HOST_DEFINITION_FIELD = 'hostDefinition' -PREFIX_ENV_VAR = 'PREFIX' -CONNECTIVITY_ENV_VAR = 'CONNECTIVITY_TYPE' -STATUS = 'status' -PHASE = 'phase' -LABELS = 'labels' -TRUE_STRING = 'true' -DYNAMIC_NODE_LABELING_ENV_VAR = 'DYNAMIC_NODE_LABELING' -ALLOW_DELETE_ENV_VAR = 'ALLOW_DELETE' -DEFINE_ACTION = 'Define' -UNDEFINE_ACTION = 'Undefine' SUCCESS_MESSAGE = 'Host defined successfully on the array' -FAILED_MESSAGE_TYPE = 'Failed' -SUCCESSFUL_MESSAGE_TYPE = 'Successful' -NORMAL_EVENT_TYPE = 'Normal' -WARNING_EVENT_TYPE = 'Warning' -FINALIZERS = 'finalizers' -CSI_IBM_FINALIZER = HOST_DEFINITION_PLURAL + '.' 
+ CSI_IBM_GROUP HOST_DEFINITION_PENDING_RETRIES = 5 HOST_DEFINITION_PENDING_EXPONENTIAL_BACKOFF_IN_SECONDS = 3 HOST_DEFINITION_PENDING_DELAY_IN_SECONDS = 3 -SECRET_CONFIG_FIELD = 'config' -TOPOLOGY_PREFIXES = ['topology.block.csi.ibm.com'] -FULL_IO_GROUP = '0:1:2:3' +TOPOLOGY_PREFIXES = [TOPOLOGY_IBM_BLOCK_PREFIX] POSSIBLE_NUMBER_OF_IO_GROUP = 4 diff --git a/controllers/servers/host_definer/storage_manager/host_definer_server.py b/controllers/servers/host_definer/storage_manager/host_definer_server.py index 0c7ffc075..069f3a2fe 100644 --- a/controllers/servers/host_definer/storage_manager/host_definer_server.py +++ b/controllers/servers/host_definer/storage_manager/host_definer_server.py @@ -170,7 +170,7 @@ def _get_io_group_to_modify(self, io_group_from_host, ig_group_from_user): def _split_io_group_from_user(self, ig_group_from_user): if not ig_group_from_user: - return host_definer_settings.FULL_IO_GROUP.split(common_settings.IO_GROUP_DELIMITER) + return common_settings.FULL_IO_GROUP.split(common_settings.IO_GROUP_DELIMITER) return ig_group_from_user.split(common_settings.IO_GROUP_DELIMITER) def _get_io_group_to_remove_and_add_lists(self, io_group_from_host, ig_group_from_user): diff --git a/controllers/servers/host_definer/types.py b/controllers/servers/host_definer/types.py index 0e5277106..90b40ec6c 100644 --- a/controllers/servers/host_definer/types.py +++ b/controllers/servers/host_definer/types.py @@ -1,6 +1,6 @@ from dataclasses import dataclass, field from controllers.servers.csi.controller_types import ArrayConnectionInfo -from controllers.servers.host_definer import utils +from controllers.servers.host_definer.utils import utils @dataclass diff --git a/controllers/servers/host_definer/utils.py b/controllers/servers/host_definer/utils.py deleted file mode 100644 index 0d7562a0a..000000000 --- a/controllers/servers/host_definer/utils.py +++ /dev/null @@ -1,13 +0,0 @@ -import controllers.servers.host_definer.settings as host_definer_settings -import 
controllers.common.settings as common_settings - - -def generate_io_group_from_labels(labels): - io_group = '' - for io_group_index in range(host_definer_settings.POSSIBLE_NUMBER_OF_IO_GROUP): - label_content = labels.get(common_settings.IO_GROUP_LABEL_PREFIX + str(io_group_index)) - if label_content == host_definer_settings.TRUE_STRING: - if io_group: - io_group += common_settings.IO_GROUP_DELIMITER - io_group += str(io_group_index) - return io_group diff --git a/controllers/servers/host_definer/utils/__init__.py b/controllers/servers/host_definer/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/servers/host_definer/utils/manifest_utils.py b/controllers/servers/host_definer/utils/manifest_utils.py new file mode 100644 index 000000000..baa0f9eba --- /dev/null +++ b/controllers/servers/host_definer/utils/manifest_utils.py @@ -0,0 +1,60 @@ +from controllers.servers.host_definer import settings +import controllers.common.settings as common_settings + + +def get_host_definition_manifest(host_definition_info, response, node_id): + manifest = generate_host_definition_response_fields_manifest(host_definition_info.name, response) + manifest[common_settings.API_VERSION_FIELD] = common_settings.CSI_IBM_API_VERSION + manifest[common_settings.KIND_FIELD] = common_settings.HOST_DEFINITION_KIND + manifest[common_settings.SPEC_FIELD][common_settings.HOST_DEFINITION_FIELD][ + common_settings.HOST_DEFINITION_NODE_NAME_FIELD] = host_definition_info.node_name + manifest[common_settings.SPEC_FIELD][common_settings.HOST_DEFINITION_FIELD][ + common_settings.HOST_DEFINITION_NODE_ID_FIELD] = node_id + manifest[common_settings.SPEC_FIELD][common_settings.HOST_DEFINITION_FIELD][common_settings.SECRET_NAME_FIELD] = \ + host_definition_info.secret_name + manifest[common_settings.SPEC_FIELD][common_settings.HOST_DEFINITION_FIELD][ + common_settings.SECRET_NAMESPACE_FIELD] = host_definition_info.secret_namespace + return manifest + + +def 
get_host_definition_status_manifest(host_definition_phase): + return { + common_settings.STATUS_FIELD: { + common_settings.STATUS_PHASE_FIELD: host_definition_phase, + } + } + + +def get_body_manifest_for_labels(label_value): + return { + common_settings.METADATA_FIELD: { + common_settings.LABELS_FIELD: { + common_settings.MANAGE_NODE_LABEL: label_value} + } + } + + +def get_finalizer_manifest(host_definition_name, finalizers): + return { + common_settings.METADATA_FIELD: { + common_settings.NAME_FIELD: host_definition_name, + common_settings.FINALIZERS_FIELD: finalizers, + } + } + + +def generate_host_definition_response_fields_manifest(host_definition_name, response): + return { + common_settings.METADATA_FIELD: { + common_settings.NAME_FIELD: host_definition_name, + }, + common_settings.SPEC_FIELD: { + common_settings.HOST_DEFINITION_FIELD: { + common_settings.CONNECTIVITY_TYPE_FIELD: response.connectivity_type, + common_settings.PORTS_FIELD: response.ports, + common_settings.NODE_NAME_ON_STORAGE_FIELD: response.node_name_on_storage, + common_settings.IO_GROUP_FIELD: response.io_group, + common_settings.MANAGEMENT_ADDRESS_FIELD: response.management_address + }, + }, + } diff --git a/controllers/servers/host_definer/utils/utils.py b/controllers/servers/host_definer/utils/utils.py new file mode 100644 index 000000000..0cb367431 --- /dev/null +++ b/controllers/servers/host_definer/utils/utils.py @@ -0,0 +1,156 @@ +import os +import ast +import base64 +import json +import random +import string +from munch import Munch + +from controllers.common.csi_logger import get_stdout_logger +from controllers.servers.utils import validate_secrets, get_array_connection_info_from_secrets +from controllers.servers.errors import ValidationException +import controllers.servers.host_definer.settings as host_definer_settings +import controllers.common.settings as common_settings +from controllers.servers.host_definer import settings + + +logger = get_stdout_logger() + + +def 
generate_io_group_from_labels(labels): + io_group = '' + for io_group_index in range(host_definer_settings.POSSIBLE_NUMBER_OF_IO_GROUP): + label_content = labels.get(common_settings.IO_GROUP_LABEL_PREFIX + str(io_group_index)) + if label_content == common_settings.TRUE_STRING: + if io_group: + io_group += common_settings.IO_GROUP_DELIMITER + io_group += str(io_group_index) + return io_group + + +def get_k8s_object_resource_version(k8s_object): + if hasattr(k8s_object.metadata, 'resource_version'): + return k8s_object.metadata.resource_version + return k8s_object.metadata.resourceVersion + + +def change_decode_base64_secret_config(secret_data): + if common_settings.SECRET_CONFIG_FIELD in secret_data.keys(): + secret_data[common_settings.SECRET_CONFIG_FIELD] = _decode_base64_to_dict( + secret_data[common_settings.SECRET_CONFIG_FIELD]) + return secret_data + + +def _decode_base64_to_dict(content_with_base64): + decoded_string_content = decode_base64_to_string(content_with_base64) + my_dict_again = ast.literal_eval(decoded_string_content) + return my_dict_again + + +def get_secret_config(secret_data): + secret_data = _convert_secret_config_to_dict(secret_data) + return secret_data.get(common_settings.SECRET_CONFIG_FIELD, {}) + + +def _convert_secret_config_to_dict(secret_data): + if common_settings.SECRET_CONFIG_FIELD in secret_data.keys(): + if type(secret_data[common_settings.SECRET_CONFIG_FIELD]) is str: + secret_data[common_settings.SECRET_CONFIG_FIELD] = json.loads( + secret_data[common_settings.SECRET_CONFIG_FIELD]) + return secret_data + + +def munch(watch_event): + return Munch.fromDict(watch_event) + + +def loop_forever(): + return True + + +def validate_secret(secret_data): + secret_data = _convert_secret_config_to_string(secret_data) + try: + validate_secrets(secret_data) + except ValidationException as ex: + logger.error(str(ex)) + + +def get_prefix(): + return os.getenv(common_settings.PREFIX_ENV_VAR) + + +def 
get_connectivity_type_from_user(connectivity_type_label_on_node): + if connectivity_type_label_on_node in settings.SUPPORTED_CONNECTIVITY_TYPES: + return connectivity_type_label_on_node + return os.getenv(common_settings.CONNECTIVITY_ENV_VAR) + + +def is_topology_label(label): + for prefix in settings.TOPOLOGY_PREFIXES: + if label.startswith(prefix): + return True + return False + + +def get_array_connection_info_from_secret_data(secret_data, labels): + try: + secret_data = _convert_secret_config_to_string(secret_data) + array_connection_info = get_array_connection_info_from_secrets(secret_data, labels) + return decode_array_connectivity_info(array_connection_info) + except ValidationException as ex: + logger.error(str(ex)) + return None + + +def _convert_secret_config_to_string(secret_data): + if common_settings.SECRET_CONFIG_FIELD in secret_data.keys(): + if type(secret_data[common_settings.SECRET_CONFIG_FIELD]) is dict: + secret_data[common_settings.SECRET_CONFIG_FIELD] = json.dumps( + secret_data[common_settings.SECRET_CONFIG_FIELD]) + return secret_data + + +def decode_array_connectivity_info(array_connection_info): + array_connection_info.array_addresses = _decode_list_base64_to_list_string( + array_connection_info.array_addresses) + array_connection_info.user = decode_base64_to_string(array_connection_info.user) + array_connection_info.password = decode_base64_to_string(array_connection_info.password) + return array_connection_info + + +def _decode_list_base64_to_list_string(list_with_base64): + for index, base64_content in enumerate(list_with_base64): + list_with_base64[index] = decode_base64_to_string(base64_content) + return list_with_base64 + + +def decode_base64_to_string(content_with_base64): + try: + base64_bytes = content_with_base64.encode('ascii') + decoded_string_in_bytes = base64.b64decode(base64_bytes) + return decoded_string_in_bytes.decode('ascii') + except Exception: + return content_with_base64 + + +def get_random_string(): + return 
''.join(random.choices(string.ascii_lowercase + string.digits, k=20)) + + +def is_watch_object_type_is_delete(watch_object_type): + return watch_object_type == common_settings.DELETED_EVENT_TYPE + + +def is_host_definer_can_delete_hosts(): + return os.getenv(common_settings.ALLOW_DELETE_ENV_VAR) == common_settings.TRUE_STRING + + +def is_dynamic_node_labeling_allowed(): + return os.getenv(common_settings.DYNAMIC_NODE_LABELING_ENV_VAR) == common_settings.TRUE_STRING + + +def get_action(phase): + if phase == common_settings.PENDING_CREATION_PHASE: + return common_settings.DEFINE_ACTION + return common_settings.UNDEFINE_ACTION diff --git a/controllers/servers/host_definer/watcher/csi_node_watcher.py b/controllers/servers/host_definer/watcher/csi_node_watcher.py index 7c47eb58c..262cb62c7 100644 --- a/controllers/servers/host_definer/watcher/csi_node_watcher.py +++ b/controllers/servers/host_definer/watcher/csi_node_watcher.py @@ -1,10 +1,12 @@ -import time from threading import Thread +import controllers.common.settings as common_settings from controllers.common.csi_logger import get_stdout_logger -from controllers.servers.host_definer.watcher.watcher_helper import Watcher, NODES, MANAGED_SECRETS +from controllers.servers.host_definer.globals import MANAGED_SECRETS, NODES +from controllers.servers.host_definer.watcher.watcher_helper import Watcher import controllers.servers.host_definer.messages as messages from controllers.servers.host_definer import settings +from controllers.servers.host_definer.utils import utils logger = get_stdout_logger() @@ -12,100 +14,61 @@ class CsiNodeWatcher(Watcher): def add_initial_csi_nodes(self): - csi_nodes_info = self._get_csi_nodes_info_with_driver() + csi_nodes_info = self.csi_node.get_csi_nodes_info_with_driver() for csi_node_info in csi_nodes_info: - if self._is_host_can_be_defined(csi_node_info.name): - self._add_node_to_nodes(csi_node_info) + if self.node_manager.is_node_can_be_defined(csi_node_info.name): + 
self.node_manager.add_node_to_nodes(csi_node_info) def watch_csi_nodes_resources(self): - while self._loop_forever(): - resource_version = self._get_k8s_object_resource_version(self.csi_nodes_api.get()) - stream = self.csi_nodes_api.watch(resource_version=resource_version, timeout=5) + while utils.loop_forever(): + stream = self.k8s_api.get_csi_node_stream() for watch_event in stream: - watch_event = self._munch(watch_event) - csi_node_info = self._generate_csi_node_info(watch_event.object) - if (watch_event.type == settings.DELETED_EVENT) and (csi_node_info.name in NODES): + watch_event = utils.munch(watch_event) + csi_node_info = self.resource_info_manager.generate_csi_node_info(watch_event.object) + if (watch_event.type == common_settings.DELETED_EVENT_TYPE) and (csi_node_info.name in NODES): self._handle_deleted_csi_node_pod(csi_node_info) - elif watch_event.type == settings.MODIFIED_EVENT: + elif watch_event.type == common_settings.MODIFIED_EVENT_TYPE: self._handle_modified_csi_node(csi_node_info) def _handle_modified_csi_node(self, csi_node_info): if self._is_new_csi_node(csi_node_info): - self._add_node_to_nodes(csi_node_info) - self._define_host_on_all_storages(csi_node_info.name) + self.node_manager.add_node_to_nodes(csi_node_info) + self.definition_manager.define_node_on_all_storages(csi_node_info.name) elif csi_node_info.name in NODES: self._handle_deleted_csi_node_pod(csi_node_info) def _is_new_csi_node(self, csi_node_info): - return csi_node_info.node_id and self._is_host_can_be_defined(csi_node_info.name) and \ + return csi_node_info.node_id and self.node_manager.is_node_can_be_defined(csi_node_info.name) and \ csi_node_info.name not in NODES def _handle_deleted_csi_node_pod(self, csi_node_info): - if self._is_node_has_manage_node_label(csi_node_info.name): + if self.node_manager.is_node_has_manage_node_label(csi_node_info.name): remove_host_thread = Thread(target=self._undefine_host_when_node_pod_is_deleted, args=(csi_node_info,)) 
remove_host_thread.start() def _undefine_host_when_node_pod_is_deleted(self, csi_node_info): node_name = csi_node_info.name - if self._is_host_part_of_update(node_name): + if self.csi_node.is_host_part_of_update(node_name): self._create_definitions_when_csi_node_changed(csi_node_info) - elif self._is_host_definer_can_delete_hosts() and \ - not self._is_node_has_forbid_deletion_label(node_name): - self._undefine_hosts(csi_node_info.name) + elif utils.is_host_definer_can_delete_hosts() and \ + not self.node_manager.is_node_has_forbid_deletion_label(node_name): + self._undefine_all_the_definitions_of_a_node(csi_node_info.name) else: NODES.pop(node_name, None) - def _is_host_part_of_update(self, worker): - logger.info(messages.CHECK_IF_NODE_IS_PART_OF_UPDATE.format(worker)) - daemon_set_name = self._wait_until_all_daemon_set_pods_are_up_to_date() - if daemon_set_name: - return self._is_csi_node_pod_running_on_worker(worker, daemon_set_name) - return False - - def _is_csi_node_pod_running_on_worker(self, worker, daemon_set_name): - logger.info(messages.CHECK_IF_CSI_NODE_POD_IS_RUNNING.format(worker)) - csi_pods_info = self._get_csi_pods_info() - for pod_info in csi_pods_info: - if (pod_info.node_name == worker) and (daemon_set_name in pod_info.name): - return True - return False - - def _wait_until_all_daemon_set_pods_are_up_to_date(self): - csi_daemon_set = self._get_csi_daemon_set() - if not csi_daemon_set: - return None - status = csi_daemon_set.status - while status.updated_number_scheduled != status.desired_number_scheduled: - logger.info(messages.UPDATED_CSI_NODE_VS_DESIRED.format( - status.updated_number_scheduled, status.desired_number_scheduled)) - if status.desired_number_scheduled == 0: - return None - csi_daemon_set = self._get_csi_daemon_set() - if not csi_daemon_set: - return None - status = csi_daemon_set.status - time.sleep(0.5) - return csi_daemon_set.metadata.name - def _create_definitions_when_csi_node_changed(self, csi_node_info): for secret_info in 
MANAGED_SECRETS: secret_name, secret_namespace = secret_info.name, secret_info.namespace - host_definition_info = self._get_matching_host_definition_info( + host_definition_info = self.host_definition_manager.get_matching_host_definition_info( csi_node_info.name, secret_name, secret_namespace) - if host_definition_info: - if self._is_node_id_changed(host_definition_info.node_id, csi_node_info.node_id): - logger.info(messages.NODE_ID_WAS_CHANGED.format(csi_node_info.name, - host_definition_info.node_id, csi_node_info.node_id)) - NODES[csi_node_info.name] = self._generate_managed_node(csi_node_info) - self._create_definition(host_definition_info) - - def _is_node_id_changed(self, host_definition_node_id, csi_node_node_id): - return host_definition_node_id != csi_node_node_id \ - and host_definition_node_id and csi_node_node_id - - def _undefine_hosts(self, node_name): - for secret_info in MANAGED_SECRETS: - host_definition_info = self._get_host_definition_info_from_secret_and_node_name(node_name, secret_info) - self._delete_definition(host_definition_info) - self._remove_manage_node_label(node_name) + if host_definition_info and self.csi_node.is_node_id_changed( + host_definition_info.node_id, csi_node_info.node_id): + logger.info(messages.NODE_ID_WAS_CHANGED.format(csi_node_info.name, + host_definition_info.node_id, csi_node_info.node_id)) + NODES[csi_node_info.name] = self.node_manager.generate_managed_node(csi_node_info) + self.definition_manager.create_definition(host_definition_info) + + def _undefine_all_the_definitions_of_a_node(self, node_name): + self.definition_manager.undefine_node_definitions(node_name) + self.node_manager.remove_manage_node_label(node_name) NODES.pop(node_name, None) diff --git a/controllers/servers/host_definer/watcher/host_definition_watcher.py b/controllers/servers/host_definer/watcher/host_definition_watcher.py index 652352449..d8a73b6cb 100644 --- a/controllers/servers/host_definer/watcher/host_definition_watcher.py +++ 
b/controllers/servers/host_definer/watcher/host_definition_watcher.py @@ -1,12 +1,13 @@ from threading import Thread from time import sleep +import controllers.common.settings as common_settings +from controllers.servers.host_definer.utils import utils import controllers.servers.host_definer.messages as messages from controllers.common.csi_logger import get_stdout_logger from controllers.servers.host_definer.watcher.watcher_helper import Watcher from controllers.servers.host_definer.types import DefineHostResponse from controllers.servers.host_definer import settings -import controllers.common.settings as common_settings logger = get_stdout_logger() @@ -15,22 +16,19 @@ class HostDefinitionWatcher(Watcher): def watch_host_definitions_resources(self): self._watch_host_definition_with_timeout('') - while self._loop_forever(): - resource_version = self._get_k8s_object_resource_version(self.host_definitions_api.get()) + while utils.loop_forever(): + resource_version = utils.get_k8s_object_resource_version(self.k8s_api.list_host_definition()) self._watch_host_definition_with_timeout(resource_version) def _watch_host_definition_with_timeout(self, resource_version, timeout=5): - stream = self.host_definitions_api.watch(resource_version=resource_version, timeout=timeout) + stream = self.k8s_api.get_host_definition_stream(resource_version, timeout) for watch_event in stream: - watch_event = self._munch(watch_event) - host_definition_info = self._generate_host_definition_info(watch_event.object) - if self._is_host_definition_in_pending_phase(host_definition_info.phase) and \ - watch_event.type != settings.DELETED_EVENT: + watch_event = utils.munch(watch_event) + host_definition_info = self.resource_info_manager.generate_host_definition_info(watch_event.object) + if self.host_definition_manager.is_host_definition_in_pending_phase(host_definition_info.phase) and \ + not utils.is_watch_object_type_is_delete(watch_event.type): 
self._define_host_definition_after_pending_state(host_definition_info) - def _is_host_definition_in_pending_phase(self, phase): - return phase.startswith(settings.PENDING_PREFIX) - def _define_host_definition_after_pending_state(self, host_definition_info): logger.info(messages.FOUND_HOST_DEFINITION_IN_PENDING_STATE.format(host_definition_info.name)) remove_host_thread = Thread(target=self._define_host_using_exponential_backoff, @@ -44,7 +42,7 @@ def _define_host_using_exponential_backoff(self, host_definition_info): while retries > 0: logger.info(messages.VERIFY_HOST_DEFINITION_USING_EXPONENTIAL_BACKOFF.format( host_definition_info.name, retries)) - if self._is_host_definition_not_pending(host_definition_info) and \ + if self.host_definition_manager.is_host_definition_not_pending(host_definition_info) and \ retries != settings.HOST_DEFINITION_PENDING_RETRIES: logger.info(messages.HOST_DEFINITION_IS_NOT_PENDING.format(host_definition_info.name)) return @@ -53,84 +51,31 @@ def _define_host_using_exponential_backoff(self, host_definition_info): delay_in_seconds *= backoff_in_seconds sleep(delay_in_seconds) - self._set_host_definition_phase_to_error(host_definition_info) - - def _is_host_definition_not_pending(self, host_definition_info): - current_host_definition_info_on_cluster = self._get_matching_host_definition_info( - host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) - return not current_host_definition_info_on_cluster or \ - current_host_definition_info_on_cluster.phase == settings.READY_PHASE + self.host_definition_manager.set_host_definition_phase_to_error(host_definition_info) def _handle_pending_host_definition(self, host_definition_info): response = DefineHostResponse() phase = host_definition_info.phase - action = self._get_action(phase) - if phase == settings.PENDING_CREATION_PHASE: - response = self._define_host_after_pending(host_definition_info) + action = utils.get_action(phase) + if phase == 
common_settings.PENDING_CREATION_PHASE: + response = self.definition_manager.define_host_after_pending(host_definition_info) elif self._is_pending_for_deletion_need_to_be_handled(phase, host_definition_info.node_name): - response = self._undefine_host_after_pending(host_definition_info) + response = self.definition_manager.undefine_host_after_pending(host_definition_info) self._handle_message_from_storage( host_definition_info, response.error_message, action) - def _get_action(self, phase): - if phase == settings.PENDING_CREATION_PHASE: - return settings.DEFINE_ACTION - return settings.UNDEFINE_ACTION - - def _define_host_after_pending(self, host_definition_info): - response = DefineHostResponse() - if self._is_node_should_be_managed_on_secret( - host_definition_info.node_name, host_definition_info.secret_name, - host_definition_info.secret_namespace): - response = self._define_host(host_definition_info) - self._update_host_definition_from_storage_response(host_definition_info.name, response) - else: - self._delete_host_definition(host_definition_info.name) - return response - - def _update_host_definition_from_storage_response(self, host_definition_name, response): - logger.info(messages.UPDATE_HOST_DEFINITION_FIELDS_FROM_STORAGE.format(host_definition_name, response)) - host_definition_manifest = self._generate_host_definition_manifest(host_definition_name, response) - self._patch_host_definition(host_definition_manifest) - - def _generate_host_definition_manifest(self, host_definition_name, response): - return { - settings.METADATA: { - common_settings.NAME_FIELD: host_definition_name, - }, - settings.SPEC: { - settings.HOST_DEFINITION_FIELD: { - settings.CONNECTIVITY_TYPE_FIELD: response.connectivity_type, - settings.PORTS_FIELD: response.ports, - settings.NODE_NAME_ON_STORAGE_FIELD: response.node_name_on_storage, - settings.IO_GROUP_FIELD: response.io_group - }, - }, - } - - def _undefine_host_after_pending(self, host_definition_info): - response = 
DefineHostResponse() - if self._is_node_should_be_managed_on_secret( - host_definition_info.node_name, host_definition_info.secret_name, - host_definition_info.secret_namespace): - response = self._undefine_host(host_definition_info) - return response - def _handle_message_from_storage(self, host_definition_info, error_message, action): phase = host_definition_info.phase if error_message: - self._create_k8s_event_for_host_definition( + self.host_definition_manager.create_k8s_event_for_host_definition( host_definition_info, str(error_message), - action, settings.FAILED_MESSAGE_TYPE) - elif phase == settings.PENDING_CREATION_PHASE: - self._set_host_definition_status_to_ready(host_definition_info) + action, common_settings.FAILED_MESSAGE_TYPE) + elif phase == common_settings.PENDING_CREATION_PHASE: + self.host_definition_manager.set_host_definition_status_to_ready(host_definition_info) elif self._is_pending_for_deletion_need_to_be_handled(phase, host_definition_info.node_name): - self._delete_host_definition(host_definition_info.name) - self._remove_manage_node_label(host_definition_info.node_name) + self.host_definition_manager.delete_host_definition(host_definition_info.name) + self.node_manager.remove_manage_node_label(host_definition_info.node_name) def _is_pending_for_deletion_need_to_be_handled(self, phase, node_name): - return phase == settings.PENDING_DELETION_PHASE and self._is_host_can_be_undefined(node_name) - - def _set_host_definition_phase_to_error(self, host_definition_info): - logger.info(messages.SET_HOST_DEFINITION_PHASE_TO_ERROR.format(host_definition_info.name)) - self._set_host_definition_status(host_definition_info.name, settings.ERROR_PHASE) + return phase == common_settings.PENDING_DELETION_PHASE and \ + self.node_manager.is_node_can_be_undefined(node_name) diff --git a/controllers/servers/host_definer/watcher/node_watcher.py b/controllers/servers/host_definer/watcher/node_watcher.py index 354d3a022..9ff8975af 100644 --- 
a/controllers/servers/host_definer/watcher/node_watcher.py +++ b/controllers/servers/host_definer/watcher/node_watcher.py @@ -1,10 +1,8 @@ -from kubernetes import watch - from controllers.common.csi_logger import get_stdout_logger -from controllers.servers.utils import is_topology_match -from controllers.servers.host_definer.watcher.watcher_helper import NODES, Watcher, MANAGED_SECRETS +import controllers.common.settings as common_settings +from controllers.servers.host_definer.watcher.watcher_helper import Watcher from controllers.servers.host_definer import settings -from controllers.servers.host_definer import utils +from controllers.servers.host_definer.utils import utils from controllers.servers.host_definer import messages logger = get_stdout_logger() @@ -13,109 +11,59 @@ class NodeWatcher(Watcher): def add_initial_nodes(self): - nodes_info = self._get_nodes_info() + nodes_info = self.node_manager.get_nodes_info() for node_info in nodes_info: node_name = node_info.name - csi_node_info = self._get_csi_node_info(node_name) + csi_node_info = self.resource_info_manager.get_csi_node_info(node_name) if self._is_csi_node_pod_deleted_while_host_definer_was_down(csi_node_info): logger.info(messages.CSI_NODE_POD_DELETED_WHILE_HOST_DEFINER_WAS_DOWN.format(node_name)) self._delete_host_definitions(node_name) - self._remove_manage_node_label(node_name) + self.node_manager.remove_manage_node_label(node_name) if self._is_unmanaged_csi_node_has_driver(csi_node_info): logger.info(messages.DETECTED_UNMANAGED_CSI_NODE_WITH_IBM_BLOCK_CSI_DRIVER.format(csi_node_info.name)) unmanaged_csi_nodes_with_driver.add(csi_node_info.name) def _is_csi_node_pod_deleted_while_host_definer_was_down(self, csi_node_info): - return self._is_node_has_manage_node_label(csi_node_info.name) and \ - self._is_node_has_host_definitions(csi_node_info.name) and not csi_node_info.node_id + if self.node_manager.is_node_has_manage_node_label(csi_node_info.name) and \ + 
self.node_manager.is_node_has_host_definitions(csi_node_info.name) and not csi_node_info.node_id: + return True + return False + + def _delete_host_definitions(self, node_name): + if not self.node_manager.is_node_can_be_undefined(node_name): + return + host_definitions_info = self.host_definition_manager.get_all_host_definitions_info_of_the_node(node_name) + for host_definition_info in host_definitions_info: + self.definition_manager.delete_definition(host_definition_info) + self.node_manager.remove_manage_node_label(node_name) def watch_nodes_resources(self): - while self._loop_forever(): - resource_version = self._get_k8s_object_resource_version(self.core_api.list_node()) - stream = watch.Watch().stream(self.core_api.list_node, resource_version=resource_version, timeout_seconds=5) + while utils.loop_forever(): + stream = self.k8s_api.get_node_stream() for watch_event in stream: - watch_event = self._munch(watch_event) + watch_event = utils.munch(watch_event) node_name = watch_event.object.metadata.name - csi_node_info = self._get_csi_node_info(node_name) - node_info = self._generate_node_info(watch_event.object) + csi_node_info = self.resource_info_manager.get_csi_node_info(node_name) + node_info = self.resource_info_manager.generate_node_info(watch_event.object) self._add_new_unmanaged_nodes_with_ibm_csi_driver(watch_event, csi_node_info) self._define_new_managed_node(watch_event, node_name, csi_node_info) - self._handle_node_topologies(node_info, watch_event) - self._update_io_group(node_info) + self.node_manager.handle_node_topologies(node_info, watch_event.type) + self.node_manager.update_node_io_group(node_info) def _add_new_unmanaged_nodes_with_ibm_csi_driver(self, watch_event, csi_node_info): - if watch_event.type in settings.MODIFIED_EVENT and \ + if watch_event.type in common_settings.MODIFIED_EVENT_TYPE and \ self._is_unmanaged_csi_node_has_driver(csi_node_info): 
logger.info(messages.DETECTED_UNMANAGED_CSI_NODE_WITH_IBM_BLOCK_CSI_DRIVER.format(csi_node_info.name)) unmanaged_csi_nodes_with_driver.add(csi_node_info.name) def _is_unmanaged_csi_node_has_driver(self, csi_node_info): - return csi_node_info.node_id and not self._is_host_can_be_defined(csi_node_info.name) + return csi_node_info.node_id and not self.node_manager.is_node_can_be_defined(csi_node_info.name) def _define_new_managed_node(self, watch_event, node_name, csi_node_info): - if watch_event.type == settings.MODIFIED_EVENT and \ - self._is_node_has_new_manage_node_label(csi_node_info): + if watch_event.type == common_settings.MODIFIED_EVENT_TYPE and \ + self.node_manager.is_node_has_new_manage_node_label(csi_node_info, unmanaged_csi_nodes_with_driver): logger.info(messages.DETECTED_NEW_MANAGED_CSI_NODE.format(node_name)) - self._add_node_to_nodes(csi_node_info) - self._define_host_on_all_storages(node_name) + self.node_manager.add_node_to_nodes(csi_node_info) + self.definition_manager.define_node_on_all_storages(node_name) unmanaged_csi_nodes_with_driver.remove(csi_node_info.name) - - def _delete_host_definitions(self, node_name): - if not self._is_host_can_be_undefined(node_name): - return - host_definitions_info = self._get_all_node_host_definitions_info(node_name) - for host_definition_info in host_definitions_info: - self._delete_definition(host_definition_info) - self._remove_manage_node_label(node_name) - - def _is_node_has_new_manage_node_label(self, csi_node_info): - return not self._is_dynamic_node_labeling_allowed() and \ - self._is_node_has_manage_node_label(csi_node_info.name) and \ - self._is_node_with_csi_ibm_csi_node_and_is_not_managed(csi_node_info) - - def _is_node_with_csi_ibm_csi_node_and_is_not_managed(self, csi_node_info): - return csi_node_info.name not in NODES and csi_node_info.node_id and \ - csi_node_info.name in unmanaged_csi_nodes_with_driver - - def _handle_node_topologies(self, node_info, watch_event): - if node_info.name not in 
NODES or watch_event.type != settings.MODIFIED_EVENT: - return - for index, managed_secret_info in enumerate(MANAGED_SECRETS): - if not managed_secret_info.system_ids_topologies: - continue - if self._is_node_should_managed_on_secret_info(node_info.name, managed_secret_info): - self._remove_node_if_topology_not_match(node_info, index, managed_secret_info) - elif self._is_node_in_system_ids_topologies(managed_secret_info.system_ids_topologies, node_info.labels): - self._define_host_with_new_topology(node_info, index, managed_secret_info) - - def _define_host_with_new_topology(self, node_info, index, managed_secret_info): - node_name = node_info.name - system_id = self._get_system_id_for_node_labels( - managed_secret_info.system_ids_topologies, node_info.labels) - managed_secret_info.nodes_with_system_id[node_name] = system_id - MANAGED_SECRETS[index] = managed_secret_info - self._define_host_on_all_storages(node_name) - - def _remove_node_if_topology_not_match(self, node_info, index, managed_secret_info): - if not self._is_node_in_system_ids_topologies(managed_secret_info.system_ids_topologies, node_info.labels): - managed_secret_info.nodes_with_system_id.pop(node_info.name, None) - MANAGED_SECRETS[index] = managed_secret_info - - def _is_node_in_system_ids_topologies(self, system_ids_topologies, node_labels): - return self._get_system_id_for_node_labels(system_ids_topologies, node_labels) != '' - - def _get_system_id_for_node_labels(self, system_ids_topologies, node_labels): - topology_labels = self._get_topology_labels(node_labels) - for system_id, system_topologies in system_ids_topologies.items(): - if is_topology_match(system_topologies, topology_labels): - return system_id - return '' - - def _update_io_group(self, node_info): - io_group = utils.generate_io_group_from_labels(node_info.labels) - node_name = node_info.name - if node_name in NODES and io_group != NODES[node_name].io_group: - logger.info(messages.IO_GROUP_CHANGED.format(node_name, io_group, 
NODES[node_name].io_group)) - NODES[node_name].io_group = io_group - self._define_host_on_all_storages(node_name) diff --git a/controllers/servers/host_definer/watcher/secret_watcher.py b/controllers/servers/host_definer/watcher/secret_watcher.py index 0b1b7fe22..7a3570553 100644 --- a/controllers/servers/host_definer/watcher/secret_watcher.py +++ b/controllers/servers/host_definer/watcher/secret_watcher.py @@ -1,10 +1,8 @@ -from kubernetes import watch - import controllers.servers.host_definer.messages as messages from controllers.common.csi_logger import get_stdout_logger -from controllers.servers.host_definer.watcher.watcher_helper import Watcher, MANAGED_SECRETS -from controllers.servers.host_definer.types import SecretInfo -from controllers.servers.host_definer import settings +from controllers.servers.host_definer.globals import MANAGED_SECRETS +from controllers.servers.host_definer.watcher.watcher_helper import Watcher +from controllers.servers.host_definer.utils import utils logger = get_stdout_logger() @@ -12,37 +10,34 @@ class SecretWatcher(Watcher): def watch_secret_resources(self): - while self._loop_forever(): - resource_version = self._get_k8s_object_resource_version(self.core_api.list_secret_for_all_namespaces()) - stream = watch.Watch().stream(self.core_api.list_secret_for_all_namespaces, - resource_version=resource_version, timeout_seconds=5) + while utils.loop_forever(): + stream = self.k8s_api.get_secret_stream() for watch_event in stream: - watch_event = self._munch(watch_event) - secret_info = self._generate_k8s_secret_to_secret_info(watch_event.object) - if self._is_secret_managed(secret_info): - secret_data = self._change_decode_base64_secret_config(watch_event.object.data) - if self._is_topology_secret(secret_data): - nodes_with_system_id = self._generate_nodes_with_system_id(secret_data) - system_ids_topologies = self._generate_secret_system_ids_topologies(secret_data) - secret_info = self._generate_k8s_secret_to_secret_info( - 
watch_event.object, nodes_with_system_id, system_ids_topologies) - else: - secret_info = self._generate_k8s_secret_to_secret_info(watch_event.object) - self._handle_storage_class_secret(secret_info, watch_event.type) - - def _generate_k8s_secret_to_secret_info(self, k8s_secret, nodes_with_system_id={}, system_ids_topologies={}): - return SecretInfo( - k8s_secret.metadata.name, k8s_secret.metadata.namespace, nodes_with_system_id, system_ids_topologies) - - def _handle_storage_class_secret(self, secret_info, watch_event_type): - managed_secret_info, index = self._get_matching_managed_secret_info(secret_info) - if watch_event_type in (settings.ADDED_EVENT, settings.MODIFIED_EVENT) and \ - managed_secret_info.managed_storage_classes > 0: + watch_event = utils.munch(watch_event) + secret_info = self.resource_info_manager.generate_k8s_secret_to_secret_info(watch_event.object) + if self.secret_manager.is_secret_can_be_changed(secret_info, watch_event.type): + secret_info = self._get_secret_info(watch_event.object) + self._handle_storage_class_secret(secret_info) + + def _get_secret_info(self, watch_event_object): + secret_data = utils.change_decode_base64_secret_config(watch_event_object.data) + if self.secret_manager.is_topology_secret(secret_data): + nodes_with_system_id = self.node_manager.generate_nodes_with_system_id(secret_data) + system_ids_topologies = self.secret_manager.generate_secret_system_ids_topologies(secret_data) + secret_info = self.resource_info_manager.generate_k8s_secret_to_secret_info( + watch_event_object, nodes_with_system_id, system_ids_topologies) + else: + secret_info = self.resource_info_manager.generate_k8s_secret_to_secret_info(watch_event_object) + return secret_info + + def _handle_storage_class_secret(self, secret_info): + managed_secret_info, index = self.secret_manager.get_matching_managed_secret_info(secret_info) + if managed_secret_info.managed_storage_classes > 0: secret_info.managed_storage_classes = 
managed_secret_info.managed_storage_classes MANAGED_SECRETS[index] = secret_info self._define_host_after_watch_event(secret_info) def _define_host_after_watch_event(self, secret_info): logger.info(messages.SECRET_HAS_BEEN_MODIFIED.format(secret_info.name, secret_info.namespace)) - host_definition_info = self._get_host_definition_info_from_secret(secret_info) - self._define_nodes(host_definition_info) + host_definition_info = self.host_definition_manager.get_host_definition_info_from_secret(secret_info) + self.definition_manager.define_nodes(host_definition_info) diff --git a/controllers/servers/host_definer/watcher/storage_class_watcher.py b/controllers/servers/host_definer/watcher/storage_class_watcher.py index 9ea9a08be..584137fbc 100644 --- a/controllers/servers/host_definer/watcher/storage_class_watcher.py +++ b/controllers/servers/host_definer/watcher/storage_class_watcher.py @@ -1,11 +1,9 @@ -import json -from kubernetes import watch - +import controllers.common.settings as common_settings import controllers.servers.host_definer.messages as messages from controllers.common.csi_logger import get_stdout_logger -from controllers.servers.host_definer.watcher.watcher_helper import Watcher, MANAGED_SECRETS -from controllers.servers.host_definer import settings -import controllers.common.settings as common_settings +from controllers.servers.host_definer.globals import MANAGED_SECRETS +from controllers.servers.host_definer.watcher.watcher_helper import Watcher +from controllers.servers.host_definer.utils import utils logger = get_stdout_logger() @@ -13,98 +11,59 @@ class StorageClassWatcher(Watcher): def add_initial_storage_classes(self): - storage_classes_info = self._get_storage_classes_info() + storage_classes_info = self.resource_info_manager.get_storage_classes_info() for storage_class_info in storage_classes_info: secrets_info = self._get_secrets_info_from_storage_class_with_driver_provisioner(storage_class_info) self._handle_added_watch_event(secrets_info, 
storage_class_info.name) def watch_storage_class_resources(self): - while self._loop_forever(): - resource_version = self._get_k8s_object_resource_version(self.storage_api.list_storage_class()) - stream = watch.Watch().stream(self.storage_api.list_storage_class, - resource_version=resource_version, timeout_seconds=5) + while utils.loop_forever(): + stream = self.k8s_api.get_storage_class_stream() for watch_event in stream: - watch_event = self._munch(watch_event) - storage_class_info = self._generate_storage_class_info(watch_event.object) + watch_event = utils.munch(watch_event) + storage_class_info = self.resource_info_manager.generate_storage_class_info(watch_event.object) secrets_info = self._get_secrets_info_from_storage_class_with_driver_provisioner(storage_class_info) - if watch_event.type == settings.ADDED_EVENT: + if watch_event.type == common_settings.ADDED_EVENT_TYPE: self._handle_added_watch_event(secrets_info, storage_class_info.name) - - if watch_event.type == settings.DELETED_EVENT: + elif utils.is_watch_object_type_is_delete(watch_event.type): self._handle_deleted_watch_event(secrets_info) def _get_secrets_info_from_storage_class_with_driver_provisioner(self, storage_class_info): - if self._is_storage_class_has_csi_as_a_provisioner(storage_class_info): + if self.storage_class_manager.is_storage_class_has_csi_as_a_provisioner(storage_class_info): return self._get_secrets_info_from_storage_class(storage_class_info) return [] - def _is_storage_class_has_csi_as_a_provisioner(self, storage_class_info): - return storage_class_info.provisioner == settings.CSI_PROVISIONER_NAME - def _get_secrets_info_from_storage_class(self, storage_class_info): secrets_info = [] for parameter_name in storage_class_info.parameters: - if self._is_secret(parameter_name): - secret_name, secret_namespace = self._get_secret_name_and_namespace(storage_class_info, parameter_name) - secret_data = self._get_secret_data(secret_name, secret_namespace) + if 
self.secret_manager.is_secret(parameter_name): + secret_name, secret_namespace = self.secret_manager.get_secret_name_and_namespace( + storage_class_info, parameter_name) logger.info(messages.SECRET_IS_BEING_USED_BY_STORAGE_CLASS.format( secret_name, secret_namespace, storage_class_info.name)) - if self._is_topology_secret(secret_data): - logger.info(messages.SECRET_IS_FROM_TOPOLOGY_TYPE.format(secret_name, secret_namespace)) - nodes_with_system_id = self._generate_nodes_with_system_id(secret_data) - system_ids_topologies = self._generate_secret_system_ids_topologies(secret_data) - secret_info = self._generate_secret_info( - secret_name, secret_namespace, nodes_with_system_id, system_ids_topologies) - secrets_info = self._add_secret_info_to_list(secret_info, secrets_info) - else: - secret_info = self._generate_secret_info(secret_name, secret_namespace) - secrets_info = self._add_secret_info_to_list(secret_info, secrets_info) + secret_info = self._get_secret_info(secret_name, secret_namespace) + secrets_info = self.secret_manager.add_unique_secret_info_to_list(secret_info, secrets_info) return list(filter(None, secrets_info)) - def _is_secret(self, parameter_name): - return parameter_name.endswith(settings.SECRET_NAME_SUFFIX) and \ - parameter_name.startswith(settings.CSI_PARAMETER_PREFIX) - - def _get_secret_name_and_namespace(self, storage_class_info, parameter_name): - secret_name_suffix = settings.SECRET_NAME_SUFFIX - prefix = parameter_name.split(secret_name_suffix)[0] - return (storage_class_info.parameters[parameter_name], - storage_class_info.parameters[prefix + secret_name_suffix.replace( - common_settings.NAME_FIELD, common_settings.NAMESPACE_FIELD)]) - - def _add_secret_info_to_list(self, secret_info, list_with_secrets_info): - for secret_info_in_list in list_with_secrets_info: - if secret_info_in_list.name == secret_info.name and \ - secret_info_in_list.namespace == secret_info.namespace: - return list_with_secrets_info - 
list_with_secrets_info.append(secret_info) - return list_with_secrets_info + def _get_secret_info(self, secret_name, secret_namespace): + secret_data = self.secret_manager.get_secret_data(secret_name, secret_namespace) + if self.secret_manager.is_topology_secret(secret_data): + logger.info(messages.SECRET_IS_FROM_TOPOLOGY_TYPE.format(secret_name, secret_namespace)) + nodes_with_system_id = self.node_manager.generate_nodes_with_system_id(secret_data) + system_ids_topologies = self.secret_manager.generate_secret_system_ids_topologies(secret_data) + secret_info = self.resource_info_manager.generate_secret_info( + secret_name, secret_namespace, nodes_with_system_id, system_ids_topologies) + else: + secret_info = self.resource_info_manager.generate_secret_info(secret_name, secret_namespace) + return secret_info def _handle_added_watch_event(self, secrets_info, storage_class_name): logger.info(messages.NEW_STORAGE_CLASS.format(storage_class_name)) for secret_info in secrets_info: if secret_info: - self._define_nodes_when_new_secret(secret_info) - - def _define_nodes_when_new_secret(self, secret_info): - managed_secret_info, index = self._get_matching_managed_secret_info(secret_info) - secret_info.managed_storage_classes = 1 - if index == -1: - MANAGED_SECRETS.append(secret_info) - self._define_nodes_from_secret_info(secret_info) - elif managed_secret_info.managed_storage_classes == 0: - MANAGED_SECRETS[index] = secret_info - self._define_nodes_from_secret_info(secret_info) - else: - secret_info.managed_storage_classes = managed_secret_info.managed_storage_classes + 1 - MANAGED_SECRETS[index] = secret_info - - def _define_nodes_from_secret_info(self, secret_info): - logger.info(messages.NEW_MANAGED_SECRET.format(secret_info.name, secret_info.namespace)) - host_definition_info = self._get_host_definition_info_from_secret(secret_info) - self._define_nodes(host_definition_info) + self.definition_manager.define_nodes_when_new_secret(secret_info) def 
_handle_deleted_watch_event(self, secrets_info): for secret_info in secrets_info: - _, index = self._get_matching_managed_secret_info(secret_info) + _, index = self.secret_manager.get_matching_managed_secret_info(secret_info) MANAGED_SECRETS[index].managed_storage_classes -= 1 diff --git a/controllers/servers/host_definer/watcher/watcher_helper.py b/controllers/servers/host_definer/watcher/watcher_helper.py index 3f4cb4eb5..d00200161 100644 --- a/controllers/servers/host_definer/watcher/watcher_helper.py +++ b/controllers/servers/host_definer/watcher/watcher_helper.py @@ -1,437 +1,21 @@ -import os -import random -import string -from munch import Munch -import json +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.servers.host_definer.resource_manager.host_definition import HostDefinitionManager +from controllers.servers.host_definer.resource_manager.secret import SecretManager +from controllers.servers.host_definer.resource_manager.node import NodeManager +from controllers.servers.host_definer.resource_manager.csi_node import CSINodeManager +from controllers.servers.host_definer.definition_manager.definition import DefinitionManager +from controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager +from controllers.servers.host_definer.resource_manager.storage_class import StorageClassManager -from controllers.common.csi_logger import get_stdout_logger -from controllers.servers.settings import SECRET_SUPPORTED_TOPOLOGIES_PARAMETER -from controllers.servers.utils import ( - validate_secrets, get_array_connection_info_from_secrets, get_system_info_for_topologies) -from controllers.servers.errors import ValidationException -import controllers.servers.host_definer.messages as messages -from controllers.servers.host_definer.kubernetes_manager.manager import KubernetesManager -from controllers.servers.host_definer import settings -import controllers.common.settings as common_settings -from 
controllers.servers.host_definer.types import ( - DefineHostRequest, DefineHostResponse, HostDefinitionInfo, SecretInfo, ManagedNode) -from controllers.servers.host_definer.storage_manager.host_definer_server import HostDefinerServicer -MANAGED_SECRETS = [] -NODES = {} -logger = get_stdout_logger() - - -class Watcher(KubernetesManager): +class Watcher(): def __init__(self): super().__init__() - self.storage_host_servicer = HostDefinerServicer() - - def _define_host_on_all_storages(self, node_name): - logger.info(messages.DEFINE_NODE_ON_ALL_MANAGED_SECRETS.format(node_name)) - for secret_info in MANAGED_SECRETS: - if secret_info.managed_storage_classes == 0: - continue - host_definition_info = self._get_host_definition_info_from_secret_and_node_name(node_name, secret_info) - self._create_definition(host_definition_info) - - def _get_host_definition_info_from_secret_and_node_name(self, node_name, secret_info): - host_definition_info = self._get_host_definition_info_from_secret(secret_info) - host_definition_info = self._add_name_to_host_definition_info(node_name, host_definition_info) - return host_definition_info - - def _get_host_definition_info_from_secret(self, secret_info): - host_definition_info = HostDefinitionInfo() - host_definition_info.secret_name = secret_info.name - host_definition_info.secret_namespace = secret_info.namespace - return host_definition_info - - def _define_nodes(self, host_definition_info): - for node_name, _ in NODES.items(): - host_definition_info = self._add_name_to_host_definition_info(node_name, host_definition_info) - self._create_definition(host_definition_info) - - def _add_name_to_host_definition_info(self, node_name, host_definition_info): - host_definition_info.node_name = node_name - host_definition_info.node_id = NODES[node_name].node_id - host_definition_info.name = self._get_host_definition_name(node_name) - return host_definition_info - - def _create_definition(self, host_definition_info): - if not 
self._is_node_should_be_managed_on_secret( - host_definition_info.node_name, host_definition_info.secret_name, - host_definition_info.secret_namespace): - return - host_definition_info = self._update_host_definition_info(host_definition_info) - response = self._define_host(host_definition_info) - current_host_definition_info_on_cluster = self._create_host_definition_if_not_exist( - host_definition_info, response) - self._set_status_to_host_definition_after_definition( - response.error_message, current_host_definition_info_on_cluster) - - def _update_host_definition_info(self, host_definition_info): - host_definition_info_on_cluster = self._get_matching_host_definition_info( - host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) - if host_definition_info_on_cluster: - host_definition_info.connectivity_type = host_definition_info_on_cluster.connectivity_type - host_definition_info.node_id = host_definition_info_on_cluster.node_id - return host_definition_info - - def _define_host(self, host_definition_info): - logger.info(messages.DEFINE_NODE_ON_SECRET.format(host_definition_info.node_name, - host_definition_info.secret_name, host_definition_info.secret_namespace)) - return self._ensure_definition_state(host_definition_info, self.storage_host_servicer.define_host) - - def _create_host_definition_if_not_exist(self, host_definition_info, response): - host_definition_manifest = self._get_host_definition_manifest(host_definition_info, response) - current_host_definition_info_on_cluster = self._get_matching_host_definition_info( - host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) - if current_host_definition_info_on_cluster: - host_definition_manifest[settings.METADATA][ - common_settings.NAME_FIELD] = current_host_definition_info_on_cluster.name - self._patch_host_definition(host_definition_manifest) - return current_host_definition_info_on_cluster - else: - 
logger.info(messages.CREATING_NEW_HOST_DEFINITION.format(host_definition_info.name)) - return self._create_host_definition(host_definition_manifest) - - def _get_host_definition_manifest(self, host_definition_info, response): - return { - settings.API_VERSION: settings.CSI_IBM_API_VERSION, - settings.KIND: settings.HOST_DEFINITION_KIND, - settings.METADATA: { - common_settings.NAME_FIELD: host_definition_info.name, - }, - settings.SPEC: { - settings.HOST_DEFINITION_FIELD: { - settings.NODE_NAME_FIELD: host_definition_info.node_name, - common_settings.HOST_DEFINITION_NODE_ID_FIELD: NODES[host_definition_info.node_name].node_id, - settings.SECRET_NAME_FIELD: host_definition_info.secret_name, - settings.SECRET_NAMESPACE_FIELD: host_definition_info.secret_namespace, - settings.CONNECTIVITY_TYPE_FIELD: response.connectivity_type, - settings.PORTS_FIELD: response.ports, - settings.NODE_NAME_ON_STORAGE_FIELD: response.node_name_on_storage, - settings.IO_GROUP_FIELD: response.io_group, - settings.MANAGEMENT_ADDRESS_FIELD: response.management_address - }, - }, - } - - def _set_status_to_host_definition_after_definition(self, message_from_storage, host_definition_info): - if message_from_storage and host_definition_info: - self._set_host_definition_status(host_definition_info.name, - settings.PENDING_CREATION_PHASE) - self._create_k8s_event_for_host_definition( - host_definition_info, message_from_storage, settings.DEFINE_ACTION, settings.FAILED_MESSAGE_TYPE) - elif host_definition_info: - self._set_host_definition_status_to_ready(host_definition_info) - - def _delete_definition(self, host_definition_info): - response = DefineHostResponse() - if self._is_node_should_be_managed_on_secret(host_definition_info.node_name, host_definition_info.secret_name, - host_definition_info.secret_namespace): - response = self._undefine_host(host_definition_info) - self._handle_k8s_host_definition_after_undefine_action_if_exist(host_definition_info, response) - - def 
_is_node_should_be_managed_on_secret(self, node_name, secret_name, secret_namespace): - logger.info(messages.CHECK_NODE_SHOULD_BE_MANAGED_BY_SECRET.format(node_name, secret_name, secret_namespace)) - secret_data = self._get_secret_data(secret_name, secret_namespace) - self._validate_secret(secret_data) - managed_secret_info, _ = self._get_managed_secret_by_name_and_namespace(secret_name, secret_namespace) - if self._is_node_should_managed_on_secret_info(node_name, managed_secret_info): - logger.info(messages.NODE_SHOULD_BE_MANAGED_ON_SECRET.format(node_name, secret_name, secret_namespace)) - return True - logger.info(messages.NODE_SHOULD_NOT_BE_MANAGED_ON_SECRET.format(node_name, secret_name, secret_namespace)) - return False - - def _get_managed_secret_by_name_and_namespace(self, secret_name, secret_namespace): - secret_info = self._generate_secret_info(secret_name, secret_namespace) - managed_secret_info, index = self._get_matching_managed_secret_info(secret_info) - return managed_secret_info, index - - def _is_node_should_managed_on_secret_info(self, node_name, secret_info): - if secret_info: - nodes_with_system_id = secret_info.nodes_with_system_id - if nodes_with_system_id and nodes_with_system_id.get(node_name): - return True - if nodes_with_system_id: - return False - return True - return False - - def _is_topology_secret(self, secret_data): - self._validate_secret(secret_data) - if self._get_secret_secret_config(secret_data): - return True - return False - - def _validate_secret(self, secret_data): - secret_data = self._convert_secret_config_to_string(secret_data) - try: - validate_secrets(secret_data) - except ValidationException as ex: - logger.error(str(ex)) - - def _undefine_host(self, host_definition_info): - logger.info(messages.UNDEFINED_HOST.format(host_definition_info.node_name, - host_definition_info.secret_name, host_definition_info.secret_namespace)) - return self._ensure_definition_state(host_definition_info, 
self.storage_host_servicer.undefine_host) - - def _handle_k8s_host_definition_after_undefine_action_if_exist(self, host_definition_info, response): - current_host_definition_info_on_cluster = self._get_matching_host_definition_info( - host_definition_info.node_name, host_definition_info.secret_name, host_definition_info.secret_namespace) - if current_host_definition_info_on_cluster: - self._handle_k8s_host_definition_after_undefine_action( - response.error_message, current_host_definition_info_on_cluster) - - def _handle_k8s_host_definition_after_undefine_action(self, message_from_storage, host_definition_info): - if message_from_storage and host_definition_info: - self._set_host_definition_status(host_definition_info.name, - settings.PENDING_DELETION_PHASE) - self._create_k8s_event_for_host_definition( - host_definition_info, message_from_storage, - settings.UNDEFINE_ACTION, settings.FAILED_MESSAGE_TYPE) - elif host_definition_info: - self._delete_host_definition(host_definition_info.name) - - def _set_host_definition_status_to_ready(self, host_definition): - self._set_host_definition_status(host_definition.name, settings.READY_PHASE) - self._create_k8s_event_for_host_definition( - host_definition, settings.SUCCESS_MESSAGE, settings.DEFINE_ACTION, settings.SUCCESSFUL_MESSAGE_TYPE) - - def _create_k8s_event_for_host_definition(self, host_definition_info, message, action, message_type): - logger.info(messages.CREATE_EVENT_FOR_HOST_DEFINITION.format(message, host_definition_info.name)) - k8s_event = self._generate_k8s_event(host_definition_info, message, action, message_type) - self._create_k8s_event(settings.DEFAULT_NAMESPACE, k8s_event) - - def _is_host_can_be_defined(self, node_name): - return self._is_dynamic_node_labeling_allowed() or self._is_node_has_manage_node_label(node_name) - - def _is_dynamic_node_labeling_allowed(self): - return os.getenv(settings.DYNAMIC_NODE_LABELING_ENV_VAR) == settings.TRUE_STRING - - def _is_host_can_be_undefined(self, node_name): 
- return self._is_host_definer_can_delete_hosts() and \ - self._is_node_has_manage_node_label(node_name) and \ - not self._is_node_has_forbid_deletion_label(node_name) - - def _is_host_definer_can_delete_hosts(self): - return os.getenv(settings.ALLOW_DELETE_ENV_VAR) == settings.TRUE_STRING - - def _is_node_has_manage_node_label(self, node_name): - return self._is_host_has_label_in_true(node_name, settings.MANAGE_NODE_LABEL) - - def _is_node_has_forbid_deletion_label(self, node_name): - return self._is_host_has_label_in_true(node_name, settings.FORBID_DELETION_LABEL) - - def _is_host_has_label_in_true(self, node_name, label): - node_info = self._get_node_info(node_name) - return self._get_label_value(node_info.labels, label) == settings.TRUE_STRING - - def _ensure_definition_state(self, host_definition_info, define_function): - request = self._get_request_from_host_definition(host_definition_info) - if not request: - response = DefineHostResponse() - response.error_message = messages.FAILED_TO_GET_SECRET_EVENT.format( - host_definition_info.secret_name, host_definition_info.secret_namespace) - return response - return define_function(request) - - def _get_request_from_host_definition(self, host_definition_info): - node_name = host_definition_info.node_name - logger.info(messages.GENERATE_REQUEST_FOR_NODE.format(node_name)) - node_info = self._get_node_info(node_name) - request = self._get_new_request(node_info.labels) - request = self._add_array_connectivity_info_to_request( - request, host_definition_info.secret_name, host_definition_info.secret_namespace, node_info.labels) - if request: - request.node_id_from_host_definition = host_definition_info.node_id - request.node_id_from_csi_node = self._get_node_id_by_node(host_definition_info) - request.io_group = self._get_io_group_by_node(host_definition_info.node_name) - return request - - def _get_new_request(self, labels): - request = DefineHostRequest() - request.prefix = self._get_prefix() - 
request.connectivity_type_from_user = self._get_connectivity_type_from_user(labels) - return request - - def _get_prefix(self): - return os.getenv(settings.PREFIX_ENV_VAR) - - def _get_connectivity_type_from_user(self, labels): - connectivity_type_label_on_node = self._get_label_value(labels, settings.CONNECTIVITY_TYPE_LABEL) - if connectivity_type_label_on_node in settings.SUPPORTED_CONNECTIVITY_TYPES: - return connectivity_type_label_on_node - return os.getenv(settings.CONNECTIVITY_ENV_VAR) - - def _get_label_value(self, labels, label): - return labels.get(label) - - def _add_array_connectivity_info_to_request(self, request, secret_name, secret_namespace, labels): - request.array_connection_info = self._get_array_connection_info_from_secret( - secret_name, secret_namespace, labels) - if request.array_connection_info: - return request - return None - - def _get_array_connection_info_from_secret(self, secret_name, secret_namespace, labels): - secret_data = self._get_secret_data(secret_name, secret_namespace) - if secret_data: - node_topology_labels = self._get_topology_labels(labels) - return self._get_array_connection_info_from_secret_data(secret_data, node_topology_labels) - return {} - - def _get_array_connection_info_from_secret_data(self, secret_data, labels): - try: - secret_data = self._convert_secret_config_to_string(secret_data) - array_connection_info = get_array_connection_info_from_secrets(secret_data, labels) - return self._decode_array_connectivity_info(array_connection_info) - except ValidationException as ex: - logger.error(str(ex)) - return None - - def _convert_secret_config_to_string(self, secret_data): - if settings.SECRET_CONFIG_FIELD in secret_data.keys(): - if type(secret_data[settings.SECRET_CONFIG_FIELD]) is dict: - secret_data[settings.SECRET_CONFIG_FIELD] = json.dumps(secret_data[settings.SECRET_CONFIG_FIELD]) - return secret_data - - def _decode_array_connectivity_info(self, array_connection_info): - array_connection_info.array_addresses 
= self._decode_list_base64_to_list_string( - array_connection_info.array_addresses) - array_connection_info.user = self._decode_base64_to_string(array_connection_info.user) - array_connection_info.password = self._decode_base64_to_string(array_connection_info.password) - return array_connection_info - - def _decode_list_base64_to_list_string(self, list_with_base64): - for index, base64_content in enumerate(list_with_base64): - list_with_base64[index] = self._decode_base64_to_string(base64_content) - return list_with_base64 - - def _get_node_id_by_node(self, host_definition_info): - try: - return NODES[host_definition_info.node_name].node_id - except Exception: - return host_definition_info.node_id - - def _get_io_group_by_node(self, node_name): - try: - return NODES[node_name].io_group - except Exception: - return '' - - def _get_host_definition_name(self, node_name): - return '{0}-{1}'.format(node_name, self._get_random_string()).replace('_', '.') - - def _get_random_string(self): - return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20)) - - def _add_node_to_nodes(self, csi_node_info): - logger.info(messages.NEW_KUBERNETES_NODE.format(csi_node_info.name)) - self._add_manage_node_label_to_node(csi_node_info.name) - NODES[csi_node_info.name] = self._generate_managed_node(csi_node_info) - - def _add_manage_node_label_to_node(self, node_name): - if self._is_node_has_manage_node_label(node_name): - return - logger.info(messages.ADD_LABEL_TO_NODE.format(settings.MANAGE_NODE_LABEL, node_name)) - self._update_manage_node_label(node_name, settings.TRUE_STRING) - - def _generate_managed_node(self, csi_node_info): - node_info = self._get_node_info(csi_node_info.name) - return ManagedNode(csi_node_info, node_info.labels) - - def _remove_manage_node_label(self, node_name): - if self._is_managed_by_host_definer_label_should_be_removed(node_name): - logger.info(messages.REMOVE_LABEL_FROM_NODE.format(settings.MANAGE_NODE_LABEL, node_name)) - 
self._update_manage_node_label(node_name, None) - - def _is_managed_by_host_definer_label_should_be_removed(self, node_name): - return self._is_dynamic_node_labeling_allowed() and \ - not self._is_node_has_ibm_block_csi(node_name) and \ - not self._is_node_has_host_definitions(node_name) - - def _is_node_has_ibm_block_csi(self, node_name): - csi_node_info = self._get_csi_node_info(node_name) - return csi_node_info.node_id != '' - - def _is_node_has_host_definitions(self, node_name): - host_definitions_info = self._get_all_node_host_definitions_info(node_name) - return host_definitions_info != [] - - def _get_all_node_host_definitions_info(self, node_name): - node_host_definitions_info = [] - k8s_host_definitions = self._get_k8s_host_definitions() - for k8s_host_definition in k8s_host_definitions: - host_definition_info = self._generate_host_definition_info(k8s_host_definition) - if host_definition_info.node_name == node_name: - node_host_definitions_info.append(host_definition_info) - return node_host_definitions_info - - def _munch(self, watch_event): - return Munch.fromDict(watch_event) - - def _loop_forever(self): - return True - - def _generate_secret_info(self, secret_name, secret_namespace, nodes_with_system_id={}, system_ids_topologies={}): - return SecretInfo(secret_name, secret_namespace, nodes_with_system_id, system_ids_topologies) - - def _is_secret_managed(self, secret_info): - _, index = self._get_matching_managed_secret_info(secret_info) - if index != -1: - return True - return False - - def _get_matching_managed_secret_info(self, secret_info): - for index, managed_secret_info in enumerate(MANAGED_SECRETS): - if managed_secret_info.name == secret_info.name and managed_secret_info.namespace == secret_info.namespace: - return managed_secret_info, index - return secret_info, -1 - - def _generate_nodes_with_system_id(self, secret_data): - nodes_with_system_id = {} - secret_config = self._get_secret_secret_config(secret_data) - nodes_info = 
self._get_nodes_info() - for node_info in nodes_info: - nodes_with_system_id[node_info.name] = self._get_system_id_for_node(node_info, secret_config) - return nodes_with_system_id - - def _get_system_id_for_node(self, node_info, secret_config): - node_topology_labels = self._get_topology_labels(node_info.labels) - try: - _, system_id = get_system_info_for_topologies(secret_config, node_topology_labels) - except ValidationException: - return '' - return system_id - - def _get_topology_labels(self, labels): - topology_labels = {} - for label in labels: - if self._is_topology_label(label): - topology_labels[label] = labels[label] - return topology_labels - - def _is_topology_label(self, label): - for prefix in settings.TOPOLOGY_PREFIXES: - if label.startswith(prefix): - return True - return False - - def _generate_secret_system_ids_topologies(self, secret_data): - system_ids_topologies = {} - secret_config = self._get_secret_secret_config(secret_data) - for system_id, system_info in secret_config.items(): - system_ids_topologies[system_id] = (system_info.get(SECRET_SUPPORTED_TOPOLOGIES_PARAMETER)) - return system_ids_topologies - - def _get_secret_secret_config(self, secret_data): - secret_data = self._convert_secret_config_to_dict(secret_data) - return secret_data.get(settings.SECRET_CONFIG_FIELD, {}) - - def _convert_secret_config_to_dict(self, secret_data): - if settings.SECRET_CONFIG_FIELD in secret_data.keys(): - if type(secret_data[settings.SECRET_CONFIG_FIELD]) is str: - secret_data[settings.SECRET_CONFIG_FIELD] = json.loads(secret_data[settings.SECRET_CONFIG_FIELD]) - return secret_data + self.k8s_api = K8SApi() + self.host_definition_manager = HostDefinitionManager() + self.secret_manager = SecretManager() + self.definition_manager = DefinitionManager() + self.node_manager = NodeManager() + self.csi_node = CSINodeManager() + self.resource_info_manager = ResourceInfoManager() + self.storage_class_manager = StorageClassManager() diff --git 
a/controllers/servers/messages.py b/controllers/servers/messages.py index 7cc5fff5d..71491f2be 100644 --- a/controllers/servers/messages.py +++ b/controllers/servers/messages.py @@ -19,8 +19,9 @@ UNSUPPORTED_FS_TYPE_MESSAGE = "unsupported fs_type : {}" UNSUPPORTED_MOUNT_FLAGS_MESSAGE = "mount_flags is unsupported" UNSUPPORTED_VOLUME_ACCESS_TYPE_MESSAGE = "unsupported volume access type" +UNSUPPORTED_REPLICATION_SOURCE_TYPE_MESSAGE = "unsupported replication source type" UNSUPPORTED_ACCESS_MODE_MESSAGE = "unsupported access mode : {}" -NAME_SHOULD_NOT_BE_EMPTY_MESSAGE = 'name should not be empty' +PARAMETER_SHOULD_NOT_BE_EMPTY_MESSAGE = '{} should not be empty' VOLUME_ID_SHOULD_NOT_BE_EMPTY_MESSAGE = 'volume id should not be empty' SNAPSHOT_ID_SHOULD_NOT_BE_EMPTY_MESSAGE = 'snapshot id should not be empty' SIZE_SHOULD_NOT_BE_NEGATIVE_MESSAGE = 'size should not be negative' @@ -39,3 +40,4 @@ POOL_NOT_MATCH_VOLUME_MESSAGE = 'pool name: {0} does not match existing volume pool name: {1}' PREFIX_NOT_MATCH_VOLUME_MESSAGE = 'prefix: {0} does not match existing volume name: {1}' REQUIRED_BYTES_MISMATCH_MESSAGE = "required bytes : {0} does not match the source volume required bytes : {1}" +UNSUPPORTED_STORAGECLASS_VOLUME_GROUP = "Unsupported storage class volume group with volume group feature" diff --git a/controllers/servers/settings.py b/controllers/servers/settings.py index 7ca74cee7..15909f021 100644 --- a/controllers/servers/settings.py +++ b/controllers/servers/settings.py @@ -39,6 +39,7 @@ PARAMETERS_VIRT_SNAP_FUNC = "virt_snap_func" PARAMETERS_VOLUME_NAME_PREFIX = "volume_name_prefix" PARAMETERS_SNAPSHOT_NAME_PREFIX = "snapshot_name_prefix" +PARAMETERS_VOLUME_GROUP_NAME_PREFIX = "volume_group_name_prefix" PARAMETERS_SYSTEM_ID = "system_id" PARAMETERS_COPY_TYPE = "copy_type" PARAMETERS_REPLICATION_HANDLE = "replicationHandle" @@ -52,10 +53,14 @@ PARAMETERS_ARRAY_ADDRESSES_DELIMITER = "," REQUEST_ACCESSIBILITY_REQUIREMENTS_FIELD = "accessibility_requirements" 
+LOCK_REPLICATION_REQUEST_ATTR = "replication_source" SNAPSHOT_TYPE_NAME = "snapshot" VOLUME_TYPE_NAME = "volume" +VOLUME_GROUP_TYPE_NAME = "volumegroup" VOLUME_SOURCE_ID_FIELDS = {SNAPSHOT_TYPE_NAME: 'snapshot_id', VOLUME_TYPE_NAME: 'volume_id'} MINIMUM_VOLUME_ID_PARTS = 2 MAXIMUM_VOLUME_ID_PARTS = 3 +ENABLE_CALL_HOME_ENV_VAR = 'ENABLE_CALL_HOME' +UNIQUE_KEY_KEY = 'uniquekey' diff --git a/controllers/servers/utils.py b/controllers/servers/utils.py index 2e0a59956..f2dd1e74d 100644 --- a/controllers/servers/utils.py +++ b/controllers/servers/utils.py @@ -2,14 +2,16 @@ import re from hashlib import sha256 from operator import eq +from os import getenv import base58 -from csi_general import csi_pb2 +from csi_general import csi_pb2, volumegroup_pb2 from google.protobuf.timestamp_pb2 import Timestamp +import controllers.array_action.errors as array_errors +import controllers.array_action.settings as array_settings import controllers.servers.messages as messages import controllers.servers.settings as servers_settings -import controllers.array_action.errors as array_errors from controllers.array_action.array_action_types import ReplicationRequest from controllers.array_action.settings import NVME_OVER_FC_CONNECTIVITY_TYPE, FC_CONNECTIVITY_TYPE, \ ISCSI_CONNECTIVITY_TYPE, REPLICATION_COPY_TYPE_SYNC, REPLICATION_COPY_TYPE_ASYNC, REPLICATION_TYPE_MIRROR, \ @@ -20,7 +22,7 @@ from controllers.common.settings import NAME_PREFIX_SEPARATOR from controllers.servers.csi.controller_types import (ArrayConnectionInfo, ObjectIdInfo, - ObjectParameters) + ObjectParameters, VolumeGroupParameters, VolumeGroupIdInfo) from controllers.servers.errors import ObjectIdError, ValidationException, InvalidNodeId logger = get_stdout_logger() @@ -98,6 +100,10 @@ def get_snapshot_parameters(parameters, system_id): return get_object_parameters(parameters, servers_settings.PARAMETERS_SNAPSHOT_NAME_PREFIX, system_id) +def get_volume_group_parameters(parameters): + return 
VolumeGroupParameters(prefix=parameters.get(servers_settings.PARAMETERS_VOLUME_GROUP_NAME_PREFIX)) + + def _str_to_bool(parameter): if parameter and parameter.lower() == "true": return True @@ -128,16 +134,21 @@ def get_object_parameters(parameters, prefix_param_name, system_id): def get_volume_id(new_volume, system_id): - return _get_object_id(new_volume, system_id) + return _get_object_id(new_volume, new_volume.id, system_id) def get_snapshot_id(new_snapshot, system_id): - return _get_object_id(new_snapshot, system_id) + snapshot_strong_id = new_snapshot.id if new_snapshot.id else new_snapshot.name + return _get_object_id(new_snapshot, snapshot_strong_id, system_id) + + +def get_volume_group_id(new_volume_group, system_id): + return _get_object_id(new_volume_group, new_volume_group.name, system_id) -def _get_object_id(obj, system_id): +def _get_object_id(obj, obj_strong_id, system_id): object_ids_delimiter = servers_settings.PARAMETERS_OBJECT_IDS_DELIMITER - object_ids_value = object_ids_delimiter.join((obj.internal_id, obj.id)) + object_ids_value = object_ids_delimiter.join((obj.internal_id, obj_strong_id)) object_id_info_delimiter = servers_settings.PARAMETERS_OBJECT_ID_INFO_DELIMITER if system_id: return object_id_info_delimiter.join((obj.array_type, system_id, object_ids_value)) @@ -268,12 +279,22 @@ def _validate_object_id(object_id, object_type=servers_settings.VOLUME_TYPE_NAME raise ValidationException(messages.WRONG_FORMAT_MESSAGE.format("volume id")) +def _validate_request_required_field(field_value, field_name): + logger.debug("validating request {}".format(field_name)) + if not field_value: + raise ValidationException(messages.PARAMETER_SHOULD_NOT_BE_EMPTY_MESSAGE.format(field_name)) + + +def _validate_minimum_request_fields(request, required_field_names): + for required_field_name in required_field_names: + _validate_request_required_field(getattr(request, required_field_name), required_field_name) + validate_secrets(request.secrets) + + def 
validate_create_volume_request(request): logger.debug("validating create volume request") - logger.debug("validating volume name") - if not request.name: - raise ValidationException(messages.NAME_SHOULD_NOT_BE_EMPTY_MESSAGE) + _validate_minimum_request_fields(request, ["name"]) logger.debug("validating volume capacity") if request.capacity_range: @@ -285,8 +306,6 @@ def validate_create_volume_request(request): validate_csi_volume_capabilities(request.volume_capabilities) - validate_secrets(request.secrets) - if request.parameters: _validate_pool_parameter(request.parameters) else: @@ -298,13 +317,17 @@ def validate_create_volume_request(request): logger.debug("request validation finished.") +def validate_create_volume_group_request(request): + logger.debug("validating create volume group request") + + _validate_minimum_request_fields(request, ["name"]) + + logger.debug("request validation finished.") + + def validate_create_snapshot_request(request): logger.debug("validating create snapshot request") - logger.debug("validating snapshot name") - if not request.name: - raise ValidationException(messages.NAME_SHOULD_NOT_BE_EMPTY_MESSAGE) - - validate_secrets(request.secrets) + _validate_minimum_request_fields(request, ["name"]) logger.debug("validating source volume id") if not request.source_volume_id: @@ -314,17 +337,15 @@ def validate_create_snapshot_request(request): def validate_delete_snapshot_request(request): logger.debug("validating delete snapshot request") - if not request.snapshot_id: - raise ValidationException(messages.SNAPSHOT_ID_SHOULD_NOT_BE_EMPTY_MESSAGE) - validate_secrets(request.secrets) + _validate_minimum_request_fields(request, ["snapshot_id"]) logger.debug("request validation finished.") def validate_validate_volume_capabilities_request(request): logger.debug("validating validate_volume_capabilities request") - + _validate_minimum_request_fields(request, ["volume_id"]) _validate_object_id(request.volume_id) if request.parameters: @@ -332,8 
+353,6 @@ def validate_validate_volume_capabilities_request(request): validate_csi_volume_capabilities(request.volume_capabilities) - validate_secrets(request.secrets) - def validate_volume_context_match_volume(volume_context, volume): logger.debug("validate volume_context is matching volume") @@ -348,8 +367,7 @@ def validate_volume_context_match_volume(volume_context, volume): def validate_expand_volume_request(request): logger.debug("validating expand volume request") - if not request.volume_id: - raise ValidationException(messages.VOLUME_ID_SHOULD_NOT_BE_EMPTY_MESSAGE) + _validate_minimum_request_fields(request, ["volume_id"]) logger.debug("validating volume capacity") if request.capacity_range: @@ -358,14 +376,17 @@ def validate_expand_volume_request(request): else: raise ValidationException(messages.NO_CAPACITY_RANGE_MESSAGE) - validate_secrets(request.secrets) - logger.debug("expand volume validation finished") -def generate_csi_create_volume_response(new_volume, system_id=None, source_type=None): - logger.debug("creating create volume response for volume : {0}".format(new_volume)) +def _generate_volumes_response(new_volumes): + volumes = [] + for volume in new_volumes: + volumes.append(_generate_volumegroup_volume_response(volume)) + return volumes + +def _generate_volume_response(new_volume, system_id=None, source_type=None): content_source = None if new_volume.source_id: if source_type == servers_settings.SNAPSHOT_TYPE_NAME: @@ -375,15 +396,55 @@ def generate_csi_create_volume_response(new_volume, system_id=None, source_type= volume_source = csi_pb2.VolumeContentSource.VolumeSource(volume_id=new_volume.source_id) content_source = csi_pb2.VolumeContentSource(volume=volume_source) - response = csi_pb2.CreateVolumeResponse(volume=csi_pb2.Volume( + return csi_pb2.Volume( capacity_bytes=new_volume.capacity_bytes, volume_id=get_volume_id(new_volume, system_id), - content_source=content_source)) + content_source=content_source) + + +def 
_generate_volumegroup_volume_response(new_volume, system_id=None): + content_source = None + if new_volume.source_id: + volume_source = volumegroup_pb2.VgVolumeContentSource.VolumeSource(volume_id=new_volume.source_id) + content_source = volumegroup_pb2.VgVolumeContentSource(volume=volume_source) + + return volumegroup_pb2.VgVolume( + capacity_bytes=new_volume.capacity_bytes, + volume_id=get_volume_id(new_volume, system_id), + content_source=content_source) + + +def generate_csi_create_volume_response(new_volume, system_id=None, source_type=None): + logger.debug("creating create volume response for volume : {0}".format(new_volume)) + + response = csi_pb2.CreateVolumeResponse(volume=_generate_volume_response(new_volume, system_id, source_type)) logger.debug("finished creating volume response : {0}".format(response)) return response +def generate_csi_create_volume_group_response(volume_group): + logger.debug("creating create volume group response for volume group : {0}".format(volume_group)) + + response = volumegroup_pb2.CreateVolumeGroupResponse(volume_group=volumegroup_pb2.VolumeGroup( + volume_group_id=get_volume_group_id(volume_group, None), + volumes=[])) + logger.debug("finished creating volume group response : {0}".format(response)) + + return response + + +def generate_csi_modify_volume_group_response(volume_group): + logger.debug("creating modify volume group response for volume group : {0}".format(volume_group)) + + response = volumegroup_pb2.ModifyVolumeGroupMembershipResponse(volume_group=volumegroup_pb2.VolumeGroup( + volume_group_id=get_volume_group_id(volume_group, None), + volumes=_generate_volumes_response(volume_group.volumes))) + logger.debug("finished creating volume group response : {0}".format(response)) + + return response + + def generate_csi_create_snapshot_response(new_snapshot, system_id, source_volume_id): logger.debug("creating create snapshot response for snapshot : {0}".format(new_snapshot)) @@ -443,10 +504,7 @@ def 
generate_csi_validate_volume_capabilities_response(volume_context, volume_ca def validate_delete_volume_request(request): logger.debug("validating delete volume request") - if request.volume_id == "": - raise ValidationException("Volume id cannot be empty") - - validate_secrets(request.secrets) + _validate_minimum_request_fields(request, ["volume_id"]) logger.debug("delete volume validation finished") @@ -471,7 +529,7 @@ def validate_publish_volume_request(request): validate_csi_volume_capability(request.volume_capability) - validate_secrets(request.secrets) + _validate_minimum_request_fields(request, ["node_id"]) _validate_node_id(request.node_id) @@ -486,6 +544,10 @@ def get_snapshot_id_info(snapshot_id): return get_object_id_info(snapshot_id, servers_settings.SNAPSHOT_TYPE_NAME) +def get_volume_group_id_info(volume_group_id): + return get_object_id_info(volume_group_id, servers_settings.VOLUME_GROUP_TYPE_NAME) + + def _get_context_from_volume(volume): return {servers_settings.VOLUME_CONTEXT_VOLUME_NAME: volume.name, servers_settings.VOLUME_CONTEXT_ARRAY_ADDRESS: ",".join( @@ -498,7 +560,7 @@ def _get_context_from_volume(volume): def get_object_id_info(full_object_id, object_type): logger.debug("getting {0} info for id : {1}".format(object_type, full_object_id)) splitted_object_id = full_object_id.split(servers_settings.PARAMETERS_OBJECT_ID_INFO_DELIMITER) - system_id, wwn, internal_id = None, None, None + system_id, strong_id, internal_id = None, None, None if len(splitted_object_id) == 2: array_type, object_id = splitted_object_id elif len(splitted_object_id) == 3: @@ -507,13 +569,15 @@ def get_object_id_info(full_object_id, object_type): raise ObjectIdError(object_type, full_object_id) splitted_id = object_id.split(servers_settings.PARAMETERS_OBJECT_IDS_DELIMITER) if len(splitted_id) == 1: - wwn = splitted_id[0] + strong_id = splitted_id[0] elif len(splitted_id) == 2: - internal_id, wwn = splitted_id + internal_id, strong_id = splitted_id else: raise 
ObjectIdError(object_type, full_object_id) - logger.debug("volume id : {0}, array type :{1}".format(object_id, array_type)) - return ObjectIdInfo(array_type=array_type, system_id=system_id, internal_id=internal_id, uid=wwn) + logger.debug("{0} id : {1}, array type :{2}".format(object_type, object_id, array_type)) + if object_type == servers_settings.VOLUME_GROUP_TYPE_NAME: + return VolumeGroupIdInfo(array_type=array_type, internal_id=internal_id, name=strong_id) + return ObjectIdInfo(array_type=array_type, system_id=system_id, internal_id=internal_id, uid=strong_id) def choose_connectivity_type(connectivity_types): @@ -563,7 +627,7 @@ def validate_unpublish_volume_request(request): _validate_object_id(request.volume_id) - validate_secrets(request.secrets) + _validate_minimum_request_fields(request, ["volume_id"]) _validate_node_id(request.node_id) @@ -572,10 +636,14 @@ def validate_unpublish_volume_request(request): def validate_addons_request(request, replication_type): logger.debug("validating addons request") + minimum_request_fields = [] + replication_source_field = getattr(request, "replication_source") + if not replication_source_field: + minimum_request_fields.append("volume_id") - logger.debug("validating volume id") - if request.volume_id == "" or (replication_type == REPLICATION_TYPE_MIRROR and request.replication_id == ""): - raise ValidationException(messages.VOLUME_ID_SHOULD_NOT_BE_EMPTY_MESSAGE) + if replication_type == REPLICATION_TYPE_MIRROR: + minimum_request_fields.append("replication_id") + _validate_minimum_request_fields(request, minimum_request_fields) if replication_type == REPLICATION_TYPE_EAR: logger.debug("validating obsolete non-EAR parameters") @@ -588,8 +656,6 @@ def validate_addons_request(request, replication_type): if copy_type not in (REPLICATION_COPY_TYPE_SYNC, REPLICATION_COPY_TYPE_ASYNC): raise ValidationException(messages.INVALID_REPLICATION_COPY_TYPE_MESSAGE.format(copy_type)) - validate_secrets(request.secrets) - 
logger.debug("addons request validation finished") @@ -615,10 +681,7 @@ def get_addons_replication_type(request): return replication_type -def generate_addons_replication_request(request, replication_type): - volume_id_info = get_volume_id_info(request.volume_id) - volume_internal_id = volume_id_info.ids.internal_id - +def generate_addons_replication_request(request, replication_type, volume_internal_id): other_volume_internal_id = _get_other_volume_internal_id(request, replication_type) other_system_id = request.parameters.get(servers_settings.PARAMETERS_SYSTEM_ID) @@ -685,6 +748,14 @@ def join_object_prefix_with_name(prefix, name): return name +def validate_volume_idempotency(volume, required_bytes, source_id): + volume_capacity_bytes = volume.capacity_bytes + if not source_id and volume_capacity_bytes < required_bytes: + raise array_errors.ObjectAlreadyExistError(volume.name, "size", volume_capacity_bytes, required_bytes) + if source_id and source_id != volume.source_id: + raise array_errors.ObjectAlreadyExistError(volume.name, "source id", volume.source_id, source_id) + + def validate_parameters_match_source_volume(space_efficiency, required_bytes, volume): _validate_space_efficiency_match(space_efficiency, volume) volume_capacity_bytes = volume.capacity_bytes @@ -710,13 +781,82 @@ def get_initiators_connectivity_type(initiators, connectivity_type): def get_connectivity_type_ports(initiators, connectivity_type): + _validate_connectivity_type(connectivity_type) ports = initiators.get_by_connectivity_type(connectivity_type) if ports: return ports raise array_errors.NoPortFoundByConnectivityType(initiators, connectivity_type) +def _validate_connectivity_type(connectivity_type): + if connectivity_type != array_settings.NVME_OVER_FC_CONNECTIVITY_TYPE and \ + connectivity_type != array_settings.FC_CONNECTIVITY_TYPE and \ + connectivity_type != array_settings.ISCSI_CONNECTIVITY_TYPE and connectivity_type: + raise 
array_errors.UnsupportedConnectivityTypeError(connectivity_type) + + def split_string(string, delimiter=' '): if isinstance(string, str): return string.split(delimiter) return string + + +def validate_delete_volume_group_request(request): + logger.debug("validating delete volume group request") + + _validate_minimum_request_fields(request, ["volume_group_id"]) + + logger.debug("delete volume group validation finished") + + +def validate_modify_volume_group_request(request): + logger.debug("validating modify volume group request") + + _validate_minimum_request_fields(request, ["volume_group_id"]) + + logger.debug("modify volume group validation finished") + + +def get_object_final_name(volume_parameters, name, array_mediator, object_type): + prefix = "" + if volume_parameters.prefix: + prefix = volume_parameters.prefix + if len(prefix) > array_mediator.max_object_prefix_length: + raise array_errors.InvalidArgumentError( + "The {} name prefix '{}' is too long, max allowed length is {}".format( + object_type, + prefix, + array_mediator.max_object_prefix_length + ) + ) + if not prefix: + prefix = array_mediator.default_object_prefix + full_name = join_object_prefix_with_name(prefix, name) + if len(full_name) > array_mediator.max_object_name_length: + hashed_name = hash_string(name) + full_name = join_object_prefix_with_name(prefix, hashed_name) + return full_name[:array_mediator.max_object_name_length] + + +def get_replication_object_type_and_id_info(request): + object_id = request.volume_id + object_type = servers_settings.VOLUME_TYPE_NAME + + replication_source = request.replication_source + if replication_source and replication_source.ListFields(): + logger.info(replication_source) + if replication_source.HasField(servers_settings.VOLUME_GROUP_TYPE_NAME): + object_id = replication_source.volumegroup.volume_group_id + object_type = servers_settings.VOLUME_GROUP_TYPE_NAME + elif replication_source.HasField(servers_settings.VOLUME_TYPE_NAME): + object_id = 
replication_source.volume.volume_id + object_type = servers_settings.VOLUME_TYPE_NAME + else: + logger.error(messages.UNSUPPORTED_REPLICATION_SOURCE_TYPE_MESSAGE) + raise ValidationException(messages.UNSUPPORTED_REPLICATION_SOURCE_TYPE_MESSAGE) + object_id_info = get_object_id_info(object_id, object_type) + return object_type, object_id_info + + +def is_call_home_enabled(): + return getenv(servers_settings.ENABLE_CALL_HOME_ENV_VAR, 'true') == 'true' diff --git a/controllers/tests/array_action/ds8k/test_array_mediator_ds8k.py b/controllers/tests/array_action/ds8k/test_array_mediator_ds8k.py index 019eb9275..b57bd88a3 100644 --- a/controllers/tests/array_action/ds8k/test_array_mediator_ds8k.py +++ b/controllers/tests/array_action/ds8k/test_array_mediator_ds8k.py @@ -116,7 +116,7 @@ def test_validate_space_efficiency_fail(self): def test_get_volume_with_no_pool(self): with self.assertRaises(array_errors.PoolParameterIsMissing): - self.array.get_volume(common_settings.VOLUME_NAME, None, False) + self.array.get_volume(common_settings.VOLUME_NAME, None, False, None) def _test_get_volume(self, with_cache=False): if with_cache: @@ -126,7 +126,7 @@ def _test_get_volume(self, with_cache=False): self.array.volume_cache.get.return_value = None self.client_mock.get_volumes_by_pool.return_value = [self.volume_response] volume = self.array.get_volume(self.volume_response.name, pool=self.volume_response.pool, - is_virt_snap_func=False) + is_virt_snap_func=False, source_type=None) self.assertEqual(self.volume_response.name, volume.name) self.array.volume_cache.add_or_delete.assert_called_once_with(self.volume_response.name, @@ -146,7 +146,7 @@ def test_get_volume_with_pool_context(self): self.volume_response, ] volume = self.array.get_volume(self.volume_response.name, pool=self.volume_response.pool, - is_virt_snap_func=False) + is_virt_snap_func=False, source_type=None) self.assertEqual(self.volume_response.name, volume.name) 
self.client_mock.get_volumes_by_pool.assert_called_once_with(self.volume_response.pool) @@ -156,7 +156,7 @@ def test_get_volume_with_pool_context_not_found(self): ] with self.assertRaises(array_errors.ObjectNotFoundError): self.array.get_volume(ds8k_settings.VOLUME_FAKE_NAME, pool=self.volume_response.pool, - is_virt_snap_func=False) + is_virt_snap_func=False, source_type=None) def test_create_volume_with_default_space_efficiency_success(self): self._test_create_volume_success(space_efficiency=SPACE_EFFICIENCY_NONE) diff --git a/controllers/tests/array_action/svc/array_mediator_svc_test.py b/controllers/tests/array_action/svc/array_mediator_svc_test.py index 2d0f113d1..6e2ec5272 100644 --- a/controllers/tests/array_action/svc/array_mediator_svc_test.py +++ b/controllers/tests/array_action/svc/array_mediator_svc_test.py @@ -1,4 +1,5 @@ import unittest +from datetime import datetime, timedelta from unittest.mock import MagicMock from mock import patch, Mock, call, PropertyMock @@ -7,21 +8,22 @@ from pysvc.unified.response import CLIFailureError, SVCResponse import controllers.array_action.errors as array_errors -from controllers.tests import utils import controllers.tests.array_action.svc.test_settings as svc_settings import controllers.tests.array_action.test_settings as array_settings import controllers.tests.common.test_settings as common_settings +from controllers.array_action.array_action_types import ReplicationRequest from controllers.array_action.array_mediator_svc import SVCArrayMediator, build_kwargs_from_parameters, \ FCMAP_STATUS_DONE, YES -from controllers.common.node_info import Initiators -from controllers.array_action.settings import REPLICATION_TYPE_MIRROR, REPLICATION_TYPE_EAR,\ +from controllers.array_action.settings import REPLICATION_TYPE_MIRROR, REPLICATION_TYPE_EAR, \ RCRELATIONSHIP_STATE_READY, ENDPOINT_TYPE_PRODUCTION -from controllers.array_action.array_action_types import ReplicationRequest -from controllers.tests.common.test_settings 
import OBJECT_INTERNAL_ID, \ - OTHER_OBJECT_INTERNAL_ID, REPLICATION_NAME, SYSTEM_ID, COPY_TYPE +from controllers.common.config import config +from controllers.common.node_info import Initiators from controllers.common.settings import ARRAY_TYPE_SVC, SPACE_EFFICIENCY_THIN, SPACE_EFFICIENCY_COMPRESSED, \ SPACE_EFFICIENCY_DEDUPLICATED_COMPRESSED, SPACE_EFFICIENCY_DEDUPLICATED_THIN, SPACE_EFFICIENCY_DEDUPLICATED, \ - SPACE_EFFICIENCY_THICK, VOLUME_GROUP_NAME_SUFFIX, EAR_VOLUME_FC_MAP_COUNT, SCSI_PROTOCOL, NVME_PROTOCOL + SPACE_EFFICIENCY_THICK, EAR_VOLUME_FC_MAP_COUNT, SCSI_PROTOCOL, NVME_PROTOCOL +from controllers.tests import utils +from controllers.tests.common.test_settings import OBJECT_INTERNAL_ID, \ + OTHER_OBJECT_INTERNAL_ID, REPLICATION_NAME, SYSTEM_ID, COPY_TYPE EMPTY_BYTES = b"" @@ -159,7 +161,7 @@ def _prepare_mocks_for_ear_replication(self, is_ear_supported=True): if is_ear_supported: self.svc.client.svctask.chvolumereplicationinternals = Mock() - cli_volume = self._get_cli_volume() + cli_volume = self._get_cli_volume(in_volume_group=True) self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(cli_volume) return replication, replication_request @@ -194,15 +196,15 @@ def test_demote_replication_volume_success(self): def test_get_ear_replication_success(self): _, replication_request = self._prepare_mocks_for_ear_replication() + replication_request.replication_policy = None + self.svc.client.svcinfo.lsvolumegroupreplication.side_effect = [Mock(as_single_element=None), Mock()] replication = self.svc.get_replication(replication_request) self.assertEqual(replication.replication_type, REPLICATION_TYPE_EAR) - self.assertEqual(replication.volume_group_id, svc_settings.VOLUME_GROUP_ID_ATTR_KEY) + self.assertEqual(replication.volume_group_id, OBJECT_INTERNAL_ID) - self.svc.client.svcinfo.lsvdisk.assert_called_once_with(object_id=OBJECT_INTERNAL_ID, bytes=True) - 
self.svc.client.svcinfo.lsvolumegroupreplication.assert_called_once_with(object_id=svc_settings. - VOLUME_GROUP_ID_ATTR_KEY) + self.svc.client.svcinfo.lsvolumegroupreplication.assert_called_with(object_id=OBJECT_INTERNAL_ID) def test_get_ear_replication_not_supported(self): _, replication_request = self._prepare_mocks_for_ear_replication(is_ear_supported=False) @@ -220,26 +222,21 @@ def test_get_ear_replication_illegal_mode_failure(self): replication = self.svc.get_replication(replication_request) self.assertEqual(replication, None) - self.svc.client.svcinfo.lsvolumegroupreplication.assert_called_once_with(object_id=svc_settings. - VOLUME_GROUP_ID_ATTR_KEY) + self.svc.client.svcinfo.lsvolumegroupreplication.assert_called_once_with(object_id=OBJECT_INTERNAL_ID) def test_create_ear_replication_success(self): _, replication_request = self._prepare_mocks_for_ear_replication() self.svc.client.svcinfo.lsvolumegroupreplication.return_value = Mock(as_single_element=None) - self.svc.client.svctask.mkvolumegroup.return_value = Mock(response=(b"id [1]\n", b"")) self.svc.create_replication(replication_request) - self.svc.client.svctask.mkvolumegroup.assert_called_once_with(name=common_settings. 
- SOURCE_VOLUME_NAME + VOLUME_GROUP_NAME_SUFFIX) - self.svc.client.svctask.chvolumegroup.assert_called_once_with(object_id=int(svc_settings.DUMMY_INTERNAL_ID1), - replicationpolicy=REPLICATION_NAME) + self.svc.client.svctask.chvolumegroup.assert_called_with(object_id=OBJECT_INTERNAL_ID, + replicationpolicy=REPLICATION_NAME) def test_create_ear_replication_not_supported(self): _, replication_request = self._prepare_mocks_for_ear_replication(is_ear_supported=False) self.svc.create_replication(replication_request) - self.svc.client.svcinfo.mkvolumegroup.assert_not_called() self.svc.client.svcinfo.chvolumegroup.assert_not_called() def test_promote_ear_replication_volume_from_independent(self): @@ -260,15 +257,12 @@ def test_delete_ear_replication_success(self): self.svc.delete_replication(replication) self.svc.client.svctask.chvolumegroup.assert_called_once_with(object_id=OBJECT_INTERNAL_ID, noreplicationpolicy=True) - self.svc.client.svctask.rmvolumegroup.assert_called_once_with(object_id=OBJECT_INTERNAL_ID) def test_delete_ear_replication_not_supported(self): replication, _ = self._prepare_mocks_for_ear_replication(is_ear_supported=False) self.svc.delete_replication(replication) self.svc.client.svcinfo.chvolumegroup.assert_not_called() - self.svc.client.svctask.chvdisk.assert_not_called() - self.svc.client.svctask.rmvolumegroup.assert_not_called() def test_demote_ear_replication_volume(self): replication, _ = self._prepare_mocks_for_ear_replication(is_ear_supported=True) @@ -300,7 +294,8 @@ def _test_mediator_method_client_cli_failure_error(self, mediator_method, args, def _test_get_volume_lsvdisk_cli_failure_error(self, volume_name, error_message_id, expected_error): self._test_mediator_method_client_cli_failure_error(self.svc.get_volume, (volume_name, - common_settings.DUMMY_POOL1, False), + common_settings.DUMMY_POOL1, False, + None), self.svc.client.svcinfo.lsvdisk, error_message_id, expected_error) @@ -319,7 +314,7 @@ def _test_get_volume(self, 
get_cli_volume_args=None, is_virt_snap_func=False, ls cli_volume_mock = Mock(as_single_element=self._get_cli_volume(**get_cli_volume_args)) self.svc.client.svcinfo.lsvdisk.return_value = cli_volume_mock volume = self.svc.get_volume(common_settings.VOLUME_NAME, pool=common_settings.DUMMY_POOL1, - is_virt_snap_func=is_virt_snap_func) + is_virt_snap_func=is_virt_snap_func, source_type=None) self.assertEqual(array_settings.DUMMY_CAPACITY_INT, volume.capacity_bytes) self.assertEqual(common_settings.DUMMY_POOL1, volume.pool) self.assertEqual(ARRAY_TYPE_SVC, volume.array_type) @@ -349,7 +344,7 @@ def test_get_volume_hyperswap_has_no_source(self): self._prepare_fcmaps_for_hyperswap() volume = self.svc.get_volume(common_settings.VOLUME_NAME, pool=common_settings.DUMMY_POOL1, - is_virt_snap_func=False) + is_virt_snap_func=False, source_type=None) self.assertIsNone(volume.source_id) @@ -362,7 +357,7 @@ def test_get_volume_stretched_return_correct_pools(self): self._prepare_stretched_volume_mock() volume = self.svc.get_volume(common_settings.VOLUME_NAME, pool=common_settings.DUMMY_POOL1, - is_virt_snap_func=False) + is_virt_snap_func=False, source_type=None) self.assertEqual(common_settings.STRETCHED_POOL, volume.pool) @@ -374,7 +369,8 @@ def test_get_volume_returns_nothing(self): vol_ret = Mock(as_single_element=Munch({})) self.svc.client.svcinfo.lsvdisk.return_value = vol_ret with self.assertRaises(array_errors.ObjectNotFoundError): - self.svc.get_volume(common_settings.VOLUME_NAME, pool=common_settings.DUMMY_POOL1, is_virt_snap_func=False) + self.svc.get_volume(common_settings.VOLUME_NAME, pool=common_settings.DUMMY_POOL1, is_virt_snap_func=False, + source_type=None) def _test_create_volume_mkvolume_cli_failure_error(self, error_message_id, expected_error, volume_name=common_settings.VOLUME_NAME): @@ -655,12 +651,17 @@ def _get_cli_volume(with_deduplicated_copy=True, name=common_settings.SOURCE_VOL fc_id="", capacity=array_settings.DUMMY_CAPACITY_STR, thick=False, 
replication_mode=None, - fc_map_count=EAR_VOLUME_FC_MAP_COUNT): + fc_map_count=EAR_VOLUME_FC_MAP_COUNT, + in_volume_group=False): deduplicated_copy = svc_settings.NO_VALUE_ALIAS compressed_copy = svc_settings.NO_VALUE_ALIAS se_copy = svc_settings.NO_VALUE_ALIAS - volume_group_id = svc_settings.DUMMY_VOLUME_GROUP_ID + volume_group_id = "" + volume_group_name = "" + if in_volume_group: + volume_group_id = svc_settings.DUMMY_VOLUME_GROUP_ID + volume_group_name = common_settings.VOLUME_GROUP_NAME if with_deduplicated_copy: deduplicated_copy = YES compressed_copy = YES @@ -678,7 +679,8 @@ def _get_cli_volume(with_deduplicated_copy=True, name=common_settings.SOURCE_VOL svc_settings.VOLUME_COMPRESSED_COPY_ATTR_KEY: compressed_copy, svc_settings.VOLUME_GROUP_ID_ATTR_KEY: volume_group_id, svc_settings.VOLUME_REPLICATION_MODE_ATTR_KEY: replication_mode, - svc_settings.VOLUME_FC_MAP_COUNT_ATTR_KEY: fc_map_count + svc_settings.VOLUME_FC_MAP_COUNT_ATTR_KEY: fc_map_count, + svc_settings.VOLUME_VG_NAME_ATTR_KEY: volume_group_name }) @staticmethod @@ -812,10 +814,7 @@ def test_get_snapshot_lsvolumesnapshot_success(self): self.assertEqual(common_settings.SNAPSHOT_NAME, snapshot.name) filtervalue = self._get_filtervalue(svc_settings.SNAPSHOT_NAME_ATTR_KEY, common_settings.SNAPSHOT_NAME) self.svc.client.svcinfo.lsvolumesnapshot.assert_called_once_with(filtervalue=filtervalue) - self.svc.client.svcinfo.lsvdisk.assert_called_once_with(bytes=True, - filtervalue=self._get_filtervalue( - svc_settings.VOLUME_VDISK_UID_ATTR_KEY, - common_settings.VOLUME_UID)) + self.svc.client.svcinfo.lsvdisk.assert_called_once_with(bytes=True, object_id=common_settings.VOLUME_NAME) def test_get_snapshot_lsvolumesnapshot_not_supported_error(self): with self.assertRaises(array_errors.VirtSnapshotFunctionNotSupportedMessage): @@ -1252,6 +1251,7 @@ def test_delete_snapshot_with_stopfcmap_raise_error(self, mock_warning): def _prepare_mocks_for_delete_snapshot_addsnapshot(self): 
self.svc.client.svctask.addsnapshot = Mock() + self.svc.client.svcinfo.lsvolumesnapshot.return_value = self._mock_cli_object(self._get_cli_snapshot()) def _test_delete_snapshot_rmsnapshot_cli_failure_error(self, error_message_id, expected_error): self._test_mediator_method_client_cli_failure_error(self.svc.delete_snapshot, @@ -1269,6 +1269,18 @@ def test_delete_snapshot_rmsnapshot_success(self): self.svc.delete_snapshot("", common_settings.INTERNAL_SNAPSHOT_ID) self.svc.client.svctask.rmsnapshot.assert_called_once_with(snapshotid=common_settings.INTERNAL_SNAPSHOT_ID) + def test_delete_snapshot_rmsnapshot_by_name_success(self): + self._prepare_mocks_for_delete_snapshot_addsnapshot() + self.svc.delete_snapshot(common_settings.SNAPSHOT_NAME, common_settings.INTERNAL_SNAPSHOT_ID) + self.svc.client.svctask.rmsnapshot.assert_called_once_with(snapshotid=common_settings.INTERNAL_SNAPSHOT_ID) + + def test_delete_snapshot_rmsnapshot_by_name_not_found(self): + self._prepare_mocks_for_delete_snapshot_addsnapshot() + self.svc.client.svcinfo.lsvolumesnapshot.side_effect = CLIFailureError("CMMVC5753E") + self.svc.delete_snapshot(common_settings.SNAPSHOT_NAME, common_settings.INTERNAL_SNAPSHOT_ID) + self.svc.client.svctask.rmsnapshot.assert_not_called() + self.svc.client.svctask.rmvolume.assert_called() + def test_validate_supported_space_efficiency_raise_error(self): space_efficiency = svc_settings.DUMMY_SPACE_EFFICIENCY with self.assertRaises( @@ -1397,19 +1409,19 @@ def _prepare_mocks_for_get_host_by_identifiers_slow(self, svc_response, custom_h self._prepare_mocks_for_get_host_by_identifiers_no_hosts() host_1 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID1, array_settings.DUMMY_HOST_NAME1, nqn_list=[ array_settings.DUMMY_NVME_NQN1], - wwpns_list=[array_settings.DUMMY_FC_WWN1], - iscsi_names_list=[array_settings.DUMMY_NODE1_IQN]) + wwpns_list=[array_settings.DUMMY_FC_WWN1], + iscsi_names_list=[array_settings.DUMMY_NODE1_IQN]) host_2 = 
self._get_host_as_munch(array_settings.DUMMY_HOST_ID2, array_settings.DUMMY_HOST_NAME2, nqn_list=[ array_settings.DUMMY_NVME_NQN2], - wwpns_list=[array_settings.DUMMY_FC_WWN2], - iscsi_names_list=[array_settings.DUMMY_NODE2_IQN]) + wwpns_list=[array_settings.DUMMY_FC_WWN2], + iscsi_names_list=[array_settings.DUMMY_NODE2_IQN]) if custom_host: host_3 = custom_host else: host_3 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID3, array_settings.DUMMY_HOST_NAME3, nqn_list=[ array_settings.DUMMY_NVME_NQN3], - wwpns_list=[array_settings.DUMMY_FC_WWN3], iscsi_names_list=[ - array_settings.DUMMY_NODE3_IQN]) + wwpns_list=[array_settings.DUMMY_FC_WWN3], iscsi_names_list=[ + array_settings.DUMMY_NODE3_IQN]) hosts = [host_1, host_2, host_3] self.svc.client.svcinfo.lshost = Mock() self.svc.client.svcinfo.lshost.return_value = self._get_hosts_list_result(hosts) @@ -1550,13 +1562,13 @@ def test_get_host_by_identifiers_slow_no_other_ports_return_nvme_host(self, svc_ def test_get_host_by_identifiers_slow_return_fc_host(self, svc_response): host_1 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID1, array_settings.DUMMY_HOST_NAME1, wwpns_list=[ array_settings.DUMMY_FC_WWN1], - iscsi_names_list=[]) + iscsi_names_list=[]) host_2 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID2, array_settings.DUMMY_HOST_NAME2, wwpns_list=[ array_settings.DUMMY_FC_WWN2], - iscsi_names_list=[]) + iscsi_names_list=[]) host_3 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID3, array_settings.DUMMY_HOST_NAME3, wwpns_list=[ array_settings.DUMMY_FC_WWN3, array_settings.DUMMY_FC_WWN4], - iscsi_names_list=[array_settings.DUMMY_NODE3_IQN]) + iscsi_names_list=[array_settings.DUMMY_NODE3_IQN]) hosts = [host_1, host_2, host_3] self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response) hostname, connectivity_types = self.svc.get_host_by_host_identifiers( @@ -1606,13 +1618,13 @@ def test_get_host_by_identifiers_no_other_ports_return_fc_host(self): def 
test_get_host_by_identifiers_slow_with_wrong_fc_iscsi_raise_not_found(self, svc_response): host_1 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID1, array_settings.DUMMY_HOST_NAME1, wwpns_list=[ array_settings.DUMMY_FC_WWN1], - iscsi_names_list=[]) + iscsi_names_list=[]) host_2 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID2, array_settings.DUMMY_HOST_NAME2, wwpns_list=[ array_settings.DUMMY_FC_WWN3], - iscsi_names_list=[array_settings.DUMMY_NODE2_IQN]) + iscsi_names_list=[array_settings.DUMMY_NODE2_IQN]) host_3 = self._get_host_as_munch(array_settings.DUMMY_HOST_ID3, array_settings.DUMMY_HOST_NAME3, wwpns_list=[ array_settings.DUMMY_FC_WWN3], - iscsi_names_list=[array_settings.DUMMY_NODE3_IQN]) + iscsi_names_list=[array_settings.DUMMY_NODE3_IQN]) hosts = [host_1, host_2, host_3] self._prepare_mocks_for_get_host_by_identifiers_slow(svc_response) with self.assertRaises(array_errors.HostNotFoundError): @@ -1766,8 +1778,8 @@ def _test_map_volume_mkvdiskhostmap_error(self, client_error, expected_error, mo self._test_mediator_method_client_error(self.svc.map_volume, ( common_settings.VOLUME_UID, common_settings.HOST_NAME, array_settings.DUMMY_CONNECTIVITY_TYPE), - self.svc.client.svctask.mkvdiskhostmap, client_error, - expected_error) + self.svc.client.svctask.mkvdiskhostmap, client_error, + expected_error) def test_map_volume_mkvdiskhostmap_errors(self): self._test_map_volume_mkvdiskhostmap_error(svc_errors.CommandExecutionError("CMMVC5804E"), @@ -1808,8 +1820,8 @@ def test_map_volume_nvme_success(self): def _test_unmap_volume_rmvdiskhostmap_error(self, client_error, expected_error): self._test_mediator_method_client_error(self.svc.unmap_volume, ( common_settings.VOLUME_UID, common_settings.HOST_NAME), - self.svc.client.svctask.rmvdiskhostmap, client_error, - expected_error) + self.svc.client.svctask.rmvdiskhostmap, client_error, + expected_error) def test_unmap_volume_rmvdiskhostmap_errors(self): 
self._test_unmap_volume_rmvdiskhostmap_error(svc_errors.CommandExecutionError("CMMVC5753E"), @@ -1830,9 +1842,9 @@ def test_unmap_volume_success(self): def _prepare_mocks_for_get_iscsi_targets(self, portset_id=None): host = self._get_host_as_munch(array_settings.DUMMY_HOST_ID1, common_settings.HOST_NAME, wwpns_list=[ array_settings.DUMMY_FC_WWN1], - iscsi_names_list=[array_settings.DUMMY_NODE1_IQN, - array_settings.DUMMY_NODE2_IQN], - portset_id=portset_id) + iscsi_names_list=[array_settings.DUMMY_NODE1_IQN, + array_settings.DUMMY_NODE2_IQN], + portset_id=portset_id) self.svc.client.svcinfo.lshost = Mock() self.svc.client.svcinfo.lshost.return_value = Mock(as_single_element=host) @@ -2082,6 +2094,13 @@ def test_create_host_iscsi_success(self): iscsiname=array_settings.DUMMY_NODE1_IQN, iogrp=array_settings.DUMMY_MULTIPLE_IO_GROUP_STRING) + def test_create_host_iscsi_with_empty_io_groupsuccess(self): + self.svc.create_host(common_settings.HOST_NAME, Initiators([], [], [array_settings.DUMMY_NODE1_IQN]), + array_settings.ISCSI_CONNECTIVITY_TYPE, "") + self.svc.client.svctask.mkhost.assert_called_once_with(name=common_settings.HOST_NAME, + iscsiname=array_settings.DUMMY_NODE1_IQN, + iogrp=common_settings.DUMMY_FULL_IO_GROUP) + def test_create_host_fc_when_two_ports_are_not_valid_failed(self): self.svc.client.svctask.mkhost.side_effect = [CLIFailureError('CMMVC5867E'), CLIFailureError('CMMVC5867E')] with self.assertRaises(array_errors.NoPortIsValid): @@ -2090,8 +2109,16 @@ def test_create_host_fc_when_two_ports_are_not_valid_failed(self): array_settings.FC_CONNECTIVITY_TYPE, array_settings.DUMMY_MULTIPLE_IO_GROUP_STRING) self.assertEqual(self.svc.client.svctask.mkhost.call_count, 2) - def test_create_host_with_connectivity_type_failed(self): + def test_create_host_with_empty_ports_failed(self): with self.assertRaises(array_errors.NoPortFoundByConnectivityType): + self.svc.create_host(common_settings.HOST_NAME, + Initiators([], [], []), + "", + 
array_settings.DUMMY_MULTIPLE_IO_GROUP_STRING) + self.svc.client.svctask.mkhost.assert_not_called() + + def test_create_host_with_connectivity_type_failed(self): + with self.assertRaises(array_errors.UnsupportedConnectivityTypeError): self.svc.create_host(common_settings.HOST_NAME, Initiators([], [], [array_settings.DUMMY_NODE1_IQN]), svc_settings.MKHOST_NVME_PROTOCOL_VALUE, @@ -2186,6 +2213,14 @@ def test_add_ports_to_host_falied(self): array_settings.ISCSI_CONNECTIVITY_TYPE), self.svc.client.svctask.addhostport, Exception("Failed"), Exception) + def test_add_ports_to_not_exist_host_falied(self): + self.svc.client.svctask.addhostport.side_effect = [CLIFailureError('CMMVC5753E')] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.add_ports_to_host(common_settings.HOST_NAME, + Initiators([], [array_settings.DUMMY_FC_WWN1, array_settings.DUMMY_FC_WWN2], []), + array_settings.FC_CONNECTIVITY_TYPE) + self.assertEqual(self.svc.client.svctask.addhostport.call_count, 1) + def test_remove_nvme_ports_from_host_success(self): self.svc.remove_ports_from_host(common_settings.HOST_NAME, [array_settings.DUMMY_NVME_NQN1], @@ -2222,6 +2257,14 @@ def test_remove_ports_from_host_falied(self): array_settings.ISCSI_CONNECTIVITY_TYPE), self.svc.client.svctask.rmhostport, Exception("Failed"), Exception) + def test_remove_ports_from_not_exist_host_falied(self): + self.svc.client.svctask.rmhostport.side_effect = [CLIFailureError('CMMVC5753E')] + with self.assertRaises(array_errors.HostNotFoundError): + self.svc.remove_ports_from_host(common_settings.HOST_NAME, + [array_settings.DUMMY_FC_WWN1, array_settings.DUMMY_FC_WWN2], + array_settings.ISCSI_CONNECTIVITY_TYPE) + self.assertEqual(self.svc.client.svctask.rmhostport.call_count, 1) + def _prepare_mocks_for_get_host_with_ports(self, attribute_name): self.svc.client.svcinfo.lshost = Mock() self.svc.client.svcinfo.lshost.return_value = Mock(as_single_element=Munch({ @@ -2365,3 +2408,145 @@ def 
test_change_host_protocol_errors(self): self._test_change_host_protocol_chhost_errors(CLIFailureError('CMMVC9331E'), array_errors.CannotChangeHostProtocolBecauseOfMappedPorts) self._test_change_host_protocol_chhost_errors(Exception("Failed"), Exception) + + def _mock_cli_volume_group(self, uid="", volume_count=0): + volume_group = {svc_settings.VOLUME_GROUP_VOLUME_COUNT_ATTR_KEY: str(volume_count), + svc_settings.VOLUME_GROUP_NAME_ATTR_KEY: common_settings.VOLUME_GROUP_NAME, + svc_settings.VOLUME_GROUP_ATTR_KEY: common_settings.INTERNAL_VOLUME_GROUP_ID} + if uid: + volume_group["uid"] = uid + + return Munch(volume_group) + + def _prepare_lsvolumegroup(self, no_return=None, volume_count=0): + if no_return: + return_value = None + else: + return_value = self._mock_cli_volume_group(volume_count=volume_count) + self.svc.client.svcinfo.lsvolumegroup.return_value = Mock(as_single_element=return_value) + + def test_create_volume_group_success(self): + self.svc.client.svctask.mkvolumegroup.return_value = Mock(response=(b"id [1]\n", b"")) + self._prepare_lsvolumegroup() + + self.svc.create_volume_group(common_settings.VOLUME_GROUP_NAME) + + self.svc.client.svcinfo.lsvolumegroup.assert_called_once_with(object_id=1) + self.svc.client.svctask.mkvolumegroup.assert_called_once_with(name=common_settings.VOLUME_GROUP_NAME) + + def test_get_volume_group_success(self): + self._prepare_lsvolumegroup() + volume_group = self.svc.get_volume_group(common_settings.INTERNAL_VOLUME_GROUP_ID) + + self.svc.client.svcinfo.lsvolumegroup.assert_called_once_with( + object_id=common_settings.INTERNAL_VOLUME_GROUP_ID) + self.assertEqual(0, len(volume_group.volumes)) + + def test_get_volume_group_not_found_failed(self): + self._prepare_lsvolumegroup(no_return=True) + with self.assertRaises(array_errors.ObjectNotFoundError): + self.svc.get_volume_group(common_settings.INTERNAL_VOLUME_GROUP_ID) + + def test_get_volume_group_with_volumes_success(self): + self._prepare_lsvolumegroup(volume_count=1) + 
cli_volume = self._get_cli_volume() + self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_list=[cli_volume]) + + volume_group = self.svc.get_volume_group(common_settings.INTERNAL_VOLUME_GROUP_ID) + + self.svc.client.svcinfo.lsvolumegroup.assert_called_once_with( + object_id=common_settings.INTERNAL_VOLUME_GROUP_ID) + self.svc.client.svcinfo.lsvdisk.assert_called_once_with(filtervalue='volume_group_name=volume_group_name', + bytes=True) + self.assertEqual(common_settings.VOLUME_GROUP_NAME, volume_group.name) + self.assertEqual(1, len(volume_group.volumes)) + + def test_delete_volume_group_success(self): + self.svc.delete_volume_group(common_settings.INTERNAL_VOLUME_GROUP_ID) + + self.svc.client.svctask.rmvolumegroup.assert_called_once_with( + object_id=common_settings.INTERNAL_VOLUME_GROUP_ID) + + def test_add_volume_to_volume_group_success(self): + cli_volume = self._get_cli_volume() + self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_single_element=cli_volume) + + self.svc.add_volume_to_volume_group(common_settings.INTERNAL_VOLUME_GROUP_ID, common_settings.VOLUME_UID) + + self.svc.client.svctask.chvdisk.assert_called_once_with(vdisk_id=common_settings.INTERNAL_VOLUME_ID, + volumegroup=common_settings.INTERNAL_VOLUME_GROUP_ID) + + def test_add_volume_to_volume_group_already_in_volume_group_failed(self): + cli_volume = self._get_cli_volume(in_volume_group=True) + self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_single_element=cli_volume) + + with self.assertRaises(array_errors.VolumeAlreadyInVolumeGroup): + self.svc.add_volume_to_volume_group(common_settings.INTERNAL_VOLUME_GROUP_ID, common_settings.VOLUME_UID) + + self.svc.client.svctask.chvdisk.assert_not_called() + + def test_remove_volume_from_volume_group_success(self): + cli_volume = self._get_cli_volume(in_volume_group=True) + self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_single_element=cli_volume) + self.svc.remove_volume_from_volume_group(common_settings.VOLUME_UID) + + 
self.svc.client.svctask.chvdisk.assert_called_once_with(vdisk_id=common_settings.INTERNAL_VOLUME_ID, + novolumegroup=True) + + @patch('{}.is_call_home_enabled'.format('controllers.array_action.array_mediator_svc')) + @patch('controllers.array_action.array_mediator_svc.SVC_REGISTRATION_CACHE') + def test_register_plugin_when_there_is_no_registered_storage_success(self, mock_cache, is_enabled_mock): + mock_cache.get.return_value = {} + is_enabled_mock.return_value = True + + self._test_register_plugin_success(True) + + @patch('{}.is_call_home_enabled'.format('controllers.array_action.array_mediator_svc')) + @patch('controllers.array_action.array_mediator_svc.SVC_REGISTRATION_CACHE') + def test_register_plugin_when_unique_key_is_registered_more_than_two_hours_ago_success( + self, mock_cache, is_enabled_mock): + current_time = datetime.now() + three_hours_ago = current_time - timedelta(hours=3) + mock_cache.get.return_value = {'test_key': three_hours_ago} + is_enabled_mock.return_value = True + + self._test_register_plugin_success(True) + + @patch('{}.is_call_home_enabled'.format('controllers.array_action.array_mediator_svc')) + @patch('controllers.array_action.array_mediator_svc.SVC_REGISTRATION_CACHE') + def test_do_not_register_plugin_when_unique_key_is_registered_less_than_two_hours_ago_success( + self, mock_cache, is_enabled_mock): + current_time = datetime.now() + one_hours_ago = current_time - timedelta(hours=1) + mock_cache.get.return_value = {'test_key': one_hours_ago} + is_enabled_mock.return_value = True + + self._test_register_plugin_success(False) + + @patch('{}.is_call_home_enabled'.format('controllers.array_action.array_mediator_svc')) + @patch('controllers.array_action.array_mediator_svc.SVC_REGISTRATION_CACHE') + def test_assert_no_exception_when_register_fail_success(self, mock_cache, is_enabled_mock): + self.svc.client.svctask.registerplugin.side_effect = [Exception] + mock_cache.get.return_value = {} + is_enabled_mock.return_value = True + + 
self._test_register_plugin_success(True) + + @patch('{}.is_call_home_enabled'.format('controllers.array_action.array_mediator_svc')) + @patch('controllers.array_action.array_mediator_svc.SVC_REGISTRATION_CACHE') + def test_do_not_register_plugin_when_call_home_is_not_enabled_success(self, mock_cache, is_enabled_mock): + is_enabled_mock.return_value = False + + self._test_register_plugin_success(False) + self.assertEqual(mock_cache.get.call_count, 0) + + def _test_register_plugin_success(self, should_register): + self.svc.register_plugin('test_key', 'some_metadata') + + if should_register: + self.svc.client.svctask.registerplugin.assert_called_once_with(name='block.csi.ibm.com', + uniquekey='test_key', + version=config.identity.version, + metadata='some_metadata') + else: + self.svc.client.svctask.registerplugin.not_called() diff --git a/controllers/tests/array_action/svc/test_settings.py b/controllers/tests/array_action/svc/test_settings.py index c2bac55ed..26a972eda 100644 --- a/controllers/tests/array_action/svc/test_settings.py +++ b/controllers/tests/array_action/svc/test_settings.py @@ -61,6 +61,7 @@ VOLUME_GROUP_ID_ATTR_KEY = "volume_group_id" VOLUME_REPLICATION_MODE_ATTR_KEY = "replication_mode" VOLUME_FC_MAP_COUNT_ATTR_KEY = "fc_map_count" +VOLUME_VG_NAME_ATTR_KEY = "volume_group_name" MANY_VALUE = "many" VOLUME_FC_ID_MANY = MANY_VALUE @@ -155,3 +156,7 @@ MULTIPLE_IO_GROUP_NAMES = ['io_grp0', 'io_grp2'] SINGLE_IO_GROUP_ID = '0' SINGLE_IO_GROUP_NAME = 'io_grp0' + +VOLUME_GROUP_VOLUME_COUNT_ATTR_KEY = "volume_count" +VOLUME_GROUP_NAME_ATTR_KEY = NAME_KEY +VOLUME_GROUP_ATTR_KEY = ID_KEY diff --git a/controllers/tests/array_action/xiv/array_mediator_xiv_tests.py b/controllers/tests/array_action/xiv/array_mediator_xiv_tests.py index 1029bcadf..886c9007e 100644 --- a/controllers/tests/array_action/xiv/array_mediator_xiv_tests.py +++ b/controllers/tests/array_action/xiv/array_mediator_xiv_tests.py @@ -27,14 +27,14 @@ def test_get_volume_raise_correct_errors(self): 
error_msg = array_settings.DUMMY_ERROR_MESSAGE self.mediator.client.cmd.vol_list.side_effect = [Exception(array_settings.DUMMY_ERROR_MESSAGE)] with self.assertRaises(Exception) as ex: - self.mediator.get_volume(common_settings.VOLUME_NAME, None, False) + self.mediator.get_volume(common_settings.VOLUME_NAME, None, False, None) self.assertIn(error_msg, str(ex.exception)) def test_get_volume_return_correct_value(self): xcli_volume = utils.get_mock_xiv_volume(10, common_settings.VOLUME_NAME, common_settings.VOLUME_UID) self.mediator.client.cmd.vol_list.return_value = Mock(as_single_element=xcli_volume) - volume = self.mediator.get_volume(common_settings.VOLUME_NAME, None, False) + volume = self.mediator.get_volume(common_settings.VOLUME_NAME, None, False, None) self.assertEqual(xcli_volume.capacity * array_settings.DUMMY_SMALL_CAPACITY_INT, volume.capacity_bytes) self.assertEqual(xcli_volume.capacity * array_settings.DUMMY_SMALL_CAPACITY_INT, volume.capacity_bytes) @@ -42,12 +42,12 @@ def test_get_volume_return_correct_value(self): def test_get_volume_raise_illegal_object_name(self): self.mediator.client.cmd.vol_list.side_effect = [xcli_errors.IllegalNameForObjectError("", "", "")] with self.assertRaises(array_errors.InvalidArgumentError): - self.mediator.get_volume(common_settings.VOLUME_NAME, None, False) + self.mediator.get_volume(common_settings.VOLUME_NAME, None, False, None) def test_get_volume_returns_nothing(self): self.mediator.client.cmd.vol_list.return_value = Mock(as_single_element=None) with self.assertRaises(array_errors.ObjectNotFoundError): - self.mediator.get_volume(common_settings.VOLUME_NAME, None, False) + self.mediator.get_volume(common_settings.VOLUME_NAME, None, False, None) @patch("controllers.array_action.array_mediator_xiv.XCLIClient") def test_connect_errors(self, client): diff --git a/controllers/tests/common/test_settings.py b/controllers/tests/common/test_settings.py index c73939650..21a87535f 100644 --- 
a/controllers/tests/common/test_settings.py +++ b/controllers/tests/common/test_settings.py @@ -1,3 +1,4 @@ +from controllers.common import settings as common_settings SECRET_USERNAME_KEY = "username" SECRET_USERNAME_VALUE = "dummy_username" SECRET_PASSWORD_KEY = "password" @@ -14,17 +15,24 @@ SPACE_EFFICIENCY = "thin" VIRT_SNAP_FUNC_TRUE = "true" DUMMY_IO_GROUP = "iogrp1" +DUMMY_FULL_IO_GROUP = common_settings.FULL_IO_GROUP DUMMY_VOLUME_GROUP = "volgrp1" +ID_FORMAT = "a9k:{};{}" + VOLUME_OBJECT_TYPE = "volume" VOLUME_NAME = "volume_name" VOLUME_UID = "volume_wwn" +REAL_VOLUME_UID = "600507607181869980000000000030E8" +REAL_NGUID = "80000000000030E80050760071818699" SOURCE_VOLUME_NAME = "source_volume" SOURCE_ID = "source_id" SOURCE_VOLUME_ID = "source_volume_id" TARGET_VOLUME_ID = "target_volume_id" TARGET_VOLUME_NAME = "target_volume_name" INTERNAL_VOLUME_ID = "internal_volume_id" +REQUEST_VOLUME_ID = ID_FORMAT.format(INTERNAL_VOLUME_ID, VOLUME_UID) +REQUEST_REAL_VOLUME_ID = ID_FORMAT.format(INTERNAL_VOLUME_ID, REAL_VOLUME_UID) NAME_PREFIX = "prefix" @@ -44,3 +52,11 @@ SYSTEM_ID = "system_id" COPY_TYPE = "async" FCS_DELIMITER = ":" + +VOLUME_GROUP_OBJECT_TYPE = "volume group" +VOLUME_GROUP_NAME = "volume_group_name" +VOLUME_GROUP_UID = "volume_group_wwn" +INTERNAL_VOLUME_GROUP_ID = "internal_volume_group_id" +REQUEST_VOLUME_GROUP_ID = ID_FORMAT.format(INTERNAL_VOLUME_GROUP_ID, VOLUME_GROUP_NAME) + +HOST_OBJECT_TYPE = "host" diff --git a/controllers/tests/controller_server/addons_server_test.py b/controllers/tests/controller_server/addons_server_test.py index a46244bcc..7a323ce1b 100644 --- a/controllers/tests/controller_server/addons_server_test.py +++ b/controllers/tests/controller_server/addons_server_test.py @@ -32,8 +32,9 @@ def setUp(self): self.request = ProtoBufMock() self.request.secrets = {"username": SECRET_USERNAME_VALUE, "password": SECRET_PASSWORD_VALUE, "management_address": SECRET_MANAGEMENT_ADDRESS_VALUE} - self.request.volume_id = 
"{}:{};{}".format("A9000", OBJECT_INTERNAL_ID, VOLUME_UID) + self.request.volume_id = "{0}:{1};{1}".format("A9000", OBJECT_INTERNAL_ID) self.request.replication_id = "{}:{};{}".format("A9000", OTHER_OBJECT_INTERNAL_ID, VOLUME_UID) + self.request.replication_source.volumegroup.volume_group_id = self.request.volume_id self.context = utils.FakeContext() def _prepare_replication_mocks(self, replication_type=None, copy_type=COPY_TYPE, is_primary=False, @@ -134,21 +135,8 @@ def test_enable_ear_replication_idempotency_succeeds(self): copy_type=REPLICATION_COPY_TYPE_SYNC, grpc_status=grpc.StatusCode.OK) - def test_enable_ear_replication_volume_in_group_fails(self): - self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume( - volume_group_id=DUMMY_VOLUME_GROUP) - replication_request = self._prepare_request_params(replication_type=REPLICATION_TYPE_EAR, - replication_id="") - self._prepare_replication_mocks() - - self.servicer.EnableVolumeReplication(self.request, self.context) - - self.assertEqual(grpc.StatusCode.FAILED_PRECONDITION, self.context.code) - self.mediator.get_replication.assert_called_once_with(replication_request) - self.mediator.create_replication.assert_not_called() - def test_enable_replication_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) + self._test_request_already_processing("replication_source", self.request.volume_id) def test_enable_replication_with_wrong_secrets(self): self._test_request_with_wrong_secrets() @@ -165,17 +153,6 @@ def test_enable_ear_replication_obsolete_request_parameters_fails(self): def test_enable_ear_replication_succeeds(self): self._test_enable_replication_succeeds(REPLICATION_TYPE_EAR) - def test_enable_ear_replication_idempotency_volume_belongs_to_another_group_fails(self): - self._test_enable_replication_idempotency(replication_type=REPLICATION_TYPE_EAR, - copy_type=REPLICATION_COPY_TYPE_SYNC, - grpc_status=grpc.StatusCode.ALREADY_EXISTS) - 
- def test_enable_ear_replication_idempotency_volume_has_another_policy_fails(self): - self._test_enable_replication_idempotency(replication_type=REPLICATION_TYPE_EAR, - replication_name="", - copy_type=REPLICATION_COPY_TYPE_SYNC, - grpc_status=grpc.StatusCode.ALREADY_EXISTS) - class TestDisableVolumeReplication(BaseReplicationSetUp, CommonControllerTest): @property @@ -215,7 +192,7 @@ def test_disable_replication_idempotency_succeeds(self): self._test_disable_replication_idempotency_succeeds(REPLICATION_TYPE_MIRROR) def test_disable_replication_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) + self._test_request_already_processing("replication_source", self.request.volume_id) def test_disable_replication_with_wrong_secrets(self): self._test_request_with_wrong_secrets() @@ -269,7 +246,7 @@ def test_promote_replication_fails(self): self._test_promote_replication_fails(REPLICATION_TYPE_MIRROR) def test_promote_replication_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) + self._test_request_already_processing("replication_source", self.request.volume_id) def test_promote_replication_with_wrong_secrets(self): self._test_request_with_wrong_secrets() @@ -327,7 +304,7 @@ def test_demote_replication_idempotency_succeeds(self): self.mediator.demote_replication_volume.assert_not_called() def test_demote_replication_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) + self._test_request_already_processing("replication_source", self.request.volume_id) def test_demote_replication_with_wrong_secrets(self): self._test_request_with_wrong_secrets() diff --git a/controllers/tests/controller_server/common.py b/controllers/tests/controller_server/common.py index a82e41f6c..25a70cab2 100644 --- a/controllers/tests/controller_server/common.py +++ b/controllers/tests/controller_server/common.py @@ -17,6 +17,15 @@ def 
mock_array_type(contex, server_path): contex.addCleanup(detect_array_type_patcher.stop) +def mock_utils_method(contex, utils_method_name, return_value=None): + utils_method_path = '.'.join(('controllers.servers.utils', utils_method_name)) + utils_method_patcher = patch(utils_method_path) + utils_method = utils_method_patcher.start() + if return_value: + utils_method.return_value = return_value + contex.addCleanup(utils_method_patcher.stop) + + def mock_mediator(): mediator = Mock() mediator.maximal_volume_size_in_bytes = 10 diff --git a/controllers/tests/controller_server/csi_controller_server_test.py b/controllers/tests/controller_server/csi_controller_server_test.py index 0c03a16d7..8768df53d 100644 --- a/controllers/tests/controller_server/csi_controller_server_test.py +++ b/controllers/tests/controller_server/csi_controller_server_test.py @@ -1,1558 +1,1572 @@ -import abc -import json -import unittest - -import grpc -from csi_general import csi_pb2 -from mock import patch, Mock, MagicMock, call - -import controllers.array_action.errors as array_errors -import controllers.servers.errors as controller_errors -import controllers.servers.settings as servers_settings -from controllers.array_action.array_action_types import ObjectIds -from controllers.array_action.array_mediator_xiv import XIVArrayMediator -from controllers.servers.csi.csi_controller_server import CSIControllerServicer -from controllers.servers.csi.sync_lock import SyncLock -from controllers.tests import utils -from controllers.tests.common.test_settings import (CLONE_VOLUME_NAME, - OBJECT_INTERNAL_ID, - DUMMY_POOL1, SPACE_EFFICIENCY, - DUMMY_IO_GROUP, DUMMY_VOLUME_GROUP, - VOLUME_NAME, SNAPSHOT_NAME, - SNAPSHOT_VOLUME_NAME, - SNAPSHOT_VOLUME_UID, VIRT_SNAP_FUNC_TRUE, SECRET_PASSWORD_VALUE, - SECRET_USERNAME_VALUE, - VOLUME_UID, INTERNAL_VOLUME_ID, DUMMY_POOL2, - SECRET_MANAGEMENT_ADDRESS_VALUE, - NAME_PREFIX, INTERNAL_SNAPSHOT_ID, SOURCE_VOLUME_ID, - SECRET_MANAGEMENT_ADDRESS_KEY, 
SECRET_PASSWORD_KEY, - SECRET_USERNAME_KEY, SECRET) -from controllers.tests.controller_server.common import mock_get_agent, mock_array_type, mock_mediator -from controllers.tests.utils import ProtoBufMock - -CONTROLLER_SERVER_PATH = "controllers.servers.csi.csi_controller_server" - - -class BaseControllerSetUp(unittest.TestCase): - - def setUp(self): - self.servicer = CSIControllerServicer() - - mock_array_type(self, CONTROLLER_SERVER_PATH) - - self.mediator = mock_mediator() - - self.storage_agent = MagicMock() - mock_get_agent(self, CONTROLLER_SERVER_PATH) - - self.request = ProtoBufMock() - self.request.secrets = SECRET - - self.request.parameters = {} - self.request.volume_context = {} - self.volume_capability = utils.get_mock_volume_capability() - self.capacity_bytes = 10 - self.request.capacity_range = Mock() - self.request.capacity_range.required_bytes = self.capacity_bytes - self.context = utils.FakeContext() - - -class CommonControllerTest: - - @property - @abc.abstractmethod - def tested_method(self): - raise NotImplementedError - - @property - @abc.abstractmethod - def tested_method_response_class(self): - raise NotImplementedError - - def _test_create_object_with_empty_name(self): - self.request.name = "" - context = utils.FakeContext() - response = self.tested_method(self.request, context) - self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, context.code) - self.assertIn("name", context.details) - self.assertEqual(self.tested_method_response_class(), response) - - def _test_request_with_wrong_secrets_parameters(self, secrets, message="secret"): - context = utils.FakeContext() - - self.request.secrets = secrets - self.tested_method(self.request, context) - self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, context.code) - self.assertIn(message, context.details) - - def _test_request_with_wrong_secrets(self): - secrets = {SECRET_PASSWORD_KEY: SECRET_PASSWORD_VALUE, - SECRET_MANAGEMENT_ADDRESS_KEY: SECRET_MANAGEMENT_ADDRESS_VALUE} - 
self._test_request_with_wrong_secrets_parameters(secrets) - - secrets = {SECRET_USERNAME_KEY: SECRET_USERNAME_VALUE, - SECRET_MANAGEMENT_ADDRESS_KEY: SECRET_MANAGEMENT_ADDRESS_VALUE} - self._test_request_with_wrong_secrets_parameters(secrets) - - secrets = {SECRET_USERNAME_KEY: SECRET_USERNAME_VALUE, SECRET_PASSWORD_KEY: SECRET_PASSWORD_VALUE} - self._test_request_with_wrong_secrets_parameters(secrets) - - secrets = utils.get_fake_secret_config(system_id="u-") - self._test_request_with_wrong_secrets_parameters(secrets, message="system id") - - self.request.secrets = [] - - def _test_request_already_processing(self, request_attribute, object_id): - with SyncLock(request_attribute, object_id, "test_request_already_processing"): - response = self.tested_method(self.request, self.context) - self.assertEqual(grpc.StatusCode.ABORTED, self.context.code) - self.assertEqual(self.tested_method_response_class, type(response)) - - def _test_request_with_array_connection_exception(self): - self.get_agent.side_effect = [Exception("error")] - context = utils.FakeContext() - self.tested_method(self.request, context) - self.assertEqual(grpc.StatusCode.INTERNAL, context.code) - self.assertIn("error", context.details) - - def _test_request_with_get_array_type_exception(self): - context = utils.FakeContext() - self.detect_array_type.side_effect = [array_errors.FailedToFindStorageSystemType("endpoint")] - self.tested_method(self.request, context) - self.assertEqual(grpc.StatusCode.INTERNAL, context.code) - msg = array_errors.FailedToFindStorageSystemType("endpoint").message - self.assertIn(msg, context.details) - - def _test_request_with_wrong_parameters(self): - context = utils.FakeContext() - parameters = [{}, {"": ""}, {"pool": ""}] - - for request_parameters in parameters: - self.request.parameters = request_parameters - self.tested_method(self.request, context) - self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, context.code) - - -class TestCreateSnapshot(BaseControllerSetUp, 
CommonControllerTest): - - @property - def tested_method(self): - return self.servicer.CreateSnapshot - - @property - def tested_method_response_class(self): - return csi_pb2.CreateSnapshotResponse - - def setUp(self): - super().setUp() - - self.mediator.get_snapshot = Mock() - self.mediator.get_snapshot.return_value = None - - self.mediator.create_snapshot = Mock() - - self.request.name = SNAPSHOT_NAME - self.request.source_volume_id = "{}:{};{}".format("A9000", OBJECT_INTERNAL_ID, SNAPSHOT_VOLUME_UID) - self.mediator.get_object_by_id = Mock() - self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(10, SNAPSHOT_VOLUME_NAME, - VOLUME_UID, "xiv") - self.context = utils.FakeContext() - - def test_create_snapshot_with_empty_name(self): - self._test_create_object_with_empty_name() - - def _prepare_create_snapshot_mocks(self, ): - self.mediator.get_snapshot = Mock() - self.mediator.get_snapshot.return_value = None - self.mediator.create_snapshot = Mock() - self.mediator.create_snapshot.return_value = utils.get_mock_mediator_response_snapshot(10, SNAPSHOT_NAME, - SNAPSHOT_VOLUME_UID, - SNAPSHOT_VOLUME_NAME, - "xiv") - - def _test_create_snapshot_succeeds(self, expected_space_efficiency=None, expected_pool=None, - system_id=None): - self._prepare_create_snapshot_mocks() - - response_snapshot = self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(grpc.StatusCode.OK, self.context.code) - self.mediator.get_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, expected_pool, False) - self.mediator.create_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, - expected_space_efficiency, expected_pool, False) - system_id_part = ':{}'.format(system_id) if system_id else '' - snapshot_id = 'xiv{}:0;{}'.format(system_id_part, SNAPSHOT_VOLUME_UID) - self.assertEqual(snapshot_id, response_snapshot.snapshot.snapshot_id) - - def test_create_snapshot_succeeds(self, ): - 
self._test_create_snapshot_succeeds() - - def test_create_snapshot_with_pool_parameter_succeeds(self, ): - self.request.parameters = {servers_settings.PARAMETERS_POOL: DUMMY_POOL1} - self._test_create_snapshot_succeeds(expected_pool=DUMMY_POOL1) - - def test_create_snapshot_with_space_efficiency_parameter_succeeds(self): - self.mediator.validate_supported_space_efficiency = Mock() - self.request.parameters = {servers_settings.PARAMETERS_SPACE_EFFICIENCY: SPACE_EFFICIENCY} - self._test_create_snapshot_succeeds(expected_space_efficiency=SPACE_EFFICIENCY) - - def test_create_snapshot_with_space_efficiency_and_virt_snap_func_enabled_fail(self): - self.request.parameters = {servers_settings.PARAMETERS_SPACE_EFFICIENCY: SPACE_EFFICIENCY, - servers_settings.PARAMETERS_VIRT_SNAP_FUNC: VIRT_SNAP_FUNC_TRUE} - - self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, self.context.code) - - def test_create_snapshot_already_processing(self): - self._test_request_already_processing("name", self.request.name) - - def _test_create_snapshot_with_by_system_id_parameter(self, system_id, expected_pool): - system_id_part = ':{}'.format(system_id) if system_id else '' - self.request.source_volume_id = "{}{}:{}".format("A9000", system_id_part, SNAPSHOT_VOLUME_UID) - self.request.parameters = {servers_settings.PARAMETERS_BY_SYSTEM: json.dumps( - {"u1": {servers_settings.PARAMETERS_POOL: DUMMY_POOL1}, - "u2": {servers_settings.PARAMETERS_POOL: DUMMY_POOL2}})} - self._test_create_snapshot_succeeds(expected_pool=expected_pool, system_id=system_id) - - def test_create_snapshot_with_by_system_id_parameter_succeeds(self): - self._test_create_snapshot_with_by_system_id_parameter("u1", DUMMY_POOL1) - self._test_create_snapshot_with_by_system_id_parameter("u2", DUMMY_POOL2) - self._test_create_snapshot_with_by_system_id_parameter(None, None) - - def test_create_snapshot_belongs_to_wrong_volume(self): - self.mediator.create_snapshot = Mock() - 
self.mediator.get_snapshot.return_value = utils.get_mock_mediator_response_snapshot(10, SNAPSHOT_NAME, - VOLUME_UID, - "wrong_volume_name", "xiv") - - self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) - - def test_create_snapshot_no_source_volume(self): - self.request.source_volume_id = None - - self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_snapshot_with_wrong_secrets(self, ): - self._test_request_with_wrong_secrets() - - def test_create_snapshot_with_array_connection_exception(self): - self._test_request_with_array_connection_exception() - - def _test_create_snapshot_get_snapshot_raise_error(self, exception, grpc_status): - self.mediator.get_snapshot.side_effect = [exception] - - self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, grpc_status) - self.assertIn(str(exception), self.context.details) - self.mediator.get_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, None, False) - - def test_create_snapshot_get_snapshot_exception(self): - self._test_create_snapshot_get_snapshot_raise_error(exception=Exception("error"), - grpc_status=grpc.StatusCode.INTERNAL) - - def test_create_snapshot_with_get_snapshot_illegal_object_name_exception(self): - self._test_create_snapshot_get_snapshot_raise_error(exception=array_errors.InvalidArgumentError("snapshot"), - grpc_status=grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_snapshot_with_get_snapshot_illegal_object_id_exception(self): - self._test_create_snapshot_get_snapshot_raise_error(exception=array_errors.InvalidArgumentError("volume-id"), - grpc_status=grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_snapshot_with_prefix_too_long_exception(self): - self.request.parameters.update({"snapshot_name_prefix": "a" * 128}) - self.servicer.CreateSnapshot(self.request, 
self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_snapshot_with_get_snapshot_name_too_long_success(self): - self._prepare_create_snapshot_mocks() - self.mediator.max_object_name_length = 63 - self.request.name = "a" * 128 - - self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def create_snapshot_returns_error(self, return_code, err): - self.mediator.create_snapshot.side_effect = [err] - msg = str(err) - - self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, return_code) - self.assertIn(msg, self.context.details) - self.mediator.get_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, None, False) - self.mediator.create_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, None, None, False) - - def test_create_snapshot_with_not_found_exception(self): - self.create_snapshot_returns_error(return_code=grpc.StatusCode.NOT_FOUND, - err=array_errors.ObjectNotFoundError("source_volume")) - - def test_create_snapshot_with_illegal_object_name_exception(self): - self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.InvalidArgumentError("snapshot")) - - def test_create_snapshot_with_snapshot_source_pool_mismatch_exception(self): - self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.SnapshotSourcePoolMismatch("snapshot_pool", "source_pool")) - - def test_create_snapshot_with_same_volume_name_exists_exception(self): - self.create_snapshot_returns_error(return_code=grpc.StatusCode.INTERNAL, - err=array_errors.ExpectedSnapshotButFoundVolumeError("snapshot", - "endpoint")) - - def test_create_snapshot_with_illegal_object_id_exception(self): - self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.InvalidArgumentError("volume-id")) - - def 
test_create_snapshot_with_space_efficiency_not_supported_exception(self): - self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.SpaceEfficiencyNotSupported(["fake"])) - - def test_create_snapshot_with_other_exception(self): - self.create_snapshot_returns_error(return_code=grpc.StatusCode.INTERNAL, err=Exception("error")) - - def test_create_snapshot_with_name_prefix(self): - self.request.name = VOLUME_NAME - self.request.parameters[servers_settings.PARAMETERS_SNAPSHOT_NAME_PREFIX] = NAME_PREFIX - self.mediator.create_snapshot = Mock() - self.mediator.create_snapshot.return_value = utils.get_mock_mediator_response_snapshot(10, SNAPSHOT_NAME, - VOLUME_UID, - SNAPSHOT_VOLUME_NAME, - "xiv") - - self.servicer.CreateSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - full_name = "{}_{}".format(NAME_PREFIX, VOLUME_NAME) - self.mediator.create_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, full_name, None, None, - False) - - -class TestDeleteSnapshot(BaseControllerSetUp, CommonControllerTest): - @property - def tested_method(self): - return self.servicer.DeleteSnapshot - - @property - def tested_method_response_class(self): - return csi_pb2.DeleteSnapshotResponse - - def setUp(self): - super().setUp() - self.mediator.get_snapshot = Mock() - self.mediator.get_snapshot.return_value = None - self.mediator.delete_snapshot = Mock() - self.request.snapshot_id = "A9000:{};{}".format(INTERNAL_SNAPSHOT_ID, SNAPSHOT_VOLUME_UID) - - @patch("controllers.array_action.array_mediator_xiv.XIVArrayMediator.delete_snapshot", Mock()) - def _test_delete_snapshot_succeeds(self, snapshot_id): - self.request.snapshot_id = snapshot_id - - self.servicer.DeleteSnapshot(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def test_delete_snapshot_with_internal_id_succeeds(self): - self._test_delete_snapshot_succeeds("xiv:{};{}".format(INTERNAL_SNAPSHOT_ID, 
SNAPSHOT_VOLUME_UID)) - self.mediator.delete_snapshot.assert_called_once() - - def test_delete_snapshot_with_system_id_succeeds(self): - self._test_delete_snapshot_succeeds("xiv:system_id:{}".format(SNAPSHOT_VOLUME_UID)) - self.mediator.delete_snapshot.assert_called_once() - - def test_delete_snapshot_with_system_id_internal_id_succeeds(self): - self._test_delete_snapshot_succeeds("xiv:system_id:{};{}".format(INTERNAL_SNAPSHOT_ID, SNAPSHOT_VOLUME_UID)) - self.mediator.delete_snapshot.assert_called_once() - - def test_delete_snapshot_no_internal_id_succeeds(self): - self._test_delete_snapshot_succeeds("xiv:{}".format(SNAPSHOT_VOLUME_UID)) - self.mediator.delete_snapshot.assert_called_once() - - def test_delete_snapshot_bad_id_succeeds(self): - self._test_delete_snapshot_succeeds("xiv:a:a:volume-id") - self.mediator.delete_snapshot.assert_not_called() - - def test_delete_snapshot_already_processing(self): - self._test_request_already_processing("snapshot_id", self.request.snapshot_id) - - def test_delete_snapshot_with_wrong_secrets(self): - self._test_request_with_wrong_secrets() - - def test_delete_snapshot_with_array_connection_exception(self): - self._test_request_with_array_connection_exception() - - def test_delete_snapshot_invalid_snapshot_id(self): - self.request.snapshot_id = "wrong_id" - - self.servicer.DeleteSnapshot(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - -class TestCreateVolume(BaseControllerSetUp, CommonControllerTest): - - @property - def tested_method(self): - return self.servicer.CreateVolume - - @property - def tested_method_response_class(self): - return csi_pb2.CreateVolumeResponse - - def setUp(self): - super().setUp() - - self.mediator.create_volume = Mock() - self.mediator.get_volume = Mock() - self.mediator.get_volume.side_effect = array_errors.ObjectNotFoundError("vol") - self.mediator.get_object_by_id = Mock() - self.mediator.copy_to_existing_volume_from_source = Mock() - - 
self.request.parameters = {servers_settings.PARAMETERS_POOL: DUMMY_POOL1, - servers_settings.PARAMETERS_IO_GROUP: DUMMY_IO_GROUP, - servers_settings.PARAMETERS_VOLUME_GROUP: DUMMY_VOLUME_GROUP} - self.request.volume_capabilities = [self.volume_capability] - self.request.name = VOLUME_NAME - self.request.volume_content_source = None - - def test_create_volume_with_empty_name(self): - self._test_create_object_with_empty_name() - - def _prepare_create_volume_mocks(self): - self.mediator.create_volume = Mock() - self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, "volume", VOLUME_UID, - "xiv") - - def _test_create_volume_succeeds(self, expected_volume_id, expected_pool=DUMMY_POOL1): - self._prepare_create_volume_mocks() - - response_volume = self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, expected_pool, False) - self.mediator.create_volume.assert_called_once_with(VOLUME_NAME, 10, None, expected_pool, DUMMY_IO_GROUP, - DUMMY_VOLUME_GROUP, - ObjectIds(internal_id='', uid=''), None, False) - self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') - self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') - self.assertEqual(response_volume.volume.volume_id, expected_volume_id) - - def test_create_volume_already_processing(self): - self._test_request_already_processing("name", self.request.name) - - def test_create_volume_succeeds(self): - self._test_create_volume_succeeds('xiv:{};{}'.format(INTERNAL_VOLUME_ID, VOLUME_UID)) - - def test_create_volume_with_topologies_succeeds(self): - self.request.secrets = utils.get_fake_secret_config(system_id="u2", supported_topologies=[ - {"topology.block.csi.ibm.com/test": "topology_value"}]) - self.request.accessibility_requirements.preferred = [ - ProtoBufMock(segments={"topology.block.csi.ibm.com/test": "topology_value", - 
"topology.block.csi.ibm.com/test2": "topology_value2"})] - second_system_parameters = self.request.parameters.copy() - second_system_parameters[servers_settings.PARAMETERS_POOL] = DUMMY_POOL2 - self.request.parameters = {servers_settings.PARAMETERS_BY_SYSTEM: json.dumps( - {"u1": self.request.parameters, "u2": second_system_parameters})} - self._test_create_volume_succeeds('xiv:u2:{};{}'.format(INTERNAL_VOLUME_ID, VOLUME_UID), - expected_pool=DUMMY_POOL2) - - def test_create_volume_with_space_efficiency_succeeds(self): - self._prepare_create_volume_mocks() - self.request.parameters.update({servers_settings.PARAMETERS_SPACE_EFFICIENCY: "not_none"}) - self.mediator.validate_supported_space_efficiency = Mock() - - self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False) - self.mediator.create_volume.assert_called_once_with(VOLUME_NAME, 10, "not_none", DUMMY_POOL1, DUMMY_IO_GROUP, - DUMMY_VOLUME_GROUP, - ObjectIds(internal_id='', uid=''), None, False) - - def test_create_volume_idempotent_no_source_succeeds(self): - self._prepare_create_volume_mocks() - self.mediator.get_volume = Mock() - self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, - "xiv") - - response_volume = self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False) - self.mediator.create_volume.assert_not_called() - self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') - self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') - - def test_create_volume_with_wrong_secrets(self): - self._test_request_with_wrong_secrets() - - def test_create_volume_no_pool(self): - self._prepare_create_volume_mocks() - self.request.parameters = 
{"by_system_id": json.dumps({"u1": DUMMY_POOL1, "u2": DUMMY_POOL2})} - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_volume_with_wrong_parameters(self): - self._test_request_with_wrong_parameters() - - def test_create_volume_with_wrong_volume_capabilities(self): - - volume_capability = utils.get_mock_volume_capability(fs_type="ext42") - self.request.volume_capabilities = [volume_capability] - - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT, "wrong fs_type") - self.assertIn("fs_type", self.context.details) - - access_mode = csi_pb2.VolumeCapability.AccessMode - volume_capability = utils.get_mock_volume_capability(mode=access_mode.MULTI_NODE_SINGLE_WRITER) - self.request.volume_capabilities = [volume_capability] - - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - self.assertTrue("access mode" in self.context.details) - - volume_capability = utils.get_mock_volume_capability(mount_flags=["no_formatting"]) - self.request.volume_capabilities = [volume_capability] - - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - self.assertTrue("mount_flags is unsupported" in self.context.details) - - def test_create_volume_with_array_connection_exception(self): - self._test_request_with_array_connection_exception() - - def test_create_volume_with_get_array_type_exception(self): - self._test_request_with_get_array_type_exception() - - def test_create_volume_get_volume_exception(self): - self.mediator.get_volume.side_effect = [Exception("error")] - - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) - self.assertIn("error", self.context.details) - 
self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False) - - def test_create_volume_with_get_volume_illegal_object_name_exception(self): - self.mediator.get_volume.side_effect = [array_errors.InvalidArgumentError("volume")] - - self.servicer.CreateVolume(self.request, self.context) - msg = array_errors.InvalidArgumentError("volume").message - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - self.assertIn(msg, self.context.details) - self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False) - - def test_create_volume_with_prefix_too_long_exception(self): - self.request.parameters.update({"volume_name_prefix": "a" * 128}) - self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_volume_with_get_volume_name_too_long_success(self): - self._prepare_create_volume_mocks() - self.mediator.max_object_name_length = 63 - - self.request.name = "a" * 128 - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def create_volume_returns_error(self, return_code, err): - self.mediator.create_volume = Mock() - self.mediator.create_volume.side_effect = [err] - - self.servicer.CreateVolume(self.request, self.context) - msg = str(err) - - self.assertEqual(self.context.code, return_code) - self.assertIn(msg, self.context.details) - self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False) - self.mediator.create_volume.assert_called_once_with(VOLUME_NAME, self.capacity_bytes, None, DUMMY_POOL1, - DUMMY_IO_GROUP, - DUMMY_VOLUME_GROUP, ObjectIds(internal_id='', uid=''), - None, False) - - def test_create_volume_with_illegal_object_name_exception(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.InvalidArgumentError("volume")) - - def 
test_create_volume_with_volume_exists_exception(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.ALREADY_EXISTS, - err=array_errors.VolumeAlreadyExists(VOLUME_NAME, "endpoint")) - - def test_create_volume_with_pool_does_not_exist_exception(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.PoolDoesNotExist(DUMMY_POOL1, "endpoint")) - - def test_create_volume_with_pool_does_not_match_space_efficiency_exception(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.PoolDoesNotMatchSpaceEfficiency(DUMMY_POOL1, "", "endpoint")) - - def test_create_volume_with_space_efficiency_not_supported_exception(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.SpaceEfficiencyNotSupported(["fake"])) - - def test_create_volume_with_other_exception(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, - err=Exception("error")) - - def _test_create_volume_parameters(self, final_name="default_some_name", space_efficiency=None): - self.mediator.default_object_prefix = "default" - self.request.name = "some_name" - self.mediator.create_volume = Mock() - self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, - "xiv") - self.mediator.validate_supported_space_efficiency = Mock() - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.create_volume.assert_called_once_with(final_name, 10, space_efficiency, DUMMY_POOL1, - DUMMY_IO_GROUP, - DUMMY_VOLUME_GROUP, ObjectIds(internal_id='', uid=''), - None, False) - - def test_create_volume_with_name_prefix(self): - self.request.parameters[servers_settings.PARAMETERS_VOLUME_NAME_PREFIX] = NAME_PREFIX - self._test_create_volume_parameters("prefix_some_name") - - def test_create_volume_with_no_name_prefix(self): - 
self.request.parameters[servers_settings.PARAMETERS_VOLUME_NAME_PREFIX] = "" - self._test_create_volume_parameters() - - def _test_create_volume_with_parameters_by_system_prefix(self, get_array_connection_info_from_secrets, prefix, - final_name="default_some_name", - space_efficiency=None): - get_array_connection_info_from_secrets.side_effect = [utils.get_fake_array_connection_info()] - system_parameters = self.request.parameters - system_parameters.update({servers_settings.PARAMETERS_VOLUME_NAME_PREFIX: prefix, - servers_settings.PARAMETERS_SPACE_EFFICIENCY: space_efficiency}) - self.request.parameters = {servers_settings.PARAMETERS_BY_SYSTEM: json.dumps({"u1": system_parameters})} - self._test_create_volume_parameters(final_name, space_efficiency) - - @patch("controllers.servers.utils.get_array_connection_info_from_secrets") - def test_create_volume_with_parameters_by_system_no_name_prefix(self, get_array_connection_info_from_secrets): - self._test_create_volume_with_parameters_by_system_prefix(get_array_connection_info_from_secrets, "") - - @patch("controllers.servers.utils.get_array_connection_info_from_secrets") - def test_create_volume_with_parameters_by_system_name_prefix(self, get_array_connection_info_from_secrets): - self._test_create_volume_with_parameters_by_system_prefix(get_array_connection_info_from_secrets, NAME_PREFIX, - "prefix_some_name") - - @patch("controllers.servers.utils.get_array_connection_info_from_secrets") - def test_create_volume_with_parameters_by_system_space_efficiency(self, get_array_connection_info_from_secrets): - self._test_create_volume_with_parameters_by_system_prefix(get_array_connection_info_from_secrets, "", - space_efficiency="not_none") - - def test_create_volume_with_required_bytes_zero(self): - self._prepare_create_volume_mocks() - self.request.capacity_range.required_bytes = 0 - - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - 
self.mediator.create_volume.assert_called_once_with(self.request.name, 2, None, DUMMY_POOL1, DUMMY_IO_GROUP, - DUMMY_VOLUME_GROUP, - ObjectIds(internal_id='', uid=''), None, False) - - def test_create_volume_with_required_bytes_too_large_fail(self): - self._prepare_create_volume_mocks() - self.request.capacity_range.required_bytes = 11 - - self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OUT_OF_RANGE) - self.mediator.create_volume.assert_not_called() - - def test_create_volume_with_no_space_in_pool(self): - self.create_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, - err=array_errors.NotEnoughSpaceInPool(DUMMY_POOL1)) - - def _prepare_snapshot_request_volume_content_source(self): - self.request.volume_content_source = self._get_source_snapshot(SNAPSHOT_VOLUME_UID) - - def _prepare_idempotent_tests(self): - self.mediator.get_volume = Mock() - self.mediator.copy_to_existing_volume = Mock() - self._prepare_snapshot_request_volume_content_source() - - def test_create_volume_idempotent_with_source_succeed(self): - self._prepare_idempotent_tests() - snapshot_id = SNAPSHOT_VOLUME_UID - self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, - "a9k", - source_id=snapshot_id) - - response = self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.assertEqual(response.volume.content_source.snapshot.snapshot_id, snapshot_id) - self.mediator.copy_to_existing_volume.assert_not_called() - - def test_create_volume_idempotent_with_source_volume_have_no_source(self): - self._prepare_idempotent_tests() - self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, - "a9k") - response = self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) - 
self.assertFalse(response.HasField("volume")) - self.mediator.copy_to_existing_volume.assert_not_called() - - def test_create_volume_idempotent_source_not_requested_but_found_in_volume(self): - self._prepare_idempotent_tests() - snapshot_id = SNAPSHOT_VOLUME_UID - self.request.volume_content_source = None - self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, - "a9k", - source_id=snapshot_id) - response = self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) - self.assertFalse(response.HasField("volume")) - self.mediator.copy_to_existing_volume.assert_not_called() - - def _prepare_idempotent_test_with_other_source(self): - self._prepare_idempotent_tests() - volume_source_id = SOURCE_VOLUME_ID - self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, - SNAPSHOT_VOLUME_UID, "a9k", - source_id=volume_source_id) - self.servicer.CreateVolume(self.request, self.context) - self.mediator.copy_to_existing_volume.assert_not_called() - - def test_create_volume_idempotent_with_source_volume_got_other_source(self): - self._prepare_idempotent_test_with_other_source() - self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) - - def _enable_virt_snap_func(self): - self.request.parameters[servers_settings.PARAMETERS_VIRT_SNAP_FUNC] = "true" - - def test_create_volume_idempotent_with_other_source_and_virt_snap_func_enabled(self): - self._enable_virt_snap_func() - self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume() - self._prepare_idempotent_test_with_other_source() - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def test_create_volume_virt_snap_func_enabled_no_source(self): - self._enable_virt_snap_func() - self._prepare_snapshot_request_volume_content_source() - self.mediator.get_object_by_id.return_value = None - 
self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - def test_create_volume_virt_snap_func_enabled_no_snapshot_source(self): - self._enable_virt_snap_func() - self._prepare_snapshot_request_volume_content_source() - self.mediator.get_object_by_id.side_effect = [utils.get_mock_mediator_response_snapshot(), None] - self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_volume_idempotent_with_size_not_matched(self): - self.mediator.get_volume = Mock() - self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(9, VOLUME_NAME, VOLUME_UID, - "a9k") - - self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) - - def _prepare_mocks_for_copy_from_source(self): - self.mediator.create_volume = Mock() - self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, - "a9k") - - def test_create_volume_from_snapshot_success(self): - self._prepare_mocks_for_copy_from_source() - snapshot_id = SNAPSHOT_VOLUME_UID - snapshot_capacity_bytes = 100 - self.request.volume_content_source = self._get_source_snapshot(snapshot_id) - self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_snapshot(snapshot_capacity_bytes, - SNAPSHOT_NAME, - snapshot_id, - VOLUME_NAME, - "a9k") - - response_volume = self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.copy_to_existing_volume_from_source.assert_called_once() - self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') - self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, snapshot_id) - - def test_create_volume_from_source_source_or_target_not_found(self): - array_exception = 
array_errors.ObjectNotFoundError("") - self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.NOT_FOUND) - - def test_create_volume_from_source_source_snapshot_invalid(self): - volume_content_source = self._get_source_snapshot(SNAPSHOT_VOLUME_UID) - volume_content_source.snapshot.snapshot_id = 'invalid_snapshot_id' - self.request.volume_content_source = volume_content_source - - self.servicer.CreateVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - self.assertIn("invalid_snapshot_id", self.context.details) - - def test_create_volume_from_source_illegal_object_id(self): - array_exception = array_errors.InvalidArgumentError("") - self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_volume_from_source_permission_denied(self): - array_exception = array_errors.PermissionDeniedError("") - self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.PERMISSION_DENIED) - - def test_create_volume_from_source_pool_missing(self): - array_exception = array_errors.PoolParameterIsMissing("") - self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.INVALID_ARGUMENT) - - def test_create_volume_from_source_general_error(self): - array_exception = Exception("") - self._test_create_volume_from_snapshot_error(array_exception, - grpc.StatusCode.INTERNAL) - - def test_create_volume_from_source_get_object_general_error(self): - array_exception = Exception("") - self._test_create_volume_from_snapshot_error(None, - grpc.StatusCode.INTERNAL, get_exception=array_exception) - - def test_create_volume_from_source_get_object_error(self): - array_exception = array_errors.ExpectedSnapshotButFoundVolumeError("", "") - self._test_create_volume_from_snapshot_error(None, - grpc.StatusCode.INVALID_ARGUMENT, get_exception=array_exception) - - def test_create_volume_from_source_get_object_none(self): - 
self._test_create_volume_from_snapshot_error(None, - grpc.StatusCode.NOT_FOUND, - array_errors.ObjectNotFoundError("")) - - def _test_create_volume_from_snapshot_error(self, copy_exception, return_code, - get_exception=None): - self._prepare_mocks_for_copy_from_source() - source_id = SNAPSHOT_VOLUME_UID - self.request.volume_content_source = self._get_source_snapshot(source_id) - if not copy_exception: - self.mediator.copy_to_existing_volume_from_source.side_effect = [get_exception] - self.storage_agent.get_mediator.return_value.__exit__.side_effect = [get_exception] - else: - self.mediator.copy_to_existing_volume_from_source.side_effect = [copy_exception] - self.storage_agent.get_mediator.return_value.__exit__.side_effect = [copy_exception] - - response = self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, return_code) - self.assertIsInstance(response, csi_pb2.CreateVolumeResponse) - - def test_clone_volume_success(self): - self._prepare_mocks_for_copy_from_source() - volume_id = SOURCE_VOLUME_ID - volume_capacity_bytes = 100 - self.request.volume_content_source = self._get_source_volume(volume_id) - self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(volume_capacity_bytes, - CLONE_VOLUME_NAME, - volume_id, "a9k") - response_volume = self.servicer.CreateVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.mediator.copy_to_existing_volume_from_source.assert_called_once() - self.assertEqual(response_volume.volume.content_source.volume.volume_id, volume_id) - self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') - - def _get_source_volume(self, object_id): - return self._get_source(object_id, servers_settings.VOLUME_TYPE_NAME) - - def _get_source_snapshot(self, object_id): - return self._get_source(object_id, servers_settings.SNAPSHOT_TYPE_NAME) - - @staticmethod - def _get_source(object_id, object_type): - source = 
ProtoBufMock(spec=[object_type]) - id_field_name = servers_settings.VOLUME_SOURCE_ID_FIELDS[object_type] - object_field = MagicMock(spec=[id_field_name]) - setattr(source, object_type, object_field) - setattr(object_field, id_field_name, "a9000:{0}".format(object_id)) - return source - - -class TestDeleteVolume(BaseControllerSetUp, CommonControllerTest): - - @property - def tested_method(self): - return self.servicer.DeleteVolume - - @property - def tested_method_response_class(self): - return csi_pb2.DeleteVolumeResponse - - def get_create_object_method(self): - return self.servicer.DeleteVolume - - def get_create_object_response_method(self): - return csi_pb2.DeleteVolumeResponse - - def setUp(self): - super().setUp() - - self.mediator.get_volume = Mock() - self.mediator.delete_volume = Mock() - self.mediator.is_volume_has_snapshots = Mock() - self.mediator.is_volume_has_snapshots.return_value = False - - self.request.volume_id = "xiv:0;volume-id" - - def test_delete_volume_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) - - def test_delete_volume_with_wrong_secrets(self): - self._test_request_with_wrong_secrets() - - def test_delete_volume_invalid_volume_id(self): - self.request.volume_id = "wrong_id" - - self.servicer.DeleteVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - @patch("controllers.servers.csi.csi_controller_server.get_agent") - def test_delete_volume_with_array_connection_exception(self, storage_agent): - storage_agent.side_effect = [Exception("a_enter error")] - - self.servicer.DeleteVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) - self.assertTrue("a_enter error" in self.context.details) - - def delete_volume_returns_error(self, error, return_code): - self.mediator.delete_volume.side_effect = [error] - - self.servicer.DeleteVolume(self.request, self.context) - - 
self.assertEqual(self.context.code, return_code) - if return_code != grpc.StatusCode.OK: - msg = str(error) - self.assertIn(msg, self.context.details, "msg : {0} is not in : {1}".format(msg, self.context.details)) - - def test_delete_volume_with_volume_not_found_error(self): - self.delete_volume_returns_error(error=array_errors.ObjectNotFoundError("volume"), - return_code=grpc.StatusCode.OK) - - def test_delete_volume_with_delete_volume_other_exception(self): - self.delete_volume_returns_error(error=Exception("error"), return_code=grpc.StatusCode.INTERNAL) - - def test_delete_volume_has_snapshots(self): - self.delete_volume_returns_error(error=array_errors.ObjectIsStillInUseError("a", ["b"]), - return_code=grpc.StatusCode.FAILED_PRECONDITION) - - @patch("controllers.array_action.array_mediator_xiv.XIVArrayMediator.delete_volume") - def _test_delete_volume_succeeds(self, volume_id, delete_volume): - delete_volume.return_value = Mock() - self.request.volume_id = volume_id - self.servicer.DeleteVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def test_delete_volume_with_internal_id_succeeds(self): - self._test_delete_volume_succeeds("xiv:0;volume-id") - - def test_delete_volume_with_system_id_succeeds(self): - self._test_delete_volume_succeeds("xiv:system_id:volume-id") - - def test_delete_volume_with_system_id_internal_id_succeeds(self): - self._test_delete_volume_succeeds("xiv:system_id:0;volume-id") - - def test_delete_volume_no_internal_id_succeeds(self): - self._test_delete_volume_succeeds("xiv:volume-id") - - -class TestPublishVolume(BaseControllerSetUp, CommonControllerTest): - - @property - def tested_method(self): - return self.servicer.ControllerPublishVolume - - @property - def tested_method_response_class(self): - return csi_pb2.ControllerPublishVolumeResponse - - def setUp(self): - super().setUp() - - self.hostname = "hostname" - - self.mediator.map_volume_by_initiators = Mock() - 
self.mediator.map_volume_by_initiators.return_value = "2", "iscsi", {"iqn1": ["1.1.1.1", "2.2.2.2"], - "iqn2": ["[::1]"]} - - arr_type = XIVArrayMediator.array_type - self.request.volume_id = "{}:wwn1".format(arr_type) - self.iqn = "iqn.1994-05.com.redhat:686358c930fe" - self.fc_port = "500143802426baf4" - self.request.node_id = "{};;{};{}".format(self.hostname, self.fc_port, self.iqn) - self.request.readonly = False - - self.request.volume_capability = utils.get_mock_volume_capability() - - def test_publish_volume_success(self): - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def test_publish_volume_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) - - @patch("controllers.servers.utils.validate_publish_volume_request") - def test_publish_volume_validateion_exception(self, publish_validation): - publish_validation.side_effect = [controller_errors.ValidationException("msg")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - self.assertIn("msg", self.context.details) - - def test_publish_volume_with_wrong_secrets(self): - self._test_request_with_wrong_secrets() - - def test_publish_volume_wrong_volume_id(self): - self.request.volume_id = "some-wrong-id-format" - - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - def test_publish_volume_wrong_node_id(self): - self.request.node_id = "some-wrong-id-format" - - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - def test_publish_volume_get_host_by_host_identifiers_exception(self): - self.mediator.map_volume_by_initiators = Mock() - self.mediator.map_volume_by_initiators.side_effect = 
[array_errors.MultipleHostsFoundError("", "")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertTrue("Multiple hosts" in self.context.details) - self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) - - self.mediator.map_volume_by_initiators.side_effect = [array_errors.HostNotFoundError("")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - def test_publish_volume_with_connectivity_type_fc(self): - self.mediator.map_volume_by_initiators.return_value = "1", "fc", ["500143802426baf4"] - - response = self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - self.assertEqual(response.publish_context["PUBLISH_CONTEXT_LUN"], '1') - self.assertEqual(response.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], "fc") - self.assertEqual(response.publish_context["PUBLISH_CONTEXT_ARRAY_FC_INITIATORS"], "500143802426baf4") - - def test_publish_volume_with_connectivity_type_iscsi(self): - response = self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - self.assertEqual(response.publish_context["PUBLISH_CONTEXT_LUN"], '2') - self.assertEqual(response.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], - "iscsi") - self.assertEqual(response.publish_context["PUBLISH_CONTEXT_ARRAY_IQN"], - "iqn1,iqn2") - self.assertEqual(response.publish_context["iqn1"], - "1.1.1.1,2.2.2.2") - self.assertEqual(response.publish_context["iqn2"], - "[::1]") - - def test_publish_volume_get_volume_mappings_more_then_one_mapping(self): - self.mediator.map_volume_by_initiators.side_effect = [array_errors.VolumeAlreadyMappedToDifferentHostsError("")] - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.FAILED_PRECONDITION) - self.assertTrue("Volume is already mapped" in 
self.context.details) - - def test_publish_volume_map_volume_excpetions(self): - self.mediator.map_volume_by_initiators.side_effect = [array_errors.PermissionDeniedError("msg")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.PERMISSION_DENIED) - - self.mediator.map_volume_by_initiators.side_effect = [array_errors.ObjectNotFoundError("volume")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - self.mediator.map_volume_by_initiators.side_effect = [array_errors.HostNotFoundError("host")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - self.mediator.map_volume_by_initiators.side_effect = [array_errors.MappingError("", "", "")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) - - def test_publish_volume_map_volume_lun_already_in_use(self): - self.mediator.map_volume_by_initiators.side_effect = [array_errors.NoAvailableLunError("")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.RESOURCE_EXHAUSTED) - - def test_publish_volume_get_iscsi_targets_by_iqn_excpetions(self): - self.mediator.map_volume_by_initiators.side_effect = [array_errors.NoIscsiTargetsFoundError("some_endpoint")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - def test_map_volume_by_initiators_exceptions(self): - self.mediator.map_volume_by_initiators.side_effect = [ - array_errors.UnsupportedConnectivityTypeError("usb")] - - self.servicer.ControllerPublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - -class 
TestUnpublishVolume(BaseControllerSetUp, CommonControllerTest): - - @property - def tested_method(self): - return self.servicer.ControllerUnpublishVolume - - @property - def tested_method_response_class(self): - return csi_pb2.ControllerUnpublishVolumeResponse - - def setUp(self): - super().setUp() - self.hostname = "hostname" - - self.mediator.unmap_volume_by_initiators = Mock() - self.mediator.unmap_volume_by_initiators.return_value = None - - arr_type = XIVArrayMediator.array_type - self.request.volume_id = "{}:wwn1".format(arr_type) - self.request.node_id = "hostname;iqn1;500143802426baf4" - - def test_unpublish_volume_success(self): - self.servicer.ControllerUnpublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def test_unpublish_volume_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) - - @patch("controllers.servers.utils.validate_unpublish_volume_request") - def test_unpublish_volume_validation_exception(self, publish_validation): - publish_validation.side_effect = [controller_errors.ValidationException("msg")] - - self.servicer.ControllerUnpublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - self.assertIn("msg", self.context.details) - - @patch("controllers.servers.utils.get_volume_id_info") - def test_unpublish_volume_object_id_error(self, get_volume_id_info): - get_volume_id_info.side_effect = [controller_errors.ObjectIdError("object_type", "object_id")] - - self.servicer.ControllerUnpublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - self.assertIn("object_type", self.context.details) - self.assertIn("object_id", self.context.details) - - def test_unpublish_volume_with_wrong_secrets(self): - self._test_request_with_wrong_secrets() - - def test_unpublish_volume_with_too_much_delimiters_in_volume_id(self): - 
self.request.volume_id = "too:much:delimiters:in:id" - - self.servicer.ControllerUnpublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - - def test_unpublish_volume_wrong_node_id(self): - self.request.node_id = "some-wrong-id-format" - - self.servicer.ControllerUnpublishVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def test_unpublish_volume_get_host_by_host_identifiers_multiple_hosts_found_error(self): - self.mediator.unmap_volume_by_initiators.side_effect = [array_errors.MultipleHostsFoundError("", "")] - - self.servicer.ControllerUnpublishVolume(self.request, self.context) - self.assertTrue("Multiple hosts" in self.context.details) - self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) - - def test_unpublish_volume_get_host_by_host_identifiers_host_not_found_error(self): - self.mediator.get_host_by_host_identifiers = Mock() - self.mediator.get_host_by_host_identifiers.side_effect = [array_errors.HostNotFoundError("")] - - self.servicer.ControllerUnpublishVolume(self.request, self.context) - self.assertEqual(self.context.code, grpc.StatusCode.OK) - - def _test_unpublish_volume_unmap_volume_by_initiators_with_error(self, array_error, status_code): - self.mediator.unmap_volume_by_initiators.side_effect = [array_error] - - self.servicer.ControllerUnpublishVolume(self.request, self.context) - self.assertEqual(self.context.code, status_code) - - def test_unpublish_volume_unmap_volume_by_initiators_object_not_found_error(self): - self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.ObjectNotFoundError("volume"), - grpc.StatusCode.OK) - - def test_unpublish_volume_unmap_volume_by_initiators_volume_already_unmapped_error(self): - self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.VolumeAlreadyUnmappedError(""), - grpc.StatusCode.OK) - - def 
test_unpublish_volume_unmap_volume_by_initiators_permission_denied_error(self): - self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.PermissionDeniedError("msg"), - grpc.StatusCode.PERMISSION_DENIED) - - def test_unpublish_volume_unmap_volume_by_initiators_host_not_found_error(self): - self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.HostNotFoundError("host"), - grpc.StatusCode.OK) - - def test_unpublish_volume_unmap_volume_by_initiators_unmapping_error(self): - self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.UnmappingError("", "", ""), - grpc.StatusCode.INTERNAL) - - -class TestGetCapabilities(BaseControllerSetUp): - - def test_controller_get_capabilities(self): - self.servicer.ControllerGetCapabilities(self.request, self.context) - - -class TestExpandVolume(BaseControllerSetUp, CommonControllerTest): - - @property - def tested_method(self): - return self.servicer.ControllerExpandVolume - - @property - def tested_method_response_class(self): - return csi_pb2.ControllerExpandVolumeResponse - - def setUp(self): - super().setUp() - - self.mediator.expand_volume = Mock() - - self.request.parameters = {} - self.volume_id = "vol-id" - self.request.volume_id = "{}:{}".format("xiv", self.volume_id) - self.request.volume_content_source = None - self.mediator.get_object_by_id = Mock() - self.volume_before_expand = utils.get_mock_mediator_response_volume(2, VOLUME_NAME, self.volume_id, "a9k") - self.volume_after_expand = utils.get_mock_mediator_response_volume(self.capacity_bytes, VOLUME_NAME, - self.volume_id, "a9k") - self.mediator.get_object_by_id.side_effect = [self.volume_before_expand, self.volume_after_expand] - self.request.volume_capability = self.volume_capability - - def _prepare_expand_volume_mocks(self): - self.mediator.expand_volume = Mock() - - def test_expand_volume_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) - - 
def test_expand_volume_with_required_bytes_too_large_fail(self): - self._prepare_expand_volume_mocks() - self.request.capacity_range.required_bytes = 11 - - self.servicer.ControllerExpandVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OUT_OF_RANGE) - self.mediator.expand_volume.assert_not_called() - - def _test_no_expand_needed(self): - response = self.servicer.ControllerExpandVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.assertFalse(response.node_expansion_required) - self.assertEqual(response.capacity_bytes, self.volume_before_expand.capacity_bytes) - self.mediator.expand_volume.assert_not_called() - - def test_expand_volume_with_required_bytes_below_minimal(self): - self._prepare_expand_volume_mocks() - self.request.capacity_range.required_bytes = 1 - self._test_no_expand_needed() - - def test_expand_volume_with_required_bytes_zero(self): - self._prepare_expand_volume_mocks() - self.request.capacity_range.required_bytes = 0 - self._test_no_expand_needed() - - def test_expand_volume_with_volume_size_already_in_range(self): - self._prepare_expand_volume_mocks() - self.request.capacity_range.required_bytes = 2 - self._test_no_expand_needed() - - def test_expand_volume_succeeds(self): - self._prepare_expand_volume_mocks() - - response = self.servicer.ControllerExpandVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) - self.assertTrue(response.node_expansion_required) - self.assertEqual(response.capacity_bytes, self.volume_after_expand.capacity_bytes) - self.mediator.expand_volume.assert_called_once_with(volume_id=self.volume_id, - required_bytes=self.capacity_bytes) - - def test_expand_volume_with_bad_id(self): - self._prepare_expand_volume_mocks() - self.request.volume_id = "123" - - self.servicer.ControllerExpandVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) - 
self.mediator.expand_volume.assert_not_called() - - def test_expand_volume_not_found_before_expansion(self): - self._prepare_expand_volume_mocks() - self.mediator.get_object_by_id.side_effect = [None, None] - - self.servicer.ControllerExpandVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - def test_expand_volume_not_found_after_expansion(self): - self._prepare_expand_volume_mocks() - self.mediator.get_object_by_id.side_effect = [self.volume_before_expand, None] - - self.servicer.ControllerExpandVolume(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - - def test_expand_volume_with_wrong_secrets(self): - self._test_request_with_wrong_secrets() - - def test_expand_volume_with_array_connection_exception(self): - self._test_request_with_array_connection_exception() - - def _expand_volume_returns_error(self, return_code, err): - self.mediator.expand_volume.side_effect = [err] - msg = str(err) - - self.servicer.ControllerExpandVolume(self.request, self.context) - - self.assertEqual(self.context.code, return_code) - self.assertIn(msg, self.context.details) - self.mediator.expand_volume.assert_called_once_with(volume_id=self.volume_id, - required_bytes=self.capacity_bytes) - - def test_expand_volume_with_illegal_object_id_exception(self): - self._expand_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, - err=array_errors.InvalidArgumentError("123")) - - def test_expand_volume_with_permission_denied_exception(self): - self._expand_volume_returns_error(return_code=grpc.StatusCode.PERMISSION_DENIED, - err=array_errors.PermissionDeniedError("msg")) - - def test_expand_volume_with_object_not_found_exception(self): - self._expand_volume_returns_error(return_code=grpc.StatusCode.NOT_FOUND, - err=array_errors.ObjectNotFoundError("name")) - - def test_expand_volume_with_object_in_use_exception(self): - 
self._expand_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, - err=array_errors.ObjectIsStillInUseError("a", ["b"])) - - def test_expand_volume_with_other_exception(self): - self._expand_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, - err=Exception("error")) - - def test_expand_volume_with_no_space_in_pool_exception(self): - self._expand_volume_returns_error(return_code=grpc.StatusCode.RESOURCE_EXHAUSTED, - err=array_errors.NotEnoughSpaceInPool(DUMMY_POOL1)) - - -class TestIdentityServer(BaseControllerSetUp): - - @patch("controllers.common.config.config.identity") - def test_identity_plugin_get_info_succeeds(self, identity_config): - plugin_name = "plugin-name" - version = "1.1.0" - identity_config.name = plugin_name - identity_config.version = version - request = Mock() - context = Mock() - request.volume_capabilities = [] - response = self.servicer.GetPluginInfo(request, context) - self.assertEqual(response, csi_pb2.GetPluginInfoResponse(name=plugin_name, vendor_version=version)) - - @patch("controllers.common.config.config.identity") - def test_identity_plugin_get_info_fails_when_attributes_from_config_are_missing(self, identity_config): - request = Mock() - context = Mock() - - identity_config.mock_add_spec(spec=["name"]) - response = self.servicer.GetPluginInfo(request, context) - context.set_code.assert_called_once_with(grpc.StatusCode.INTERNAL) - self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) - - identity_config.mock_add_spec(spec=["version"]) - response = self.servicer.GetPluginInfo(request, context) - self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) - context.set_code.assert_called_with(grpc.StatusCode.INTERNAL) - - @patch("controllers.common.config.config.identity") - def test_identity_plugin_get_info_fails_when_name_or_version_are_empty(self, identity_config): - request = Mock() - context = Mock() - - identity_config.name = "" - identity_config.version = "1.1.0" - response = 
self.servicer.GetPluginInfo(request, context) - context.set_code.assert_called_once_with(grpc.StatusCode.INTERNAL) - self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) - - identity_config.name = "name" - identity_config.version = "" - response = self.servicer.GetPluginInfo(request, context) - self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) - self.assertEqual(context.set_code.call_args_list, - [call(grpc.StatusCode.INTERNAL), call(grpc.StatusCode.INTERNAL)]) - - def test_identity_get_plugin_capabilities(self): - request = Mock() - context = Mock() - self.servicer.GetPluginCapabilities(request, context) - - def test_identity_probe(self): - request = Mock() - context = Mock() - self.servicer.Probe(request, context) - - -class TestValidateVolumeCapabilities(BaseControllerSetUp, CommonControllerTest): - - @property - def tested_method(self): - return self.servicer.ValidateVolumeCapabilities - - @property - def tested_method_response_class(self): - return csi_pb2.ValidateVolumeCapabilitiesResponse - - def setUp(self): - super().setUp() - - arr_type = XIVArrayMediator.array_type - self.request.volume_id = "{}:{}".format(arr_type, VOLUME_UID) - self.request.parameters = {servers_settings.PARAMETERS_POOL: DUMMY_POOL1} - - self.mediator.get_object_by_id = Mock() - self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(10, "vol", VOLUME_UID, - "a9k") - self.request.volume_capabilities = [self.volume_capability] - - def _assert_response(self, expected_status_code, expected_details_substring): - self.assertEqual(self.context.code, expected_status_code) - self.assertTrue(expected_details_substring in self.context.details) - - def test_validate_volume_capabilities_already_processing(self): - self._test_request_already_processing("volume_id", self.request.volume_id) - - def test_validate_volume_capabilities_success(self): - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - 
self.assertEqual(self.context.code, grpc.StatusCode.OK) - - @patch("controllers.servers.utils.get_volume_id_info") - def test_validate_volume_capabilities_object_id_error(self, get_volume_id_info): - get_volume_id_info.side_effect = [controller_errors.ObjectIdError("object_type", "object_id")] - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) - self.assertIn("object_type", self.context.details) - self.assertIn("object_id", self.context.details) - - def test_validate_volume_capabilities_with_empty_id(self): - self.request.volume_id = "" - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "volume id") - - def test_validate_volume_capabilities_with_wrong_secrets(self): - self._test_request_with_wrong_secrets() - - def test_validate_volume_capabilities_with_unsupported_access_mode(self): - self.request.volume_capabilities[0].access_mode.mode = 999 - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "unsupported access mode") - - def test_validate_volume_capabilities_with_unsupported_fs_type(self): - volume_capability = utils.get_mock_volume_capability(fs_type="ext3") - self.request.volume_capabilities = [volume_capability] - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "fs_type") - - def test_validate_volume_capabilities_with_no_capabilities(self): - self.request.volume_capabilities = {} - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "not set") - - def test_validate_volume_capabilities_with_bad_id(self): - self.request.volume_id = VOLUME_UID - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - 
self._assert_response(grpc.StatusCode.NOT_FOUND, "id format") - - def test_validate_volume_capabilities_with_volume_not_found(self): - self.mediator.get_object_by_id.return_value = None - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.NOT_FOUND, VOLUME_UID) - - def test_validate_volume_capabilities_with_volume_context_not_match(self): - self.request.volume_context = {servers_settings.VOLUME_CONTEXT_VOLUME_NAME: "fake"} - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "volume context") - - def test_validate_volume_capabilities_with_space_efficiency_not_match(self): - self.request.parameters.update({servers_settings.PARAMETERS_SPACE_EFFICIENCY: "not_none"}) - self.mediator.validate_supported_space_efficiency = Mock() - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "space efficiency") - - def test_validate_volume_capabilities_with_pool_not_match(self): - self.request.parameters.update({servers_settings.PARAMETERS_POOL: "other pool"}) - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, DUMMY_POOL1) - - def test_validate_volume_capabilities_with_prefix_not_match(self): - self.request.parameters.update({servers_settings.PARAMETERS_VOLUME_NAME_PREFIX: NAME_PREFIX}) - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, NAME_PREFIX) - - def test_validate_volume_capabilities_parameters_success(self): - self.request.parameters = {servers_settings.PARAMETERS_VOLUME_NAME_PREFIX: NAME_PREFIX, - servers_settings.PARAMETERS_POOL: "pool2", - servers_settings.PARAMETERS_SPACE_EFFICIENCY: "not_none"} - volume_response = utils.get_mock_mediator_response_volume(10, "prefix_vol", VOLUME_UID, "a9k", 
- space_efficiency="not_none") - volume_response.pool = "pool2" - self.mediator.get_object_by_id.return_value = volume_response - self.mediator.validate_supported_space_efficiency = Mock() - - self.servicer.ValidateVolumeCapabilities(self.request, self.context) - - self.assertEqual(self.context.code, grpc.StatusCode.OK) +import abc +import json +import unittest + +import grpc +from csi_general import csi_pb2 +from mock import patch, Mock, MagicMock, call + +import controllers.array_action.errors as array_errors +import controllers.servers.errors as controller_errors +import controllers.servers.settings as servers_settings +from controllers.array_action.array_action_types import ObjectIds +from controllers.array_action.array_mediator_xiv import XIVArrayMediator +from controllers.servers.csi.csi_controller_server import CSIControllerServicer +from controllers.servers.csi.sync_lock import SyncLock +from controllers.tests import utils +from controllers.tests.common.test_settings import (CLONE_VOLUME_NAME, + OBJECT_INTERNAL_ID, + DUMMY_POOL1, SPACE_EFFICIENCY, + DUMMY_IO_GROUP, DUMMY_VOLUME_GROUP, + VOLUME_NAME, SNAPSHOT_NAME, + SNAPSHOT_VOLUME_NAME, + SNAPSHOT_VOLUME_UID, VIRT_SNAP_FUNC_TRUE, SECRET_PASSWORD_VALUE, + SECRET_USERNAME_VALUE, + VOLUME_UID, INTERNAL_VOLUME_ID, DUMMY_POOL2, + SECRET_MANAGEMENT_ADDRESS_VALUE, + NAME_PREFIX, INTERNAL_SNAPSHOT_ID, SOURCE_VOLUME_ID, + SECRET_MANAGEMENT_ADDRESS_KEY, SECRET_PASSWORD_KEY, + SECRET_USERNAME_KEY, SECRET) +from controllers.tests.controller_server.common import mock_get_agent, mock_array_type, mock_mediator +from controllers.tests.utils import ProtoBufMock + +CONTROLLER_SERVER_PATH = "controllers.servers.csi.csi_controller_server" + + +class BaseControllerSetUp(unittest.TestCase): + + def setUp(self): + self.servicer = CSIControllerServicer() + + mock_array_type(self, CONTROLLER_SERVER_PATH) + + self.mediator = mock_mediator() + + self.storage_agent = MagicMock() + mock_get_agent(self, CONTROLLER_SERVER_PATH) + + 
self.request = ProtoBufMock() + self.request.secrets = SECRET + + self.request.parameters = {} + self.request.volume_context = {} + self.volume_capability = utils.get_mock_volume_capability() + self.capacity_bytes = 10 + self.request.capacity_range = Mock() + self.request.capacity_range.required_bytes = self.capacity_bytes + self.context = utils.FakeContext() + + +class CommonControllerTest: + + @property + @abc.abstractmethod + def tested_method(self): + raise NotImplementedError + + @property + @abc.abstractmethod + def tested_method_response_class(self): + raise NotImplementedError + + def _test_create_object_with_empty_name(self): + self.request.name = "" + context = utils.FakeContext() + response = self.tested_method(self.request, context) + self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, context.code) + self.assertIn("name", context.details) + self.assertEqual(self.tested_method_response_class(), response) + + def _test_request_with_wrong_secrets_parameters(self, secrets, message="secret"): + context = utils.FakeContext() + + self.request.secrets = secrets + self.tested_method(self.request, context) + self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, context.code) + self.assertIn(message, context.details) + + def _test_request_with_wrong_secrets(self): + secrets = {SECRET_PASSWORD_KEY: SECRET_PASSWORD_VALUE, + SECRET_MANAGEMENT_ADDRESS_KEY: SECRET_MANAGEMENT_ADDRESS_VALUE} + self._test_request_with_wrong_secrets_parameters(secrets) + + secrets = {SECRET_USERNAME_KEY: SECRET_USERNAME_VALUE, + SECRET_MANAGEMENT_ADDRESS_KEY: SECRET_MANAGEMENT_ADDRESS_VALUE} + self._test_request_with_wrong_secrets_parameters(secrets) + + secrets = {SECRET_USERNAME_KEY: SECRET_USERNAME_VALUE, SECRET_PASSWORD_KEY: SECRET_PASSWORD_VALUE} + self._test_request_with_wrong_secrets_parameters(secrets) + + secrets = utils.get_fake_secret_config(system_id="u-") + self._test_request_with_wrong_secrets_parameters(secrets, message="system id") + + self.request.secrets = [] + + def 
_test_request_already_processing(self, request_attribute, object_id): + with SyncLock(request_attribute, object_id, "test_request_already_processing"): + response = self.tested_method(self.request, self.context) + self.assertEqual(grpc.StatusCode.ABORTED, self.context.code) + self.assertEqual(self.tested_method_response_class, type(response)) + + def _test_request_with_array_connection_exception(self): + self.get_agent.side_effect = [Exception("error")] + context = utils.FakeContext() + self.tested_method(self.request, context) + self.assertEqual(grpc.StatusCode.INTERNAL, context.code) + self.assertIn("error", context.details) + + def _test_request_with_get_array_type_exception(self): + context = utils.FakeContext() + self.detect_array_type.side_effect = [array_errors.FailedToFindStorageSystemType("endpoint")] + self.tested_method(self.request, context) + self.assertEqual(grpc.StatusCode.INTERNAL, context.code) + msg = array_errors.FailedToFindStorageSystemType("endpoint").message + self.assertIn(msg, context.details) + + def _test_request_with_wrong_parameters(self): + context = utils.FakeContext() + parameters = [{}, {"": ""}, {"pool": ""}] + + for request_parameters in parameters: + self.request.parameters = request_parameters + self.tested_method(self.request, context) + self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, context.code) + + +class TestCreateSnapshot(BaseControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.CreateSnapshot + + @property + def tested_method_response_class(self): + return csi_pb2.CreateSnapshotResponse + + def setUp(self): + super().setUp() + + self.mediator.get_snapshot = Mock() + self.mediator.get_snapshot.return_value = None + + self.mediator.create_snapshot = Mock() + + self.request.name = SNAPSHOT_NAME + self.request.source_volume_id = "{}:{};{}".format("A9000", OBJECT_INTERNAL_ID, SNAPSHOT_VOLUME_UID) + self.mediator.get_object_by_id = Mock() + 
self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(10, SNAPSHOT_VOLUME_NAME, + VOLUME_UID, "xiv") + self.context = utils.FakeContext() + + def test_create_snapshot_with_empty_name(self): + self._test_create_object_with_empty_name() + + def _prepare_create_snapshot_mocks(self, virt_snap_func=False): + self.mediator.get_snapshot = Mock() + self.mediator.get_snapshot.return_value = None + self.mediator.create_snapshot = Mock() + if virt_snap_func: + snapshot_returned = utils.get_mock_mediator_response_snapshot(snapshot_id=None) + else: + snapshot_returned = utils.get_mock_mediator_response_snapshot() + self.mediator.create_snapshot.return_value = snapshot_returned + + def _test_create_snapshot_succeeds(self, expected_space_efficiency=None, expected_pool=None, + system_id=None, virt_snap_func=False): + self._prepare_create_snapshot_mocks(virt_snap_func) + + response_snapshot = self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(grpc.StatusCode.OK, self.context.code) + self.mediator.get_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, expected_pool, + virt_snap_func) + self.mediator.create_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, + expected_space_efficiency, expected_pool, virt_snap_func) + system_id_part = ':{}'.format(system_id) if system_id else '' + if virt_snap_func: + snapshot_id = 'xiv{}:0;{}'.format(system_id_part, SNAPSHOT_NAME) + else: + snapshot_id = 'xiv{}:0;{}'.format(system_id_part, SNAPSHOT_VOLUME_UID) + self.assertEqual(snapshot_id, response_snapshot.snapshot.snapshot_id) + + def test_create_snapshot_virt_snap_func_enabled_succeeds(self): + self.request.parameters[servers_settings.PARAMETERS_VIRT_SNAP_FUNC] = VIRT_SNAP_FUNC_TRUE + self._test_create_snapshot_succeeds(virt_snap_func=True) + + def test_create_snapshot_succeeds(self, ): + self._test_create_snapshot_succeeds() + + def test_create_snapshot_with_pool_parameter_succeeds(self, ): + 
self.request.parameters = {servers_settings.PARAMETERS_POOL: DUMMY_POOL1} + self._test_create_snapshot_succeeds(expected_pool=DUMMY_POOL1) + + def test_create_snapshot_with_space_efficiency_parameter_succeeds(self): + self.mediator.validate_supported_space_efficiency = Mock() + self.request.parameters = {servers_settings.PARAMETERS_SPACE_EFFICIENCY: SPACE_EFFICIENCY} + self._test_create_snapshot_succeeds(expected_space_efficiency=SPACE_EFFICIENCY) + + def test_create_snapshot_with_space_efficiency_and_virt_snap_func_enabled_fail(self): + self.request.parameters = {servers_settings.PARAMETERS_SPACE_EFFICIENCY: SPACE_EFFICIENCY, + servers_settings.PARAMETERS_VIRT_SNAP_FUNC: VIRT_SNAP_FUNC_TRUE} + + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(grpc.StatusCode.INVALID_ARGUMENT, self.context.code) + + def test_create_snapshot_already_processing(self): + self._test_request_already_processing("name", self.request.name) + + def _test_create_snapshot_with_by_system_id_parameter(self, system_id, expected_pool): + system_id_part = ':{}'.format(system_id) if system_id else '' + self.request.source_volume_id = "{}{}:{}".format("A9000", system_id_part, SNAPSHOT_VOLUME_UID) + self.request.parameters = {servers_settings.PARAMETERS_BY_SYSTEM: json.dumps( + {"u1": {servers_settings.PARAMETERS_POOL: DUMMY_POOL1}, + "u2": {servers_settings.PARAMETERS_POOL: DUMMY_POOL2}})} + self._test_create_snapshot_succeeds(expected_pool=expected_pool, system_id=system_id) + + def test_create_snapshot_with_by_system_id_parameter_succeeds(self): + self._test_create_snapshot_with_by_system_id_parameter("u1", DUMMY_POOL1) + self._test_create_snapshot_with_by_system_id_parameter("u2", DUMMY_POOL2) + self._test_create_snapshot_with_by_system_id_parameter(None, None) + + def test_create_snapshot_belongs_to_wrong_volume(self): + self.mediator.create_snapshot = Mock() + self.mediator.get_snapshot.return_value = utils.get_mock_mediator_response_snapshot(10, SNAPSHOT_NAME, + 
VOLUME_UID, + "wrong_volume_name", "xiv") + + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + + def test_create_snapshot_no_source_volume(self): + self.request.source_volume_id = None + + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_snapshot_with_wrong_secrets(self, ): + self._test_request_with_wrong_secrets() + + def test_create_snapshot_with_array_connection_exception(self): + self._test_request_with_array_connection_exception() + + def _test_create_snapshot_get_snapshot_raise_error(self, exception, grpc_status): + self.mediator.get_snapshot.side_effect = [exception] + + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, grpc_status) + self.assertIn(str(exception), self.context.details) + self.mediator.get_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, None, False) + + def test_create_snapshot_get_snapshot_exception(self): + self._test_create_snapshot_get_snapshot_raise_error(exception=Exception("error"), + grpc_status=grpc.StatusCode.INTERNAL) + + def test_create_snapshot_with_get_snapshot_illegal_object_name_exception(self): + self._test_create_snapshot_get_snapshot_raise_error(exception=array_errors.InvalidArgumentError("snapshot"), + grpc_status=grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_snapshot_with_get_snapshot_illegal_object_id_exception(self): + self._test_create_snapshot_get_snapshot_raise_error(exception=array_errors.InvalidArgumentError("volume-id"), + grpc_status=grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_snapshot_with_prefix_too_long_exception(self): + self.request.parameters.update({"snapshot_name_prefix": "a" * 128}) + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + + def 
test_create_snapshot_with_get_snapshot_name_too_long_success(self): + self._prepare_create_snapshot_mocks() + self.mediator.max_object_name_length = 63 + self.request.name = "a" * 128 + + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def create_snapshot_returns_error(self, return_code, err): + self.mediator.create_snapshot.side_effect = [err] + msg = str(err) + + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, return_code) + self.assertIn(msg, self.context.details) + self.mediator.get_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, None, False) + self.mediator.create_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, SNAPSHOT_NAME, None, None, False) + + def test_create_snapshot_with_not_found_exception(self): + self.create_snapshot_returns_error(return_code=grpc.StatusCode.NOT_FOUND, + err=array_errors.ObjectNotFoundError("source_volume")) + + def test_create_snapshot_with_illegal_object_name_exception(self): + self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.InvalidArgumentError("snapshot")) + + def test_create_snapshot_with_snapshot_source_pool_mismatch_exception(self): + self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.SnapshotSourcePoolMismatch("snapshot_pool", "source_pool")) + + def test_create_snapshot_with_same_volume_name_exists_exception(self): + self.create_snapshot_returns_error(return_code=grpc.StatusCode.INTERNAL, + err=array_errors.ExpectedSnapshotButFoundVolumeError("snapshot", + "endpoint")) + + def test_create_snapshot_with_illegal_object_id_exception(self): + self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.InvalidArgumentError("volume-id")) + + def test_create_snapshot_with_space_efficiency_not_supported_exception(self): + 
self.create_snapshot_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.SpaceEfficiencyNotSupported(["fake"])) + + def test_create_snapshot_with_other_exception(self): + self.create_snapshot_returns_error(return_code=grpc.StatusCode.INTERNAL, err=Exception("error")) + + def test_create_snapshot_with_name_prefix(self): + self.request.name = VOLUME_NAME + self.request.parameters[servers_settings.PARAMETERS_SNAPSHOT_NAME_PREFIX] = NAME_PREFIX + self.mediator.create_snapshot = Mock() + self.mediator.create_snapshot.return_value = utils.get_mock_mediator_response_snapshot(10, SNAPSHOT_NAME, + VOLUME_UID, + SNAPSHOT_VOLUME_NAME, + "xiv") + + self.servicer.CreateSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + full_name = "{}_{}".format(NAME_PREFIX, VOLUME_NAME) + self.mediator.create_snapshot.assert_called_once_with(SNAPSHOT_VOLUME_UID, full_name, None, None, + False) + + +class TestDeleteSnapshot(BaseControllerSetUp, CommonControllerTest): + @property + def tested_method(self): + return self.servicer.DeleteSnapshot + + @property + def tested_method_response_class(self): + return csi_pb2.DeleteSnapshotResponse + + def setUp(self): + super().setUp() + self.mediator.get_snapshot = Mock() + self.mediator.get_snapshot.return_value = None + self.mediator.delete_snapshot = Mock() + self.request.snapshot_id = "A9000:{};{}".format(INTERNAL_SNAPSHOT_ID, SNAPSHOT_VOLUME_UID) + + @patch("controllers.array_action.array_mediator_xiv.XIVArrayMediator.delete_snapshot", Mock()) + def _test_delete_snapshot_succeeds(self, snapshot_id): + self.request.snapshot_id = snapshot_id + + self.servicer.DeleteSnapshot(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_delete_snapshot_with_internal_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:{};{}".format(INTERNAL_SNAPSHOT_ID, SNAPSHOT_VOLUME_UID)) + self.mediator.delete_snapshot.assert_called_once() + + def 
test_delete_snapshot_with_system_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:system_id:{}".format(SNAPSHOT_VOLUME_UID)) + self.mediator.delete_snapshot.assert_called_once() + + def test_delete_snapshot_with_system_id_internal_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:system_id:{};{}".format(INTERNAL_SNAPSHOT_ID, SNAPSHOT_VOLUME_UID)) + self.mediator.delete_snapshot.assert_called_once() + + def test_delete_snapshot_no_internal_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:{}".format(SNAPSHOT_VOLUME_UID)) + self.mediator.delete_snapshot.assert_called_once() + + def test_delete_snapshot_bad_id_succeeds(self): + self._test_delete_snapshot_succeeds("xiv:a:a:volume-id") + self.mediator.delete_snapshot.assert_not_called() + + def test_delete_snapshot_already_processing(self): + self._test_request_already_processing("snapshot_id", self.request.snapshot_id) + + def test_delete_snapshot_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_delete_snapshot_with_array_connection_exception(self): + self._test_request_with_array_connection_exception() + + def test_delete_snapshot_invalid_snapshot_id(self): + self.request.snapshot_id = "wrong_id" + + self.servicer.DeleteSnapshot(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + +class TestCreateVolume(BaseControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.CreateVolume + + @property + def tested_method_response_class(self): + return csi_pb2.CreateVolumeResponse + + def setUp(self): + super().setUp() + + self.mediator.create_volume = Mock() + self.mediator.get_volume = Mock() + self.mediator.get_volume.side_effect = array_errors.ObjectNotFoundError("vol") + self.mediator.get_object_by_id = Mock() + self.mediator.copy_to_existing_volume_from_source = Mock() + + self.request.parameters = {servers_settings.PARAMETERS_POOL: DUMMY_POOL1, + 
servers_settings.PARAMETERS_IO_GROUP: DUMMY_IO_GROUP, + servers_settings.PARAMETERS_VOLUME_GROUP: DUMMY_VOLUME_GROUP} + self.request.volume_capabilities = [self.volume_capability] + self.request.name = VOLUME_NAME + self.request.volume_content_source = None + + def test_create_volume_with_empty_name(self): + self._test_create_object_with_empty_name() + + def _prepare_create_volume_mocks(self): + self.mediator.create_volume = Mock() + self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, "volume", VOLUME_UID, + "xiv") + + def _test_create_volume_succeeds(self, expected_volume_id, expected_pool=DUMMY_POOL1): + self._prepare_create_volume_mocks() + + response_volume = self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, expected_pool, False, None) + self.mediator.create_volume.assert_called_once_with(VOLUME_NAME, 10, None, expected_pool, DUMMY_IO_GROUP, + DUMMY_VOLUME_GROUP, + ObjectIds(internal_id='', uid=''), None, False) + self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') + self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') + self.assertEqual(response_volume.volume.volume_id, expected_volume_id) + + def test_create_volume_already_processing(self): + self._test_request_already_processing("name", self.request.name) + + def test_create_volume_succeeds(self): + self._test_create_volume_succeeds('xiv:{};{}'.format(INTERNAL_VOLUME_ID, VOLUME_UID)) + self.mediator.register_plugin.not_called() + + def test_create_volume_with_topologies_succeeds(self): + self._test_create_volume_with_topologies_succeeds() + + def _test_create_volume_with_topologies_succeeds(self): + self.request.secrets = utils.get_fake_secret_config(system_id="u2", supported_topologies=[ + {"topology.block.csi.ibm.com/test": "topology_value"}]) + self.request.accessibility_requirements.preferred 
= [ + ProtoBufMock(segments={"topology.block.csi.ibm.com/test": "topology_value", + "topology.block.csi.ibm.com/test2": "topology_value2"})] + second_system_parameters = self.request.parameters.copy() + second_system_parameters[servers_settings.PARAMETERS_POOL] = DUMMY_POOL2 + self.request.parameters = {servers_settings.PARAMETERS_BY_SYSTEM: json.dumps( + {"u1": self.request.parameters, "u2": second_system_parameters})} + self._test_create_volume_succeeds('xiv:u2:{};{}'.format(INTERNAL_VOLUME_ID, VOLUME_UID), + expected_pool=DUMMY_POOL2) + self.mediator.register_plugin.assert_called_once_with('topology', '') + + def test_create_volume_with_space_efficiency_succeeds(self): + self._prepare_create_volume_mocks() + self.request.parameters.update({servers_settings.PARAMETERS_SPACE_EFFICIENCY: "not_none"}) + self.mediator.validate_supported_space_efficiency = Mock() + + self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False, None) + self.mediator.create_volume.assert_called_once_with(VOLUME_NAME, 10, "not_none", DUMMY_POOL1, DUMMY_IO_GROUP, + DUMMY_VOLUME_GROUP, + ObjectIds(internal_id='', uid=''), None, False) + + def test_create_volume_idempotent_no_source_succeeds(self): + self._prepare_create_volume_mocks() + self.mediator.get_volume = Mock() + self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, + "xiv") + + response_volume = self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False, None) + self.mediator.create_volume.assert_not_called() + self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') + self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') + + def 
test_create_volume_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_create_volume_no_pool(self): + self._prepare_create_volume_mocks() + self.request.parameters = {"by_system_id": json.dumps({"u1": DUMMY_POOL1, "u2": DUMMY_POOL2})} + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_volume_with_wrong_parameters(self): + self._test_request_with_wrong_parameters() + + def test_create_volume_with_wrong_volume_capabilities(self): + + volume_capability = utils.get_mock_volume_capability(fs_type="ext42") + self.request.volume_capabilities = [volume_capability] + + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT, "wrong fs_type") + self.assertIn("fs_type", self.context.details) + + access_mode = csi_pb2.VolumeCapability.AccessMode + volume_capability = utils.get_mock_volume_capability(mode=access_mode.MULTI_NODE_SINGLE_WRITER) + self.request.volume_capabilities = [volume_capability] + + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertTrue("access mode" in self.context.details) + + volume_capability = utils.get_mock_volume_capability(mount_flags=["no_formatting"]) + self.request.volume_capabilities = [volume_capability] + + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertTrue("mount_flags is unsupported" in self.context.details) + + def test_create_volume_with_array_connection_exception(self): + self._test_request_with_array_connection_exception() + + def test_create_volume_with_get_array_type_exception(self): + self._test_request_with_get_array_type_exception() + + def test_create_volume_get_volume_exception(self): + self.mediator.get_volume.side_effect = [Exception("error")] + + 
self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) + self.assertIn("error", self.context.details) + self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False, None) + + def test_create_volume_with_get_volume_illegal_object_name_exception(self): + self.mediator.get_volume.side_effect = [array_errors.InvalidArgumentError("volume")] + + self.servicer.CreateVolume(self.request, self.context) + msg = array_errors.InvalidArgumentError("volume").message + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertIn(msg, self.context.details) + self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False, None) + + def test_create_volume_with_prefix_too_long_exception(self): + self.request.parameters.update({"volume_name_prefix": "a" * 128}) + self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_volume_with_get_volume_name_too_long_success(self): + self._prepare_create_volume_mocks() + self.mediator.max_object_name_length = 63 + + self.request.name = "a" * 128 + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def create_volume_returns_error(self, return_code, err): + self.mediator.create_volume = Mock() + self.mediator.create_volume.side_effect = [err] + + self.servicer.CreateVolume(self.request, self.context) + msg = str(err) + + self.assertEqual(self.context.code, return_code) + self.assertIn(msg, self.context.details) + self.mediator.get_volume.assert_called_once_with(VOLUME_NAME, DUMMY_POOL1, False, None) + self.mediator.create_volume.assert_called_once_with(VOLUME_NAME, self.capacity_bytes, None, DUMMY_POOL1, + DUMMY_IO_GROUP, + DUMMY_VOLUME_GROUP, ObjectIds(internal_id='', uid=''), + None, False) + + def test_create_volume_with_illegal_object_name_exception(self): + 
self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.InvalidArgumentError("volume")) + + def test_create_volume_with_volume_exists_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.ALREADY_EXISTS, + err=array_errors.VolumeAlreadyExists(VOLUME_NAME, "endpoint")) + + def test_create_volume_with_pool_does_not_exist_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.PoolDoesNotExist(DUMMY_POOL1, "endpoint")) + + def test_create_volume_with_pool_does_not_match_space_efficiency_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.PoolDoesNotMatchSpaceEfficiency(DUMMY_POOL1, "", "endpoint")) + + def test_create_volume_with_space_efficiency_not_supported_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.SpaceEfficiencyNotSupported(["fake"])) + + def test_create_volume_with_other_exception(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, + err=Exception("error")) + + def _test_create_volume_parameters(self, final_name="default_some_name", space_efficiency=None): + self.mediator.default_object_prefix = "default" + self.request.name = "some_name" + self.mediator.create_volume = Mock() + self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, + "xiv") + self.mediator.validate_supported_space_efficiency = Mock() + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.create_volume.assert_called_once_with(final_name, 10, space_efficiency, DUMMY_POOL1, + DUMMY_IO_GROUP, + DUMMY_VOLUME_GROUP, ObjectIds(internal_id='', uid=''), + None, False) + + def test_create_volume_with_name_prefix(self): + 
self.request.parameters[servers_settings.PARAMETERS_VOLUME_NAME_PREFIX] = NAME_PREFIX + self._test_create_volume_parameters("prefix_some_name") + + def test_create_volume_with_no_name_prefix(self): + self.request.parameters[servers_settings.PARAMETERS_VOLUME_NAME_PREFIX] = "" + self._test_create_volume_parameters() + + def _test_create_volume_with_parameters_by_system_prefix(self, get_array_connection_info_from_secrets, prefix, + final_name="default_some_name", + space_efficiency=None): + get_array_connection_info_from_secrets.side_effect = [utils.get_fake_array_connection_info()] + system_parameters = self.request.parameters + system_parameters.update({servers_settings.PARAMETERS_VOLUME_NAME_PREFIX: prefix, + servers_settings.PARAMETERS_SPACE_EFFICIENCY: space_efficiency}) + self.request.parameters = {servers_settings.PARAMETERS_BY_SYSTEM: json.dumps({"u1": system_parameters})} + self._test_create_volume_parameters(final_name, space_efficiency) + + @patch("controllers.servers.utils.get_array_connection_info_from_secrets") + def test_create_volume_with_parameters_by_system_no_name_prefix(self, get_array_connection_info_from_secrets): + self._test_create_volume_with_parameters_by_system_prefix(get_array_connection_info_from_secrets, "") + + @patch("controllers.servers.utils.get_array_connection_info_from_secrets") + def test_create_volume_with_parameters_by_system_name_prefix(self, get_array_connection_info_from_secrets): + self._test_create_volume_with_parameters_by_system_prefix(get_array_connection_info_from_secrets, NAME_PREFIX, + "prefix_some_name") + + @patch("controllers.servers.utils.get_array_connection_info_from_secrets") + def test_create_volume_with_parameters_by_system_space_efficiency(self, get_array_connection_info_from_secrets): + self._test_create_volume_with_parameters_by_system_prefix(get_array_connection_info_from_secrets, "", + space_efficiency="not_none") + + def test_create_volume_with_required_bytes_zero(self): + 
self._prepare_create_volume_mocks() + self.request.capacity_range.required_bytes = 0 + + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.create_volume.assert_called_once_with(self.request.name, 2, None, DUMMY_POOL1, DUMMY_IO_GROUP, + DUMMY_VOLUME_GROUP, + ObjectIds(internal_id='', uid=''), None, False) + + def test_create_volume_with_required_bytes_too_large_fail(self): + self._prepare_create_volume_mocks() + self.request.capacity_range.required_bytes = 11 + + self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OUT_OF_RANGE) + self.mediator.create_volume.assert_not_called() + + def test_create_volume_with_no_space_in_pool(self): + self.create_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, + err=array_errors.NotEnoughSpaceInPool(DUMMY_POOL1)) + + def _prepare_snapshot_request_volume_content_source(self): + self.request.volume_content_source = self._get_source_snapshot(SNAPSHOT_VOLUME_UID) + + def _prepare_idempotent_tests(self): + self.mediator.get_volume = Mock() + self.mediator.copy_to_existing_volume = Mock() + self._prepare_snapshot_request_volume_content_source() + + def test_create_volume_idempotent_with_source_succeed(self): + self._prepare_idempotent_tests() + snapshot_id = SNAPSHOT_VOLUME_UID + self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, + "a9k", + source_id=snapshot_id) + + response = self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.assertEqual(response.volume.content_source.snapshot.snapshot_id, snapshot_id) + self.mediator.copy_to_existing_volume.assert_not_called() + + def test_create_volume_idempotent_with_source_volume_have_no_source(self): + self._prepare_idempotent_tests() + self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, 
VOLUME_NAME, VOLUME_UID, + "a9k") + response = self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + self.assertFalse(response.HasField("volume")) + self.mediator.copy_to_existing_volume.assert_not_called() + + def test_create_volume_idempotent_source_not_requested_but_found_in_volume(self): + self._prepare_idempotent_tests() + snapshot_id = SNAPSHOT_VOLUME_UID + self.request.volume_content_source = None + self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, + "a9k", + source_id=snapshot_id) + response = self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + self.assertFalse(response.HasField("volume")) + self.mediator.copy_to_existing_volume.assert_not_called() + + def _prepare_idempotent_test_with_other_source(self): + self._prepare_idempotent_tests() + volume_source_id = SOURCE_VOLUME_ID + self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, + SNAPSHOT_VOLUME_UID, "a9k", + source_id=volume_source_id) + self.servicer.CreateVolume(self.request, self.context) + self.mediator.copy_to_existing_volume.assert_not_called() + + def test_create_volume_idempotent_with_source_volume_got_other_source(self): + self._prepare_idempotent_test_with_other_source() + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + + def _enable_virt_snap_func(self): + self.request.parameters[servers_settings.PARAMETERS_VIRT_SNAP_FUNC] = "true" + + def test_create_volume_idempotent_with_other_source_and_virt_snap_func_enabled(self): + self._enable_virt_snap_func() + self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume() + self._prepare_idempotent_test_with_other_source() + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + + def 
test_create_volume_virt_snap_func_enabled_no_source(self): + self._enable_virt_snap_func() + self._prepare_snapshot_request_volume_content_source() + self.mediator.get_object_by_id.return_value = None + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + def test_create_volume_virt_snap_func_enabled_no_snapshot_source(self): + self._enable_virt_snap_func() + self._prepare_snapshot_request_volume_content_source() + self.mediator.get_object_by_id.side_effect = [utils.get_mock_mediator_response_snapshot(), None] + self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_volume_idempotent_with_size_not_matched(self): + self.mediator.get_volume = Mock() + self.mediator.get_volume.return_value = utils.get_mock_mediator_response_volume(9, VOLUME_NAME, VOLUME_UID, + "a9k") + + self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + + def _prepare_mocks_for_copy_from_source(self): + self.mediator.create_volume = Mock() + self.mediator.create_volume.return_value = utils.get_mock_mediator_response_volume(10, VOLUME_NAME, VOLUME_UID, + "a9k") + + def test_create_volume_from_snapshot_success(self): + self._prepare_mocks_for_copy_from_source() + snapshot_id = SNAPSHOT_VOLUME_UID + snapshot_capacity_bytes = 100 + self.request.volume_content_source = self._get_source_snapshot(snapshot_id) + self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_snapshot(snapshot_capacity_bytes, + SNAPSHOT_NAME, + snapshot_id, + VOLUME_NAME, + "a9k") + + response_volume = self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.copy_to_existing_volume_from_source.assert_called_once() + self.assertEqual(response_volume.volume.content_source.volume.volume_id, '') + 
self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, snapshot_id) + + def test_create_volume_from_source_source_or_target_not_found(self): + array_exception = array_errors.ObjectNotFoundError("") + self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.NOT_FOUND) + + def test_create_volume_from_source_source_snapshot_invalid(self): + volume_content_source = self._get_source_snapshot(SNAPSHOT_VOLUME_UID) + volume_content_source.snapshot.snapshot_id = 'invalid_snapshot_id' + self.request.volume_content_source = volume_content_source + + self.servicer.CreateVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + self.assertIn("invalid_snapshot_id", self.context.details) + + def test_create_volume_from_source_illegal_object_id(self): + array_exception = array_errors.InvalidArgumentError("") + self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_volume_from_source_permission_denied(self): + array_exception = array_errors.PermissionDeniedError("") + self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.PERMISSION_DENIED) + + def test_create_volume_from_source_pool_missing(self): + array_exception = array_errors.PoolParameterIsMissing("") + self._test_create_volume_from_snapshot_error(array_exception, grpc.StatusCode.INVALID_ARGUMENT) + + def test_create_volume_from_source_general_error(self): + array_exception = Exception("") + self._test_create_volume_from_snapshot_error(array_exception, + grpc.StatusCode.INTERNAL) + + def test_create_volume_from_source_get_object_general_error(self): + array_exception = Exception("") + self._test_create_volume_from_snapshot_error(None, + grpc.StatusCode.INTERNAL, get_exception=array_exception) + + def test_create_volume_from_source_get_object_error(self): + array_exception = array_errors.ExpectedSnapshotButFoundVolumeError("", "") + 
self._test_create_volume_from_snapshot_error(None, + grpc.StatusCode.INVALID_ARGUMENT, get_exception=array_exception) + + def test_create_volume_from_source_get_object_none(self): + self._test_create_volume_from_snapshot_error(None, + grpc.StatusCode.NOT_FOUND, + array_errors.ObjectNotFoundError("")) + + def _test_create_volume_from_snapshot_error(self, copy_exception, return_code, + get_exception=None): + self._prepare_mocks_for_copy_from_source() + source_id = SNAPSHOT_VOLUME_UID + self.request.volume_content_source = self._get_source_snapshot(source_id) + if not copy_exception: + self.mediator.copy_to_existing_volume_from_source.side_effect = [get_exception] + self.storage_agent.get_mediator.return_value.__exit__.side_effect = [get_exception] + else: + self.mediator.copy_to_existing_volume_from_source.side_effect = [copy_exception] + self.storage_agent.get_mediator.return_value.__exit__.side_effect = [copy_exception] + + response = self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, return_code) + self.assertIsInstance(response, csi_pb2.CreateVolumeResponse) + + def test_clone_volume_success(self): + self._prepare_mocks_for_copy_from_source() + volume_id = SOURCE_VOLUME_ID + volume_capacity_bytes = 100 + self.request.volume_content_source = self._get_source_volume(volume_id) + self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(volume_capacity_bytes, + CLONE_VOLUME_NAME, + volume_id, "a9k") + response_volume = self.servicer.CreateVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.copy_to_existing_volume_from_source.assert_called_once() + self.assertEqual(response_volume.volume.content_source.volume.volume_id, volume_id) + self.assertEqual(response_volume.volume.content_source.snapshot.snapshot_id, '') + + def _get_source_volume(self, object_id): + return self._get_source(object_id, servers_settings.VOLUME_TYPE_NAME) + + def 
_get_source_snapshot(self, object_id): + return self._get_source(object_id, servers_settings.SNAPSHOT_TYPE_NAME) + + @staticmethod + def _get_source(object_id, object_type): + source = ProtoBufMock(spec=[object_type]) + id_field_name = servers_settings.VOLUME_SOURCE_ID_FIELDS[object_type] + object_field = MagicMock(spec=[id_field_name]) + setattr(source, object_type, object_field) + setattr(object_field, id_field_name, "a9000:{0}".format(object_id)) + return source + + +class TestDeleteVolume(BaseControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.DeleteVolume + + @property + def tested_method_response_class(self): + return csi_pb2.DeleteVolumeResponse + + def get_create_object_method(self): + return self.servicer.DeleteVolume + + def get_create_object_response_method(self): + return csi_pb2.DeleteVolumeResponse + + def setUp(self): + super().setUp() + + self.mediator.get_volume = Mock() + self.mediator.delete_volume = Mock() + self.mediator.is_volume_has_snapshots = Mock() + self.mediator.is_volume_has_snapshots.return_value = False + + self.request.volume_id = "xiv:0;volume-id" + + def test_delete_volume_already_processing(self): + self._test_request_already_processing("volume_id", self.request.volume_id) + + def test_delete_volume_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_delete_volume_invalid_volume_id(self): + self.request.volume_id = "wrong_id" + + self.servicer.DeleteVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + @patch("controllers.servers.csi.csi_controller_server.get_agent") + def test_delete_volume_with_array_connection_exception(self, storage_agent): + storage_agent.side_effect = [Exception("a_enter error")] + + self.servicer.DeleteVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) + self.assertTrue("a_enter error" in self.context.details) + + def 
delete_volume_returns_error(self, error, return_code): + self.mediator.delete_volume.side_effect = [error] + + self.servicer.DeleteVolume(self.request, self.context) + + self.assertEqual(self.context.code, return_code) + if return_code != grpc.StatusCode.OK: + msg = str(error) + self.assertIn(msg, self.context.details, "msg : {0} is not in : {1}".format(msg, self.context.details)) + + def test_delete_volume_with_volume_not_found_error(self): + self.delete_volume_returns_error(error=array_errors.ObjectNotFoundError("volume"), + return_code=grpc.StatusCode.OK) + + def test_delete_volume_with_delete_volume_other_exception(self): + self.delete_volume_returns_error(error=Exception("error"), return_code=grpc.StatusCode.INTERNAL) + + def test_delete_volume_has_snapshots(self): + self.delete_volume_returns_error(error=array_errors.ObjectIsStillInUseError("a", ["b"]), + return_code=grpc.StatusCode.FAILED_PRECONDITION) + + @patch("controllers.array_action.array_mediator_xiv.XIVArrayMediator.delete_volume") + def _test_delete_volume_succeeds(self, volume_id, delete_volume): + delete_volume.return_value = Mock() + self.request.volume_id = volume_id + self.servicer.DeleteVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_delete_volume_with_internal_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:0;volume-id") + + def test_delete_volume_with_system_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:system_id:volume-id") + + def test_delete_volume_with_system_id_internal_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:system_id:0;volume-id") + + def test_delete_volume_no_internal_id_succeeds(self): + self._test_delete_volume_succeeds("xiv:volume-id") + + +class TestPublishVolume(BaseControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.ControllerPublishVolume + + @property + def tested_method_response_class(self): + return 
csi_pb2.ControllerPublishVolumeResponse + + def setUp(self): + super().setUp() + + self.hostname = "hostname" + + self.mediator.map_volume_by_initiators = Mock() + self.mediator.map_volume_by_initiators.return_value = "2", "iscsi", {"iqn1": ["1.1.1.1", "2.2.2.2"], + "iqn2": ["[::1]"]} + + arr_type = XIVArrayMediator.array_type + self.request.volume_id = "{}:wwn1".format(arr_type) + self.iqn = "iqn.1994-05.com.redhat:686358c930fe" + self.fc_port = "500143802426baf4" + self.request.node_id = "{};;{};{}".format(self.hostname, self.fc_port, self.iqn) + self.request.readonly = False + + self.request.volume_capability = utils.get_mock_volume_capability() + + def test_publish_volume_success(self): + self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_publish_volume_already_processing(self): + self._test_request_already_processing("volume_id", self.request.volume_id) + + @patch("controllers.servers.utils.validate_publish_volume_request") + def test_publish_volume_validateion_exception(self, publish_validation): + publish_validation.side_effect = [controller_errors.ValidationException("msg")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertIn("msg", self.context.details) + + def test_publish_volume_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_publish_volume_wrong_volume_id(self): + self.request.volume_id = "some-wrong-id-format" + + self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + def test_publish_volume_wrong_node_id(self): + self.request.node_id = "some-wrong-id-format" + + self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + def 
test_publish_volume_get_host_by_host_identifiers_exception(self): + self.mediator.map_volume_by_initiators = Mock() + self.mediator.map_volume_by_initiators.side_effect = [array_errors.MultipleHostsFoundError("", "")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertTrue("Multiple hosts" in self.context.details) + self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) + + self.mediator.map_volume_by_initiators.side_effect = [array_errors.HostNotFoundError("")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + def test_publish_volume_with_connectivity_type_fc(self): + self.mediator.map_volume_by_initiators.return_value = "1", "fc", ["500143802426baf4"] + + response = self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + self.assertEqual(response.publish_context["PUBLISH_CONTEXT_LUN"], '1') + self.assertEqual(response.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], "fc") + self.assertEqual(response.publish_context["PUBLISH_CONTEXT_ARRAY_FC_INITIATORS"], "500143802426baf4") + + def test_publish_volume_with_connectivity_type_iscsi(self): + response = self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + self.assertEqual(response.publish_context["PUBLISH_CONTEXT_LUN"], '2') + self.assertEqual(response.publish_context["PUBLISH_CONTEXT_CONNECTIVITY"], + "iscsi") + self.assertEqual(response.publish_context["PUBLISH_CONTEXT_ARRAY_IQN"], + "iqn1,iqn2") + self.assertEqual(response.publish_context["iqn1"], + "1.1.1.1,2.2.2.2") + self.assertEqual(response.publish_context["iqn2"], + "[::1]") + + def test_publish_volume_get_volume_mappings_more_then_one_mapping(self): + self.mediator.map_volume_by_initiators.side_effect = [array_errors.VolumeAlreadyMappedToDifferentHostsError("")] + 
self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.FAILED_PRECONDITION) + self.assertTrue("Volume is already mapped" in self.context.details) + + def test_publish_volume_map_volume_excpetions(self): + self.mediator.map_volume_by_initiators.side_effect = [array_errors.PermissionDeniedError("msg")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.PERMISSION_DENIED) + + self.mediator.map_volume_by_initiators.side_effect = [array_errors.ObjectNotFoundError("volume")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + self.mediator.map_volume_by_initiators.side_effect = [array_errors.HostNotFoundError("host")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + self.mediator.map_volume_by_initiators.side_effect = [array_errors.MappingError("", "", "")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) + + def test_publish_volume_map_volume_lun_already_in_use(self): + self.mediator.map_volume_by_initiators.side_effect = [array_errors.NoAvailableLunError("")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.RESOURCE_EXHAUSTED) + + def test_publish_volume_get_iscsi_targets_by_iqn_excpetions(self): + self.mediator.map_volume_by_initiators.side_effect = [array_errors.NoIscsiTargetsFoundError("some_endpoint")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + def test_map_volume_by_initiators_exceptions(self): + self.mediator.map_volume_by_initiators.side_effect = [ + 
array_errors.UnsupportedConnectivityTypeError("usb")] + + self.servicer.ControllerPublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + + +class TestUnpublishVolume(BaseControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.ControllerUnpublishVolume + + @property + def tested_method_response_class(self): + return csi_pb2.ControllerUnpublishVolumeResponse + + def setUp(self): + super().setUp() + self.hostname = "hostname" + + self.mediator.unmap_volume_by_initiators = Mock() + self.mediator.unmap_volume_by_initiators.return_value = None + + arr_type = XIVArrayMediator.array_type + self.request.volume_id = "{}:wwn1".format(arr_type) + self.request.node_id = "hostname;iqn1;500143802426baf4" + + def test_unpublish_volume_success(self): + self.servicer.ControllerUnpublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_unpublish_volume_already_processing(self): + self._test_request_already_processing("volume_id", self.request.volume_id) + + @patch("controllers.servers.utils.validate_unpublish_volume_request") + def test_unpublish_volume_validation_exception(self, publish_validation): + publish_validation.side_effect = [controller_errors.ValidationException("msg")] + + self.servicer.ControllerUnpublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertIn("msg", self.context.details) + + @patch("controllers.servers.utils.get_volume_id_info") + def test_unpublish_volume_object_id_error(self, get_volume_id_info): + get_volume_id_info.side_effect = [controller_errors.ObjectIdError("object_type", "object_id")] + + self.servicer.ControllerUnpublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.assertIn("object_type", self.context.details) + self.assertIn("object_id", 
self.context.details) + + def test_unpublish_volume_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_unpublish_volume_with_too_much_delimiters_in_volume_id(self): + self.request.volume_id = "too:much:delimiters:in:id" + + self.servicer.ControllerUnpublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + + def test_unpublish_volume_wrong_node_id(self): + self.request.node_id = "some-wrong-id-format" + + self.servicer.ControllerUnpublishVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_unpublish_volume_get_host_by_host_identifiers_multiple_hosts_found_error(self): + self.mediator.unmap_volume_by_initiators.side_effect = [array_errors.MultipleHostsFoundError("", "")] + + self.servicer.ControllerUnpublishVolume(self.request, self.context) + self.assertTrue("Multiple hosts" in self.context.details) + self.assertEqual(self.context.code, grpc.StatusCode.INTERNAL) + + def test_unpublish_volume_get_host_by_host_identifiers_host_not_found_error(self): + self.mediator.get_host_by_host_identifiers = Mock() + self.mediator.get_host_by_host_identifiers.side_effect = [array_errors.HostNotFoundError("")] + + self.servicer.ControllerUnpublishVolume(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def _test_unpublish_volume_unmap_volume_by_initiators_with_error(self, array_error, status_code): + self.mediator.unmap_volume_by_initiators.side_effect = [array_error] + + self.servicer.ControllerUnpublishVolume(self.request, self.context) + self.assertEqual(self.context.code, status_code) + + def test_unpublish_volume_unmap_volume_by_initiators_object_not_found_error(self): + self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.ObjectNotFoundError("volume"), + grpc.StatusCode.OK) + + def test_unpublish_volume_unmap_volume_by_initiators_volume_already_unmapped_error(self): + 
self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.VolumeAlreadyUnmappedError(""), + grpc.StatusCode.OK) + + def test_unpublish_volume_unmap_volume_by_initiators_permission_denied_error(self): + self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.PermissionDeniedError("msg"), + grpc.StatusCode.PERMISSION_DENIED) + + def test_unpublish_volume_unmap_volume_by_initiators_host_not_found_error(self): + self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.HostNotFoundError("host"), + grpc.StatusCode.OK) + + def test_unpublish_volume_unmap_volume_by_initiators_unmapping_error(self): + self._test_unpublish_volume_unmap_volume_by_initiators_with_error(array_errors.UnmappingError("", "", ""), + grpc.StatusCode.INTERNAL) + + +class TestGetCapabilities(BaseControllerSetUp): + + def test_controller_get_capabilities(self): + self.servicer.ControllerGetCapabilities(self.request, self.context) + + +class TestExpandVolume(BaseControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.ControllerExpandVolume + + @property + def tested_method_response_class(self): + return csi_pb2.ControllerExpandVolumeResponse + + def setUp(self): + super().setUp() + + self.mediator.expand_volume = Mock() + + self.request.parameters = {} + self.volume_id = "vol-id" + self.request.volume_id = "{}:{}".format("xiv", self.volume_id) + self.request.volume_content_source = None + self.mediator.get_object_by_id = Mock() + self.volume_before_expand = utils.get_mock_mediator_response_volume(2, VOLUME_NAME, self.volume_id, "a9k") + self.volume_after_expand = utils.get_mock_mediator_response_volume(self.capacity_bytes, VOLUME_NAME, + self.volume_id, "a9k") + self.mediator.get_object_by_id.side_effect = [self.volume_before_expand, self.volume_after_expand] + self.request.volume_capability = self.volume_capability + + def _prepare_expand_volume_mocks(self): + self.mediator.expand_volume = 
Mock() + + def test_expand_volume_already_processing(self): + self._test_request_already_processing("volume_id", self.request.volume_id) + + def test_expand_volume_with_required_bytes_too_large_fail(self): + self._prepare_expand_volume_mocks() + self.request.capacity_range.required_bytes = 11 + + self.servicer.ControllerExpandVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OUT_OF_RANGE) + self.mediator.expand_volume.assert_not_called() + + def _test_no_expand_needed(self): + response = self.servicer.ControllerExpandVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.assertFalse(response.node_expansion_required) + self.assertEqual(response.capacity_bytes, self.volume_before_expand.capacity_bytes) + self.mediator.expand_volume.assert_not_called() + + def test_expand_volume_with_required_bytes_below_minimal(self): + self._prepare_expand_volume_mocks() + self.request.capacity_range.required_bytes = 1 + self._test_no_expand_needed() + + def test_expand_volume_with_required_bytes_zero(self): + self._prepare_expand_volume_mocks() + self.request.capacity_range.required_bytes = 0 + self._test_no_expand_needed() + + def test_expand_volume_with_volume_size_already_in_range(self): + self._prepare_expand_volume_mocks() + self.request.capacity_range.required_bytes = 2 + self._test_no_expand_needed() + + def test_expand_volume_succeeds(self): + self._prepare_expand_volume_mocks() + + response = self.servicer.ControllerExpandVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.assertTrue(response.node_expansion_required) + self.assertEqual(response.capacity_bytes, self.volume_after_expand.capacity_bytes) + self.mediator.expand_volume.assert_called_once_with(volume_id=self.volume_id, + required_bytes=self.capacity_bytes) + + def test_expand_volume_with_bad_id(self): + self._prepare_expand_volume_mocks() + self.request.volume_id = "123" + + 
self.servicer.ControllerExpandVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.mediator.expand_volume.assert_not_called() + + def test_expand_volume_not_found_before_expansion(self): + self._prepare_expand_volume_mocks() + self.mediator.get_object_by_id.side_effect = [None, None] + + self.servicer.ControllerExpandVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + def test_expand_volume_not_found_after_expansion(self): + self._prepare_expand_volume_mocks() + self.mediator.get_object_by_id.side_effect = [self.volume_before_expand, None] + + self.servicer.ControllerExpandVolume(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + + def test_expand_volume_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_expand_volume_with_array_connection_exception(self): + self._test_request_with_array_connection_exception() + + def _expand_volume_returns_error(self, return_code, err): + self.mediator.expand_volume.side_effect = [err] + msg = str(err) + + self.servicer.ControllerExpandVolume(self.request, self.context) + + self.assertEqual(self.context.code, return_code) + self.assertIn(msg, self.context.details) + self.mediator.expand_volume.assert_called_once_with(volume_id=self.volume_id, + required_bytes=self.capacity_bytes) + + def test_expand_volume_with_illegal_object_id_exception(self): + self._expand_volume_returns_error(return_code=grpc.StatusCode.INVALID_ARGUMENT, + err=array_errors.InvalidArgumentError("123")) + + def test_expand_volume_with_permission_denied_exception(self): + self._expand_volume_returns_error(return_code=grpc.StatusCode.PERMISSION_DENIED, + err=array_errors.PermissionDeniedError("msg")) + + def test_expand_volume_with_object_not_found_exception(self): + self._expand_volume_returns_error(return_code=grpc.StatusCode.NOT_FOUND, + 
err=array_errors.ObjectNotFoundError("name")) + + def test_expand_volume_with_object_in_use_exception(self): + self._expand_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, + err=array_errors.ObjectIsStillInUseError("a", ["b"])) + + def test_expand_volume_with_other_exception(self): + self._expand_volume_returns_error(return_code=grpc.StatusCode.INTERNAL, + err=Exception("error")) + + def test_expand_volume_with_no_space_in_pool_exception(self): + self._expand_volume_returns_error(return_code=grpc.StatusCode.RESOURCE_EXHAUSTED, + err=array_errors.NotEnoughSpaceInPool(DUMMY_POOL1)) + + +class TestIdentityServer(BaseControllerSetUp): + + @patch("controllers.common.config.config.identity") + def test_identity_plugin_get_info_succeeds(self, identity_config): + plugin_name = "plugin-name" + version = "1.1.0" + identity_config.name = plugin_name + identity_config.version = version + request = Mock() + context = Mock() + request.volume_capabilities = [] + response = self.servicer.GetPluginInfo(request, context) + self.assertEqual(response, csi_pb2.GetPluginInfoResponse(name=plugin_name, vendor_version=version)) + + @patch("controllers.common.config.config.identity") + def test_identity_plugin_get_info_fails_when_attributes_from_config_are_missing(self, identity_config): + request = Mock() + context = Mock() + + identity_config.mock_add_spec(spec=["name"]) + response = self.servicer.GetPluginInfo(request, context) + context.set_code.assert_called_once_with(grpc.StatusCode.INTERNAL) + self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) + + identity_config.mock_add_spec(spec=["version"]) + response = self.servicer.GetPluginInfo(request, context) + self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) + context.set_code.assert_called_with(grpc.StatusCode.INTERNAL) + + @patch("controllers.common.config.config.identity") + def test_identity_plugin_get_info_fails_when_name_or_version_are_empty(self, identity_config): + request = Mock() + context = Mock() 
+ + identity_config.name = "" + identity_config.version = "1.1.0" + response = self.servicer.GetPluginInfo(request, context) + context.set_code.assert_called_once_with(grpc.StatusCode.INTERNAL) + self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) + + identity_config.name = "name" + identity_config.version = "" + response = self.servicer.GetPluginInfo(request, context) + self.assertEqual(response, csi_pb2.GetPluginInfoResponse()) + self.assertEqual(context.set_code.call_args_list, + [call(grpc.StatusCode.INTERNAL), call(grpc.StatusCode.INTERNAL)]) + + def test_identity_get_plugin_capabilities(self): + request = Mock() + context = Mock() + self.servicer.GetPluginCapabilities(request, context) + + def test_identity_probe(self): + request = Mock() + context = Mock() + self.servicer.Probe(request, context) + + +class TestValidateVolumeCapabilities(BaseControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.ValidateVolumeCapabilities + + @property + def tested_method_response_class(self): + return csi_pb2.ValidateVolumeCapabilitiesResponse + + def setUp(self): + super().setUp() + + arr_type = XIVArrayMediator.array_type + self.request.volume_id = "{}:{}".format(arr_type, VOLUME_UID) + self.request.parameters = {servers_settings.PARAMETERS_POOL: DUMMY_POOL1} + + self.mediator.get_object_by_id = Mock() + self.mediator.get_object_by_id.return_value = utils.get_mock_mediator_response_volume(10, "vol", VOLUME_UID, + "a9k") + self.request.volume_capabilities = [self.volume_capability] + + def _assert_response(self, expected_status_code, expected_details_substring): + self.assertEqual(self.context.code, expected_status_code) + self.assertTrue(expected_details_substring in self.context.details) + + def test_validate_volume_capabilities_already_processing(self): + self._test_request_already_processing("volume_id", self.request.volume_id) + + def test_validate_volume_capabilities_success(self): + 
self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + @patch("controllers.servers.utils.get_volume_id_info") + def test_validate_volume_capabilities_object_id_error(self, get_volume_id_info): + get_volume_id_info.side_effect = [controller_errors.ObjectIdError("object_type", "object_id")] + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) + self.assertIn("object_type", self.context.details) + self.assertIn("object_id", self.context.details) + + def test_validate_volume_capabilities_with_empty_id(self): + self.request.volume_id = "" + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "volume_id") + + def test_validate_volume_capabilities_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_validate_volume_capabilities_with_unsupported_access_mode(self): + self.request.volume_capabilities[0].access_mode.mode = 999 + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "unsupported access mode") + + def test_validate_volume_capabilities_with_unsupported_fs_type(self): + volume_capability = utils.get_mock_volume_capability(fs_type="ext3") + self.request.volume_capabilities = [volume_capability] + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "fs_type") + + def test_validate_volume_capabilities_with_no_capabilities(self): + self.request.volume_capabilities = {} + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "not set") + + def test_validate_volume_capabilities_with_bad_id(self): + self.request.volume_id = VOLUME_UID + + 
self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.NOT_FOUND, "id format") + + def test_validate_volume_capabilities_with_volume_not_found(self): + self.mediator.get_object_by_id.return_value = None + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.NOT_FOUND, VOLUME_UID) + + def test_validate_volume_capabilities_with_volume_context_not_match(self): + self.request.volume_context = {servers_settings.VOLUME_CONTEXT_VOLUME_NAME: "fake"} + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "volume context") + + def test_validate_volume_capabilities_with_space_efficiency_not_match(self): + self.request.parameters.update({servers_settings.PARAMETERS_SPACE_EFFICIENCY: "not_none"}) + self.mediator.validate_supported_space_efficiency = Mock() + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, "space efficiency") + + def test_validate_volume_capabilities_with_pool_not_match(self): + self.request.parameters.update({servers_settings.PARAMETERS_POOL: "other pool"}) + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, DUMMY_POOL1) + + def test_validate_volume_capabilities_with_prefix_not_match(self): + self.request.parameters.update({servers_settings.PARAMETERS_VOLUME_NAME_PREFIX: NAME_PREFIX}) + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self._assert_response(grpc.StatusCode.INVALID_ARGUMENT, NAME_PREFIX) + + def test_validate_volume_capabilities_parameters_success(self): + self.request.parameters = {servers_settings.PARAMETERS_VOLUME_NAME_PREFIX: NAME_PREFIX, + servers_settings.PARAMETERS_POOL: "pool2", + servers_settings.PARAMETERS_SPACE_EFFICIENCY: "not_none"} + volume_response = 
utils.get_mock_mediator_response_volume(10, "prefix_vol", VOLUME_UID, "a9k", + space_efficiency="not_none") + volume_response.pool = "pool2" + self.mediator.get_object_by_id.return_value = volume_response + self.mediator.validate_supported_space_efficiency = Mock() + + self.servicer.ValidateVolumeCapabilities(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.OK) diff --git a/controllers/tests/controller_server/host_definer/common.py b/controllers/tests/controller_server/host_definer/common.py deleted file mode 100644 index 76de06b0f..000000000 --- a/controllers/tests/controller_server/host_definer/common.py +++ /dev/null @@ -1,20 +0,0 @@ -import unittest -from mock import patch -from kubernetes.client.rest import ApiException - -import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils -import controllers.tests.controller_server.host_definer.settings as test_settings - - -class BaseSetUp(unittest.TestCase): - def setUp(self): - test_utils.patch_kubernetes_manager_init() - self.os = patch('{}.os'.format(test_settings.WATCHER_HELPER_PATH)).start() - self.nodes_on_watcher_helper = test_utils.patch_nodes_global_variable(test_settings.WATCHER_HELPER_PATH) - self.managed_secrets_on_watcher_helper = test_utils.patch_managed_secrets_global_variable( - test_settings.WATCHER_HELPER_PATH) - self.k8s_node_with_manage_node_label = test_utils.get_fake_k8s_node(test_settings.MANAGE_NODE_LABEL) - self.k8s_node_with_fake_label = test_utils.get_fake_k8s_node(test_settings.FAKE_LABEL) - self.ready_k8s_host_definitions = test_utils.get_fake_k8s_host_definitions_items(test_settings.READY_PHASE) - self.http_resp = test_utils.get_error_http_resp() - self.fake_api_exception = ApiException(http_resp=self.http_resp) diff --git a/controllers/tests/controller_server/host_definer/csi_node_watcher_test.py b/controllers/tests/controller_server/host_definer/csi_node_watcher_test.py deleted file mode 100644 index a224e11b4..000000000 
--- a/controllers/tests/controller_server/host_definer/csi_node_watcher_test.py +++ /dev/null @@ -1,232 +0,0 @@ -from kubernetes.client.rest import ApiException - -import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils -import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as k8s_manifests_utils -import controllers.tests.controller_server.host_definer.settings as test_settings -import controllers.common.settings as common_settings -from controllers.tests.controller_server.host_definer.common import BaseSetUp -from controllers.servers.host_definer.types import DefineHostResponse -from controllers.servers.host_definer.watcher.csi_node_watcher import CsiNodeWatcher - - -class CsiNodeWatcherBase(BaseSetUp): - def setUp(self): - super().setUp() - self.csi_node_watcher = test_utils.get_class_mock(CsiNodeWatcher) - self.nodes_on_csi_node_watcher = test_utils.patch_nodes_global_variable(test_settings.CSI_NODE_WATCHER_PATH) - self.updated_daemon_set = test_utils.get_fake_k8s_daemon_set_items(1, 1) - self.not_updated_daemon_set = test_utils.get_fake_k8s_daemon_set_items(0, 1) - self.deleted_daemon_set = test_utils.get_fake_k8s_daemon_set_items(0, 0) - self.managed_secrets_on_csi_node_watcher = test_utils.patch_managed_secrets_global_variable( - test_settings.CSI_NODE_WATCHER_PATH) - - -class TestAddInitialCsiNodes(CsiNodeWatcherBase): - def test_host_not_defined_for_csi_node_without_ibm_block_provider(self): - self.csi_node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_nodes( - test_settings.FAKE_CSI_PROVISIONER, 1) - self.csi_node_watcher.add_initial_csi_nodes() - self.assertEqual(0, len(self.nodes_on_watcher_helper)) - - def test_host_not_defined_for_node_without_labels_and_no_dynamic_labeling(self): - self._prepare_default_mocks_for_add_node() - self.os.getenv.return_value = '' - self.csi_node_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - 
self.csi_node_watcher.add_initial_csi_nodes() - self.assertEqual(0, len(self.nodes_on_watcher_helper)) - - def test_host_defined_for_node_with_manage_label(self): - self._prepare_default_mocks_for_add_node() - self.os.getenv.return_value = '' - self.csi_node_watcher.add_initial_csi_nodes() - self.assertEqual(1, len(self.nodes_on_watcher_helper)) - - def test_host_defined_for_multiple_nodes_with_dynamic_labeling(self): - self._prepare_default_mocks_for_add_node() - self.csi_node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_nodes( - test_settings.CSI_PROVISIONER_NAME, 2) - self.csi_node_watcher.add_initial_csi_nodes() - self.assertEqual(2, len(self.nodes_on_watcher_helper)) - - def test_add_node_not_update_labels(self): - self._prepare_default_mocks_for_add_node() - self.csi_node_watcher.add_initial_csi_nodes() - self.csi_node_watcher.core_api.patch_node.assert_not_called() - self.assertEqual(1, len(self.nodes_on_watcher_helper)) - - def test_add_node_update_labels(self): - self._prepare_default_mocks_for_add_node() - self.csi_node_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - self.csi_node_watcher.add_initial_csi_nodes() - self.csi_node_watcher.core_api.patch_node.assert_called_once_with( - test_settings.FAKE_NODE_NAME + '-0', - k8s_manifests_utils.get_metadata_with_manage_node_labels_manifest(test_settings.TRUE_STRING)) - self.assertEqual(1, len(self.nodes_on_watcher_helper)) - - def test_update_node_label_fail(self): - self._prepare_default_mocks_for_add_node() - self.csi_node_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - self.csi_node_watcher.core_api.patch_node.side_effect = self.fake_api_exception - self.csi_node_watcher.add_initial_csi_nodes() - self.assertEqual(1, len(self.nodes_on_watcher_helper)) - - def _prepare_default_mocks_for_add_node(self): - self.csi_node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_nodes( - test_settings.CSI_PROVISIONER_NAME, 1) - 
self.os.getenv.return_value = test_settings.TRUE_STRING - self.csi_node_watcher.core_api.read_node.return_value = self.k8s_node_with_manage_node_label - - def test_get_csi_nodes_fail(self): - self.csi_node_watcher.csi_nodes_api.get.side_effect = self.fake_api_exception - self.csi_node_watcher.add_initial_csi_nodes() - self.assertEqual(0, len(self.nodes_on_watcher_helper)) - - -class TestWatchCsiNodesResources(CsiNodeWatcherBase): - - def test_updated_csi_node_not_removed(self): - self._prepare_mocks_for_updated_csi_node() - self.csi_node_watcher.host_definitions_api.get.return_value = test_utils.get_empty_k8s_host_definitions() - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(1, len(self.nodes_on_csi_node_watcher)) - self.csi_node_watcher.storage_host_servicer.define_host.assert_not_called() - - def test_updated_node_id_of_csi_node(self): - self._prepare_mocks_for_updated_csi_node() - host_definitions = self.ready_k8s_host_definitions - host_definitions.items[0].spec.hostDefinition.nodeId = 'other_node_id' - self.csi_node_watcher.host_definitions_api.get.return_value = self.ready_k8s_host_definitions - self.csi_node_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(1, len(self.nodes_on_csi_node_watcher)) - self.csi_node_watcher.storage_host_servicer.define_host.assert_called() - - def _prepare_mocks_for_updated_csi_node(self): - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.nodes_on_csi_node_watcher[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.managed_secrets_on_csi_node_watcher.append(test_utils.get_fake_secret_info()) - self.csi_node_watcher.csi_nodes_api.watch.return_value = iter( - [test_utils.get_fake_csi_node_watch_event(test_settings.DELETED_EVENT_TYPE)]) - 
self.csi_node_watcher.core_api.read_node.return_value = self.k8s_node_with_manage_node_label - self.csi_node_watcher.apps_api.list_daemon_set_for_all_namespaces.side_effect = [ - self.not_updated_daemon_set, self.updated_daemon_set] - self.csi_node_watcher.core_api.list_pod_for_all_namespaces.return_value = test_utils.get_fake_k8s_pods_items() - - def test_delete_host_definition(self): - self._prepare_default_mocks_for_deletion() - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.csi_node_watcher.host_definitions_api.delete.assert_called_once_with( - name=test_settings.FAKE_NODE_NAME, body={}) - self.assertEqual(0, len(self.nodes_on_csi_node_watcher)) - - def test_delete_host_from_storage_failed(self): - self._prepare_default_mocks_for_deletion() - self.csi_node_watcher.storage_host_servicer.undefine_host.return_value = DefineHostResponse( - error_message=test_settings.FAIL_MESSAGE_FROM_STORAGE) - self.csi_node_watcher.custom_object_api.patch_cluster_custom_object_status.return_value = None - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.csi_node_watcher.core_api.create_namespaced_event.assert_called() - - def test_fail_to_get_host_definitions_delete_host_definition_not_called(self): - self._prepare_default_mocks_for_deletion() - self.csi_node_watcher.host_definitions_api.get.side_effect = self.fake_api_exception - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(0, len(self.nodes_on_csi_node_watcher)) - self.csi_node_watcher.host_definitions_api.delete.assert_not_called() - - def test_remove_manage_node_label(self): - self._prepare_default_mocks_for_deletion() - self.csi_node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node( - test_settings.FAKE_CSI_PROVISIONER) - self.csi_node_watcher.host_definitions_api.get.return_value = test_utils.get_empty_k8s_host_definitions() - 
test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(0, len(self.nodes_on_csi_node_watcher)) - self.csi_node_watcher.core_api.patch_node.assert_called_once_with( - test_settings.FAKE_NODE_NAME, k8s_manifests_utils.get_metadata_with_manage_node_labels_manifest(None)) - - def test_nodes_global_variable_reduced_on_csi_node_deletion_and_definer_cannot_delete(self): - self._prepare_default_mocks_for_deletion() - self.os.getenv.return_value = '' - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(0, len(self.nodes_on_csi_node_watcher)) - self.csi_node_watcher.storage_host_servicer.undefine_host.assert_not_called() - - def test_nodes_global_variable_reduced_on_failed_daemon_set_list(self): - self._prepare_default_mocks_for_deletion() - self.csi_node_watcher.apps_api.list_daemon_set_for_all_namespaces.side_effect = ApiException( - http_resp=self.http_resp) - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(0, len(self.nodes_on_csi_node_watcher)) - - def test_failed_pods_list_log_message(self): - self._prepare_default_mocks_for_deletion() - self.csi_node_watcher.core_api.list_pod_for_all_namespaces.side_effect = ApiException( - http_resp=self.http_resp) - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(0, len(self.nodes_on_csi_node_watcher)) - - def test_csi_node_deleted_with_modify_event(self): - self._prepare_default_mocks_for_deletion() - self.csi_node_watcher.csi_nodes_api.watch.return_value = iter( - [test_utils.get_fake_csi_node_watch_event(test_settings.MODIFIED_EVENT_TYPE)]) - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.csi_node_watcher.host_definitions_api.delete.assert_called_once_with( - name=test_settings.FAKE_NODE_NAME, body={}) - - def 
_prepare_default_mocks_for_deletion(self): - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.nodes_on_csi_node_watcher[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.csi_node_watcher.csi_nodes_api.watch.return_value = iter( - [test_utils.get_fake_csi_node_watch_event(test_settings.DELETED_EVENT_TYPE)]) - self.csi_node_watcher.core_api.read_node.side_effect = [ - self.k8s_node_with_manage_node_label, - self.k8s_node_with_fake_label, self.k8s_node_with_fake_label] - self.csi_node_watcher.apps_api.list_daemon_set_for_all_namespaces.return_value = self.deleted_daemon_set - self.csi_node_watcher.core_api.list_pod_for_all_namespaces.return_value = test_utils.get_empty_k8s_pods() - self.os.getenv.return_value = test_settings.TRUE_STRING - self.csi_node_watcher.storage_host_servicer.undefine_host.return_value = DefineHostResponse() - self.csi_node_watcher.host_definitions_api.get.return_value = self.ready_k8s_host_definitions - self.csi_node_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.csi_node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node( - test_settings.CSI_PROVISIONER_NAME) - self.managed_secrets_on_csi_node_watcher.append(test_utils.get_fake_secret_info()) - - def test_define_host_called_on_new_csi_node(self): - self._prepare_default_mocks_for_modified_event() - self.csi_node_watcher.host_definitions_api.get.side_effect = [ - test_utils.get_empty_k8s_host_definitions(), self.ready_k8s_host_definitions] - self.os.getenv.side_effect = [test_settings.TRUE_STRING, test_settings.FAKE_PREFIX, ''] - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.assertEqual(1, len(self.nodes_on_watcher_helper)) - self.csi_node_watcher.storage_host_servicer.define_host.assert_called_once_with(test_utils.get_define_request( - prefix=test_settings.FAKE_PREFIX, 
node_id_from_host_definition=test_settings.FAKE_NODE_ID)) - - def test_define_host_not_called_on_new_csi_node_when_failed_to_get_secret(self): - self._prepare_default_mocks_for_modified_event() - self.csi_node_watcher.host_definitions_api.get.side_effect = [ - test_utils.get_empty_k8s_host_definitions(), self.ready_k8s_host_definitions] - self.csi_node_watcher.core_api.read_namespaced_secret.side_effect = self.fake_api_exception - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.csi_node_watcher.storage_host_servicer.define_host.assert_not_called() - - def test_fail_define_host_on_storage(self): - self._prepare_default_mocks_for_modified_event() - self.csi_node_watcher.host_definitions_api.get.side_effect = [ - test_utils.get_empty_k8s_host_definitions(), self.ready_k8s_host_definitions] - self.csi_node_watcher.storage_host_servicer.define_host.return_value = DefineHostResponse( - error_message=test_settings.FAIL_MESSAGE_FROM_STORAGE) - test_utils.run_function_with_timeout(self.csi_node_watcher.watch_csi_nodes_resources, 0.5) - self.csi_node_watcher.custom_object_api.patch_cluster_custom_object_status.assert_called_with( - common_settings.CSI_IBM_GROUP, common_settings.VERSION, - common_settings.HOST_DEFINITION_PLURAL, test_settings.FAKE_NODE_NAME, - test_utils.get_pending_creation_status_manifest()) - - def _prepare_default_mocks_for_modified_event(self): - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.managed_secrets_on_watcher_helper.append(test_utils.get_fake_secret_info()) - self.csi_node_watcher.csi_nodes_api.watch.return_value = iter( - [test_utils.get_fake_csi_node_watch_event(test_settings.MODIFIED_EVENT_TYPE)]) - self.os.getenv.return_value = test_settings.TRUE_STRING - self.csi_node_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.csi_node_watcher.storage_host_servicer.define_host.return_value = 
DefineHostResponse() - self.csi_node_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label diff --git a/controllers/tests/controller_server/host_definer/definition_manager/__init__.py b/controllers/tests/controller_server/host_definer/definition_manager/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/tests/controller_server/host_definer/definition_manager/definition_manager_test.py b/controllers/tests/controller_server/host_definer/definition_manager/definition_manager_test.py new file mode 100644 index 000000000..478bbac14 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/definition_manager/definition_manager_test.py @@ -0,0 +1,269 @@ +import unittest +from copy import deepcopy +from unittest.mock import MagicMock, Mock, patch + +import controllers.servers.host_definer.messages as messages +from controllers.servers.host_definer.types import DefineHostResponse +from controllers.servers.host_definer.k8s.api import K8SApi +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as test_manifest_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +from controllers.servers.host_definer.definition_manager.definition import DefinitionManager + + +class TestDefinitionManager(unittest.TestCase): + def setUp(self): + test_utils.patch_function(K8SApi, '_load_cluster_configuration') + test_utils.patch_function(K8SApi, '_get_dynamic_client') + self.manager = DefinitionManager() + self.manager.secret_manager = MagicMock() + self.manager.k8s_api = MagicMock() + self.manager.request_manager = MagicMock() + self.manager.host_definition_manager = MagicMock() + self.manager.storage_host_servicer = MagicMock() + self.global_managed_nodes = test_utils.patch_nodes_global_variable(test_settings.DEFINITION_MANAGER_PATH) + self.global_managed_secrets = 
test_utils.patch_managed_secrets_global_variable( + test_settings.DEFINITION_MANAGER_PATH) + self.fake_host_define_response = test_utils.get_fake_define_host_response() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.fake_secret_info = test_utils.get_fake_secret_info() + self.fake_host_definition_manifest = test_manifest_utils.get_fake_k8s_host_definition_manifest() + self.secret_info_with_no_storage_classes = test_utils.get_fake_secret_info(0) + self.secret_info_with_storage_classes = test_utils.get_fake_secret_info(2) + + def test_define_host_on_all_storages_success(self): + self.global_managed_secrets.append(self.secret_info_with_no_storage_classes) + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + self.manager.host_definition_manager.get_host_definition_info_from_secret_and_node_name.return_value = \ + self.fake_host_definition_info + self.manager.create_definition = Mock() + self.manager.define_node_on_all_storages(test_settings.FAKE_NODE_NAME) + self.manager.host_definition_manager.get_host_definition_info_from_secret_and_node_name.assert_called_once_with( + test_settings.FAKE_NODE_NAME, self.secret_info_with_storage_classes) + self.manager.create_definition.assert_called_once_with(self.fake_host_definition_info) + + def test_define_single_nodes(self): + self.global_managed_nodes[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() + host_definition_info = deepcopy(self.fake_host_definition_info) + host_definition_info.node_name = 'test_name' + self.manager.host_definition_manager.add_name_to_host_definition_info.return_value = \ + self.fake_host_definition_info + self.manager.create_definition = Mock() + self.manager.define_nodes(host_definition_info) + self.manager.host_definition_manager.add_name_to_host_definition_info.assert_called_once_with( + test_settings.FAKE_NODE_NAME, host_definition_info) + 
self.manager.create_definition.assert_called_once_with(self.fake_host_definition_info) + + def test_define_multiple_nodes(self): + self.global_managed_nodes[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() + self.global_managed_nodes[test_settings.FAKE_NODE_NAME + '2'] = test_utils.get_fake_managed_node() + self.manager.create_definition = Mock() + self.manager.define_nodes(self.fake_host_definition_info) + self.assertEqual(self.manager.create_definition.call_count, 2) + self.assertEqual(self.manager.host_definition_manager.add_name_to_host_definition_info.call_count, 2) + + def test_create_definition_success(self): + current_host_definition_info_on_cluster = self._prepare_create_definition(True) + self._test_create_host_definition() + + self.manager.host_definition_manager.update_host_definition_info.assert_called_once_with( + self.fake_host_definition_info) + self.manager.define_host.assert_called_once_with(self.fake_host_definition_info) + self.manager.host_definition_manager.create_host_definition_if_not_exist.assert_called_once_with( + self.fake_host_definition_info, self.fake_host_define_response) + self.manager.host_definition_manager.set_status_to_host_definition_after_definition.assert_called_once_with( + self.fake_host_define_response.error_message, current_host_definition_info_on_cluster) + + def test_do_not_create_definition_when_node_should_not_be_managed_by_secret(self): + self._prepare_create_definition(False) + self._test_create_host_definition() + self.manager.host_definition_manager.update_host_definition_info.assert_not_called() + self.manager.define_host.assert_not_called() + self.manager.host_definition_manager.create_host_definition_if_not_exist.assert_not_called() + self.manager.host_definition_manager.set_status_to_host_definition_after_definition.assert_not_called() + + def _prepare_create_definition(self, is_node_should_be_managed): + self.manager.secret_manager.is_node_should_be_managed_on_secret.return_value = 
is_node_should_be_managed + self.manager.define_host = Mock() + self.manager.define_host.return_value = self.fake_host_define_response + current_host_definition_info_on_cluster = deepcopy(self.fake_host_definition_info) + current_host_definition_info_on_cluster.node_name = 'current_host_on_cluster' + self.manager.host_definition_manager.update_host_definition_info.return_value = self.fake_host_definition_info + self.manager.host_definition_manager.create_host_definition_if_not_exist.return_value = \ + current_host_definition_info_on_cluster + return current_host_definition_info_on_cluster + + def _test_create_host_definition(self): + self.manager.create_definition(self.fake_host_definition_info) + self._assert_is_node_should_be_managed() + + def test_define_host_success(self): + self._test_define_host('request', 'response', self.manager.storage_host_servicer.define_host) + self.manager.storage_host_servicer.define_host.assert_called_once_with('request') + + def test_fail_to_generate_request_for_define_host(self): + expected_response = self._get_response_after_failing_to_generate_request() + self._test_define_host(None, expected_response, self.manager.storage_host_servicer.define_host) + self.manager.storage_host_servicer.define_host.assert_not_called() + + def _test_define_host(self, request, expected_response, define_function): + self._ensure_definition_state_function(request, expected_response, define_function) + result = self.manager.define_host(self.fake_host_definition_info) + self._assert_definition_state(expected_response, result) + + def test_undefine_host_success(self): + self._test_undefine_host('request', 'response', self.manager.storage_host_servicer.undefine_host) + self.manager.storage_host_servicer.undefine_host.assert_called_once_with('request') + + def test_fail_to_generate_request_for_undefine_host(self): + expected_response = self._get_response_after_failing_to_generate_request() + self._test_undefine_host(None, expected_response, 
self.manager.storage_host_servicer.undefine_host) + self.manager.storage_host_servicer.define_host.assert_not_called() + + def _get_response_after_failing_to_generate_request(self): + response = DefineHostResponse() + response.error_message = messages.FAILED_TO_GET_SECRET_EVENT.format( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + return response + + def _test_undefine_host(self, request, expected_response, define_function): + self._ensure_definition_state_function(request, expected_response, define_function) + result = self.manager.undefine_host(self.fake_host_definition_info) + self._assert_definition_state(expected_response, result) + + def _ensure_definition_state_function(self, request, expected_response, define_function): + self.manager.request_manager.generate_request.return_value = request + define_function.return_value = expected_response + + def _assert_definition_state(self, expected_response, result): + self.assertEqual(result, expected_response) + self.manager.request_manager.generate_request.assert_called_once_with(self.fake_host_definition_info) + + def test_delete_definition_success(self): + self._test_delete_definition(True, 'response') + self.manager.undefine_host.assert_called_once_with(self.fake_host_definition_info) + + def test_do_not_undefine_host_when_node_should_not_be_managed_by_secret(self): + self._test_delete_definition(False, DefineHostResponse()) + self.manager.undefine_host.assert_not_called() + + def _test_delete_definition(self, is_node_should_be_managed, expected_response): + self.manager.secret_manager.is_node_should_be_managed_on_secret.return_value = is_node_should_be_managed + self._prepare_undefine_host(expected_response) + self.manager.delete_definition(self.fake_host_definition_info) + self._assert_is_node_should_be_managed() + self.manager.host_definition_manager.handle_k8s_host_definition_after_undefine_action.assert_called_once_with( + self.fake_host_definition_info, expected_response) + + def 
test_undefine_multiple_node_definitions_success(self): + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + self._test_undefine_node_definitions() + self.assertEqual( + self.manager.host_definition_manager.get_host_definition_info_from_secret_and_node_name.call_count, 2) + self.assertEqual(self.manager.delete_definition.call_count, 2) + + def test_undefine_single_node_definition_success(self): + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + self._test_undefine_node_definitions() + self.manager.host_definition_manager.get_host_definition_info_from_secret_and_node_name.assert_called_once_with( + test_settings.FAKE_NODE_NAME, self.secret_info_with_storage_classes) + self.manager.delete_definition.assert_called_once_with(self.fake_host_definition_info) + + def _test_undefine_node_definitions(self): + self.manager.delete_definition = Mock() + self.manager.host_definition_manager.get_host_definition_info_from_secret_and_node_name.return_value = \ + self.fake_host_definition_info + self.manager.undefine_node_definitions(test_settings.FAKE_NODE_NAME) + + def test_undefine_host_after_pending_success(self): + self._test_undefine_host_after_pending(True, 'response') + + def test_do_not_undefine_host_after_pending_when_node_should_not_be_managed_by_secret(self): + self._test_undefine_host_after_pending(False, DefineHostResponse()) + + def _test_undefine_host_after_pending(self, is_node_should_be_managed, expected_response): + self.manager.secret_manager.is_node_should_be_managed_on_secret.return_value = is_node_should_be_managed + self._prepare_undefine_host(expected_response) + result = self.manager.undefine_host_after_pending(self.fake_host_definition_info) + self.assertEqual(result, expected_response) + self._assert_is_node_should_be_managed() + + def _prepare_undefine_host(self, expected_response): + self.manager.undefine_host = Mock() + 
self.manager.undefine_host.return_value = expected_response + + @patch('{}.manifest_utils'.format(test_settings.DEFINITION_MANAGER_PATH)) + def test_define_host_after_pending_success(self, mock_manifest_utils): + self._test_define_host_after_pending(True, 'response', mock_manifest_utils) + mock_manifest_utils.generate_host_definition_response_fields_manifest.assert_called_once_with( + self.fake_host_definition_info.node_name, 'response') + self.manager.k8s_api.patch_host_definition.assert_called_once_with(self.fake_host_definition_manifest) + + @patch('{}.manifest_utils'.format(test_settings.DEFINITION_MANAGER_PATH)) + def test_do_not_define_host_after_pending_when_node_should_not_be_managed_by_secret(self, mock_manifest_utils): + self._test_define_host_after_pending(False, DefineHostResponse(), mock_manifest_utils) + mock_manifest_utils.generate_host_definition_response_fields_manifest.assert_not_called() + self.manager.k8s_api.patch_host_definition.assert_not_called() + + def _test_define_host_after_pending(self, is_node_should_be_managed, expected_response, mock_manifest_utils): + self.manager.secret_manager.is_node_should_be_managed_on_secret.return_value = is_node_should_be_managed + self._prepare_define_host(expected_response) + mock_manifest_utils.generate_host_definition_response_fields_manifest.return_value = \ + self.fake_host_definition_manifest + result = self.manager.define_host_after_pending(self.fake_host_definition_info) + self.assertEqual(result, expected_response) + self._assert_is_node_should_be_managed() + + def _prepare_define_host(self, expected_response): + self.manager.define_host = Mock() + self.manager.define_host.return_value = expected_response + + def _assert_is_node_should_be_managed(self): + self.manager.secret_manager.is_node_should_be_managed_on_secret.assert_called_once_with( + test_settings.FAKE_NODE_NAME, test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + + def 
test_define_nodes_when_new_secret_which_is_not_managed_yet(self): + secret_info = deepcopy(self.fake_secret_info) + self.manager.secret_manager.get_matching_managed_secret_info.return_value = ( + self.fake_secret_info, -1) + self._test_define_nodes_when_new_secret(secret_info, 1) + self._assert_define_nodes_from_secret_info_called(secret_info) + + def test_define_nodes_when_new_secret_which_is_managed_already_and_does_not_have_managed_storage_classes(self): + self.global_managed_secrets.append(self.secret_info_with_no_storage_classes) + secret_info = deepcopy(self.fake_secret_info) + self.manager.secret_manager.get_matching_managed_secret_info.return_value = ( + self.fake_secret_info, 0) + self._test_define_nodes_when_new_secret(secret_info, 1) + self._assert_define_nodes_from_secret_info_called(secret_info) + + def test_define_nodes_when_new_secret_which_is_managed_already_with_storage_classes(self): + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + secret_info = deepcopy(self.fake_secret_info) + secret_info.managed_storage_classes = 2 + self.manager.secret_manager.get_matching_managed_secret_info.return_value = ( + secret_info, 0) + self._test_define_nodes_when_new_secret(secret_info, 3) + self.manager.host_definition_manager.get_host_definition_info_from_secret.assert_not_called() + self.manager.define_nodes.assert_not_called() + + def _test_define_nodes_when_new_secret(self, secret_info, expected_managed_storage_classes): + self._prepare_define_nodes_when_new_secret() + self.manager.define_nodes_when_new_secret(secret_info) + self.manager.secret_manager.get_matching_managed_secret_info.assert_called_once_with(secret_info) + self._assert_define_node_when_new_secret(secret_info, expected_managed_storage_classes) + + def _assert_define_node_when_new_secret(self, secret_info, expected_managed_storage_classes): + secret_info.managed_storage_classes = expected_managed_storage_classes + self.assertEqual(len(self.global_managed_secrets), 1) 
+ self.assertEqual(self.global_managed_secrets, [secret_info]) + + def _prepare_define_nodes_when_new_secret(self): + self.manager.define_nodes = Mock() + self.manager.host_definition_manager.get_host_definition_info_from_secret.return_value = \ + self.fake_host_definition_info + + def _assert_define_nodes_from_secret_info_called(self, secret_info): + self.manager.host_definition_manager.get_host_definition_info_from_secret.assert_called_once_with(secret_info) + self.manager.define_nodes.assert_called_once_with(self.fake_host_definition_info) diff --git a/controllers/tests/controller_server/host_definer/definition_manager/request_manager_test.py b/controllers/tests/controller_server/host_definer/definition_manager/request_manager_test.py new file mode 100644 index 000000000..0fde1dd88 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/definition_manager/request_manager_test.py @@ -0,0 +1,67 @@ +import unittest +from copy import deepcopy +from unittest.mock import MagicMock, patch + +import controllers.common.settings as common_settings +from controllers.servers.host_definer.k8s.api import K8SApi +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +from controllers.servers.host_definer.definition_manager.request import RequestManager + + +class TestRequestManager(unittest.TestCase): + def setUp(self): + test_utils.patch_function(K8SApi, '_load_cluster_configuration') + test_utils.patch_function(K8SApi, '_get_dynamic_client') + self.request_manager = RequestManager() + self.request_manager.secret_manager = MagicMock() + self.request_manager.resource_info_manager = MagicMock() + self.mock_global_managed_nodes = test_utils.patch_nodes_global_variable( + test_settings.REQUEST_MANAGER_PATH) + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.fake_node_info = test_utils.get_fake_node_info() + 
self.fake_node_info.labels[common_settings.CONNECTIVITY_TYPE_LABEL] = test_settings.ISCSI_CONNECTIVITY_TYPE + self.array_connection_info = test_utils.get_array_connection_info() + self.define_request = test_utils.get_define_request( + test_settings.FAKE_PREFIX, test_settings.ISCSI_CONNECTIVITY_TYPE, test_settings.FAKE_NODE_ID) + + @patch('{}.utils'.format(test_settings.REQUEST_MANAGER_PATH)) + def test_generate_request_success(self, mock_utils): + self.mock_global_managed_nodes[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() + self._prepare_generate_request(self.array_connection_info, mock_utils) + result = self.request_manager.generate_request(self.fake_host_definition_info) + self._assert_called_generate_request(mock_utils) + self.assertEqual(result, self.define_request) + + @patch('{}.utils'.format(test_settings.REQUEST_MANAGER_PATH)) + def test_get_none_when_array_connection_info_is_empty_success(self, mock_utils): + self._prepare_generate_request(None, mock_utils) + result = self.request_manager.generate_request(self.fake_host_definition_info) + self._assert_called_generate_request(mock_utils) + self.assertEqual(result, None) + + @patch('{}.utils'.format(test_settings.REQUEST_MANAGER_PATH)) + def test_generate_request_when_node_is_not_in_managed_nodes_success(self, mock_utils): + host_definition_info = deepcopy(self.fake_host_definition_info) + host_definition_info.node_id = 'fake_new_node_id' + define_request = deepcopy(self.define_request) + define_request.node_id_from_csi_node = 'fake_new_node_id' + define_request.node_id_from_host_definition = 'fake_new_node_id' + define_request.io_group = '' + self._prepare_generate_request(self.array_connection_info, mock_utils) + result = self.request_manager.generate_request(host_definition_info) + self._assert_called_generate_request(mock_utils) + self.assertEqual(result, define_request) + + def _prepare_generate_request(self, array_connection_info, mock_utils): + 
self.request_manager.resource_info_manager.get_node_info.return_value = self.fake_node_info + mock_utils.get_prefix.return_value = test_settings.FAKE_PREFIX + mock_utils.get_connectivity_type_from_user.return_value = test_settings.ISCSI_CONNECTIVITY_TYPE + self.request_manager.secret_manager.get_array_connection_info.return_value = array_connection_info + + def _assert_called_generate_request(self, mock_utils): + self.request_manager.resource_info_manager.get_node_info.assert_called_once_with(test_settings.FAKE_NODE_NAME) + mock_utils.get_prefix.assert_called_once_with() + mock_utils.get_connectivity_type_from_user.assert_called_once_with(test_settings.ISCSI_CONNECTIVITY_TYPE) + self.request_manager.secret_manager.get_array_connection_info.assert_called_once_with( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE, self.fake_node_info.labels) diff --git a/controllers/tests/controller_server/host_definer/host_definer_utils_tests/__init__.py b/controllers/tests/controller_server/host_definer/host_definer_utils_tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/tests/controller_server/host_definer/host_definer_utils_tests/manifest_utils_test.py b/controllers/tests/controller_server/host_definer/host_definer_utils_tests/manifest_utils_test.py new file mode 100644 index 000000000..73b6d2ae9 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/host_definer_utils_tests/manifest_utils_test.py @@ -0,0 +1,56 @@ +import unittest +from unittest.mock import patch + +from controllers.servers.host_definer.utils import manifest_utils +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as test_manifest_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings + + +class TestManifestUtils(unittest.TestCase): + 
def setUp(self): + self.define_response = test_utils.get_fake_define_host_response() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + + def test_get_host_definition_manifest_success(self): + mock_generate_host_definition_response_fields_manifest = patch.object( + manifest_utils, 'generate_host_definition_response_fields_manifest').start() + expected_manifest = test_manifest_utils.get_fake_k8s_host_definition_manifest() + mock_generate_host_definition_response_fields_manifest.return_value = \ + test_manifest_utils.get_fake_k8s_host_definition_response_fields_manifest() + result = manifest_utils.get_host_definition_manifest( + self.fake_host_definition_info, self.define_response, test_settings.FAKE_NODE_ID) + self.assertEqual(result[common_settings.SPEC_FIELD], expected_manifest[common_settings.SPEC_FIELD]) + self.assertEqual(result[common_settings.API_VERSION_FIELD], + expected_manifest[common_settings.API_VERSION_FIELD]) + self.assertEqual(result[common_settings.KIND_FIELD], expected_manifest[common_settings.KIND_FIELD]) + self.assertEqual(result[common_settings.METADATA_FIELD][common_settings.NAME_FIELD], + test_settings.FAKE_NODE_NAME) + mock_generate_host_definition_response_fields_manifest.assert_called_once_with( + self.fake_host_definition_info.name, self.define_response) + + def test_get_host_definition_status_manifest_success(self): + fake_status_phase_manifest = test_manifest_utils.get_status_phase_manifest(common_settings.READY_PHASE) + result = manifest_utils.get_host_definition_status_manifest(common_settings.READY_PHASE) + self.assertEqual(result, fake_status_phase_manifest) + + def test_get_body_manifest_for_labels_success(self): + fake_labels_body = test_manifest_utils.get_metadata_with_manage_node_labels_manifest( + common_settings.TRUE_STRING) + result = manifest_utils.get_body_manifest_for_labels(common_settings.TRUE_STRING) + self.assertEqual(result, fake_labels_body) + + def 
test_get_finalizer_manifest_success(self): + fake_finalizers_manifest = test_manifest_utils.get_finalizers_manifest([common_settings.CSI_IBM_FINALIZER, ]) + result = manifest_utils.get_finalizer_manifest( + test_settings.FAKE_NODE_NAME, [common_settings.CSI_IBM_FINALIZER, ]) + self.assertEqual(result, fake_finalizers_manifest) + + def test_generate_host_definition_response_fields_manifest_success(self): + expected_manifest = test_manifest_utils.get_fake_k8s_host_definition_response_fields_manifest() + result = manifest_utils.generate_host_definition_response_fields_manifest( + self.fake_host_definition_info.name, self.define_response) + self.assertEqual(result[common_settings.SPEC_FIELD], expected_manifest[common_settings.SPEC_FIELD]) + self.assertEqual(result[common_settings.METADATA_FIELD][common_settings.NAME_FIELD], + test_settings.FAKE_NODE_NAME) diff --git a/controllers/tests/controller_server/host_definer/host_definer_utils_tests/utils_test.py b/controllers/tests/controller_server/host_definer/host_definer_utils_tests/utils_test.py new file mode 100644 index 000000000..4dfa2bbde --- /dev/null +++ b/controllers/tests/controller_server/host_definer/host_definer_utils_tests/utils_test.py @@ -0,0 +1,248 @@ +import unittest +from copy import deepcopy +from unittest.mock import patch + +from controllers.servers.host_definer.utils import utils +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as test_manifest_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings +from controllers.servers.errors import ValidationException + + +class TestUtils(unittest.TestCase): + def setUp(self): + self.fake_k8s_metadata = test_utils.get_fake_k8s_metadata() + self.fake_array_connectivity_info = test_utils.get_fake_array_connectivity_info() + self.mock_validate_secrets = 
patch('{}.validate_secrets'.format(test_settings.UTILS_PATH)).start() + self.mock_get_array_connectivity_info = patch('{}.get_array_connection_info_from_secrets'.format( + test_settings.UTILS_PATH)).start() + self.mock_json = patch('{}.json'.format(test_settings.UTILS_PATH)).start() + self.mock_os = patch('{}.os'.format(test_settings.UTILS_PATH)).start() + + def test_generate_multiple_io_group_from_labels_success(self): + result = utils.generate_io_group_from_labels(test_utils.get_fake_io_group_labels(2)) + self.assertEqual(result, test_settings.FAKE_MULTIPLE_IO_GROUP_STRING) + + def test_generate_single_io_group_from_labels_success(self): + result = utils.generate_io_group_from_labels(test_utils.get_fake_io_group_labels(1)) + self.assertEqual(result, test_settings.FAKE_SINGLE_IO_GROUP_STRING) + + def test_get_k8s_object_resource_version_success(self): + result = utils.get_k8s_object_resource_version(self.fake_k8s_metadata) + self.assertEqual(result, test_settings.FAKE_RESOURCE_VERSION) + + def test_get_k8s_object_resource_version_with_not_default_resource_version_field_success(self): + self.fake_k8s_metadata.metadata.resourceVersion = self.fake_k8s_metadata.metadata.pop( + common_settings.RESOURCE_VERSION_FIELD) + result = utils.get_k8s_object_resource_version(self.fake_k8s_metadata) + self.assertEqual(result, test_settings.FAKE_RESOURCE_VERSION) + + @patch('{}.decode_base64_to_string'.format(test_settings.UTILS_PATH)) + def test_get_secret_config_encoded_in_base64_success(self, mock_decode_base64_to_string): + secret_data = deepcopy(test_settings.FAKE_ENCODED_CONFIG) + mock_decode_base64_to_string.return_value = test_settings.FAKE_DECODED_CONFIG_STRING[ + common_settings.SECRET_CONFIG_FIELD] + result = utils.change_decode_base64_secret_config(secret_data) + self.assertEqual(result, test_settings.FAKE_DECODED_CONFIG) + mock_decode_base64_to_string.assert_called_once_with( + test_settings.FAKE_ENCODED_CONFIG[common_settings.SECRET_CONFIG_FIELD]) + + 
@patch('{}.decode_base64_to_string'.format(test_settings.UTILS_PATH)) + def test_get_decoded_secret_not_encoded_success(self, mock_decode_base64_to_string): + secret_data = deepcopy(test_settings.FAKE_DECODED_CONFIG_STRING) + mock_decode_base64_to_string.return_value = test_settings.FAKE_DECODED_CONFIG_STRING[ + common_settings.SECRET_CONFIG_FIELD] + result = utils.change_decode_base64_secret_config(secret_data) + self.assertEqual(result, test_settings.FAKE_DECODED_CONFIG) + mock_decode_base64_to_string.assert_called_once_with( + test_settings.FAKE_DECODED_CONFIG_STRING[common_settings.SECRET_CONFIG_FIELD]) + + def test_get_secret_config_success(self): + secret_data = deepcopy(test_settings.FAKE_DECODED_CONFIG_STRING) + self.mock_json.loads.return_value = test_settings.FAKE_DECODED_CONFIG[common_settings.SECRET_CONFIG_FIELD] + result = utils.get_secret_config(secret_data) + self.assertEqual(result, test_settings.FAKE_DECODED_CONFIG[common_settings.SECRET_CONFIG_FIELD]) + self.mock_json.loads.assert_called_once_with( + test_settings.FAKE_DECODED_CONFIG_STRING[common_settings.SECRET_CONFIG_FIELD]) + + def test_do_not_call_json_load_when_getting_dict_secret_config_success(self): + secret_data = deepcopy(test_settings.FAKE_DECODED_CONFIG) + result = utils.get_secret_config(secret_data) + self.assertEqual(result, test_settings.FAKE_DECODED_CONFIG[common_settings.SECRET_CONFIG_FIELD]) + self.mock_json.loads.assert_not_called() + + def test_get_secret_config_from_secret_data_with_no_config_field_success(self): + secret_data = deepcopy(test_manifest_utils.get_fake_k8s_secret_manifest()[test_settings.SECRET_DATA_FIELD]) + result = utils.get_secret_config(secret_data) + self.assertEqual(result, {}) + self.mock_json.loads.assert_not_called() + + def test_munch_success(self): + result = utils.munch(test_manifest_utils.get_empty_k8s_list_manifest()) + self.assertEqual(result, test_utils.get_fake_empty_k8s_list()) + + def test_loop_forever_success(self): + result = 
utils.loop_forever() + self.assertEqual(result, True) + + def test_validate_secret_success(self): + secret_data = deepcopy(test_settings.FAKE_DECODED_CONFIG) + self.mock_json.dumps.return_value = test_settings.FAKE_DECODED_CONFIG_STRING[ + common_settings.SECRET_CONFIG_FIELD] + utils.validate_secret(secret_data) + self.mock_validate_secrets.assert_called_once_with(test_settings.FAKE_DECODED_CONFIG_STRING) + self.mock_json.dumps.assert_called_once_with( + test_settings.FAKE_DECODED_CONFIG[common_settings.SECRET_CONFIG_FIELD]) + + def test_do_not_call_json_dumps_when_getting_string_secret_config_on_validate_secret_success(self): + secret_data = deepcopy(test_settings.FAKE_DECODED_CONFIG_STRING) + utils.validate_secret(secret_data) + self.mock_validate_secrets.assert_called_once_with(test_settings.FAKE_DECODED_CONFIG_STRING) + self.mock_json.dumps.assert_not_called() + + def test_validate_secret_from_secret_data_with_no_config_field_success(self): + secret_data = deepcopy(test_manifest_utils.get_fake_k8s_secret_manifest()[test_settings.SECRET_DATA_FIELD]) + utils.validate_secret(secret_data) + self.mock_validate_secrets.assert_called_once_with(secret_data) + self.mock_json.dumps.assert_not_called() + + def test_validate_secret_handle_validation_error_success(self): + secret_data = deepcopy(test_manifest_utils.get_fake_k8s_secret_manifest()[test_settings.SECRET_DATA_FIELD]) + self.mock_validate_secrets.side_effect = ValidationException('message') + utils.validate_secret(secret_data) + self.mock_validate_secrets.assert_called_once_with(secret_data) + self.mock_json.dumps.assert_not_called() + + def test_get_prefix_success(self): + self.mock_os.getenv.return_value = common_settings.TRUE_STRING + result = utils.get_prefix() + self.assertEqual(result, common_settings.TRUE_STRING) + self.mock_os.getenv.assert_called_once_with(common_settings.PREFIX_ENV_VAR) + + def test_get_connectivity_type_when_it_set_in_the_labels_success(self): + result = 
utils.get_connectivity_type_from_user(test_settings.ISCSI_CONNECTIVITY_TYPE) + self.assertEqual(result, test_settings.ISCSI_CONNECTIVITY_TYPE) + self.mock_os.getenv.assert_not_called() + + def test_get_connectivity_type_when_it_set_in_the_env_vars_success(self): + self.mock_os.getenv.return_value = test_settings.ISCSI_CONNECTIVITY_TYPE + result = utils.get_connectivity_type_from_user('') + self.assertEqual(result, test_settings.ISCSI_CONNECTIVITY_TYPE) + self.mock_os.getenv.assert_called_once_with(common_settings.CONNECTIVITY_ENV_VAR) + + def test_is_topology_label_true_success(self): + result = utils.is_topology_label(test_settings.FAKE_TOPOLOGY_LABEL) + self.assertEqual(result, True) + + def test_is_topology_label_false_success(self): + result = utils.is_topology_label(test_settings.FAKE_LABEL) + self.assertEqual(result, False) + + @patch('{}.decode_array_connectivity_info'.format(test_settings.UTILS_PATH)) + def test_get_array_connectivity_info_from_secret_config_success(self, mock_decode): + connectivity_info = 'connectivity_info' + secret_data = deepcopy(test_settings.FAKE_DECODED_CONFIG) + self.mock_json.dumps.return_value = test_settings.FAKE_DECODED_CONFIG_STRING[ + common_settings.SECRET_CONFIG_FIELD] + self.mock_get_array_connectivity_info.return_value = connectivity_info + mock_decode.return_value = connectivity_info + result = utils.get_array_connection_info_from_secret_data(secret_data, []) + self.assertEqual(result, connectivity_info) + self.mock_get_array_connectivity_info.assert_called_once_with(secret_data, []) + mock_decode.assert_called_once_with(connectivity_info) + self.mock_json.dumps.assert_called_once_with( + test_settings.FAKE_DECODED_CONFIG[common_settings.SECRET_CONFIG_FIELD]) + + @patch('{}.decode_array_connectivity_info'.format(test_settings.UTILS_PATH)) + def test_do_not_call_json_dumps_when_getting_string_secret_config_on_get_array_info_success(self, mock_decode): + secret_data = deepcopy(test_settings.FAKE_DECODED_CONFIG_STRING) + 
self.mock_get_array_connectivity_info.return_value = None + mock_decode.return_value = None + result = utils.get_array_connection_info_from_secret_data(secret_data, []) + self.assertEqual(result, None) + self.mock_get_array_connectivity_info.assert_called_once_with(secret_data, []) + mock_decode.assert_called_once_with(None) + self.mock_json.dumps.assert_not_called() + + @patch('{}.decode_array_connectivity_info'.format(test_settings.UTILS_PATH)) + def test_get_array_info_from_secret_data_with_no_config_field_success(self, mock_decode): + secret_data = deepcopy(test_manifest_utils.get_fake_k8s_secret_manifest()[test_settings.SECRET_DATA_FIELD]) + self.mock_get_array_connectivity_info.return_value = None + mock_decode.return_value = None + result = utils.get_array_connection_info_from_secret_data(secret_data, []) + self.assertEqual(result, None) + self.mock_get_array_connectivity_info.assert_called_once_with(secret_data, []) + mock_decode.assert_called_once_with(None) + self.mock_json.dumps.assert_not_called() + + @patch('{}.decode_array_connectivity_info'.format(test_settings.UTILS_PATH)) + def test_get_array_connection_info_from_secret_data_handle_validation_error_success(self, mock_decode): + secret_data = deepcopy(test_manifest_utils.get_fake_k8s_secret_manifest()[test_settings.SECRET_DATA_FIELD]) + self.mock_get_array_connectivity_info.side_effect = ValidationException('message') + result = utils.get_array_connection_info_from_secret_data(secret_data, []) + self.assertEqual(result, None) + self.mock_get_array_connectivity_info.assert_called_once_with(secret_data, []) + mock_decode.assert_not_called() + self.mock_json.dumps.assert_not_called() + + @patch('{}.decode_base64_to_string'.format(test_settings.UTILS_PATH)) + def test_decode_array_connectivity_info_success(self, mock_decode): + mock_decode.side_effect = [test_settings.FAKE_SECRET_ARRAY, + test_settings.FAKE_SECRET_USER_NAME, test_settings.FAKE_SECRET_PASSWORD] + result = 
utils.decode_array_connectivity_info(self.fake_array_connectivity_info) + self.assertEqual(result, self.fake_array_connectivity_info) + self.assertEqual(mock_decode.call_count, 3) + + def test_decode_base64_to_string_success(self): + result = utils.decode_base64_to_string(test_settings.BASE64_STRING) + self.assertIn(test_settings.DECODED_BASE64_STRING, result) + + def test_decode_base64_to_string_handle_getting_decoded_string_success(self): + result = utils.decode_base64_to_string(test_settings.DECODED_BASE64_STRING) + self.assertEqual(result, test_settings.DECODED_BASE64_STRING) + + def test_get_random_string_success(self): + result = utils.get_random_string() + self.assertEqual(type(result), str) + self.assertEqual(len(result), 20) + + def test_return_true_when_watch_object_is_deleted(self): + result = utils.is_watch_object_type_is_delete(common_settings.DELETED_EVENT_TYPE) + self.assertTrue(result) + + def test_return_false_when_watch_object_is_not_deleted(self): + result = utils.is_watch_object_type_is_delete(common_settings.ADDED_EVENT_TYPE) + self.assertFalse(result) + + def test_return_true_when_host_definer_can_delete_hosts_success(self): + self.mock_os.getenv.return_value = common_settings.TRUE_STRING + result = utils.is_host_definer_can_delete_hosts() + self.assertTrue(result) + self.mock_os.getenv.assert_called_once_with(common_settings.ALLOW_DELETE_ENV_VAR) + + def test_return_false_when_host_definer_cannot_delete_hosts_success(self): + self.mock_os.getenv.return_value = '' + result = utils.is_host_definer_can_delete_hosts() + self.assertFalse(result) + self.mock_os.getenv.assert_called_once_with(common_settings.ALLOW_DELETE_ENV_VAR) + + def test_return_true_when_dynamic_node_labeling_allowed_success(self): + self.mock_os.getenv.return_value = common_settings.TRUE_STRING + result = utils.is_dynamic_node_labeling_allowed() + self.assertTrue(result) + self.mock_os.getenv.assert_called_once_with(common_settings.DYNAMIC_NODE_LABELING_ENV_VAR) + + def 
test_return_false_when_dynamic_node_labeling_is_not_allowed_success(self): + self.mock_os.getenv.return_value = '' + result = utils.is_dynamic_node_labeling_allowed() + self.assertFalse(result) + self.mock_os.getenv.assert_called_once_with(common_settings.DYNAMIC_NODE_LABELING_ENV_VAR) + + def test_get_define_action_when_phase_is_pending_creation(self): + result = utils.get_action(common_settings.PENDING_CREATION_PHASE) + self.assertEqual(result, common_settings.DEFINE_ACTION) + + def test_get_undefine_action_when_phase_is_not_pending_creation(self): + result = utils.get_action(common_settings.PENDING_DELETION_PHASE) + self.assertEqual(result, common_settings.UNDEFINE_ACTION) diff --git a/controllers/tests/controller_server/host_definer/host_definition_watcher_test.py b/controllers/tests/controller_server/host_definer/host_definition_watcher_test.py deleted file mode 100644 index fa8696433..000000000 --- a/controllers/tests/controller_server/host_definer/host_definition_watcher_test.py +++ /dev/null @@ -1,88 +0,0 @@ -from unittest.mock import Mock - -import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils -import controllers.tests.controller_server.host_definer.settings as test_settings -import controllers.common.settings as common_settings -from controllers.tests.controller_server.host_definer.common import BaseSetUp -from controllers.servers.host_definer.types import DefineHostResponse -from controllers.servers.host_definer.watcher.host_definition_watcher import HostDefinitionWatcher - - -class HostDefinitionWatcherBase(BaseSetUp): - def setUp(self): - super().setUp() - self.host_definition_watcher = test_utils.get_class_mock(HostDefinitionWatcher) - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - - -class TestWatchHostDefinitionsResources(HostDefinitionWatcherBase): - def setUp(self): - super().setUp() - self.host_definition_watcher._get_k8s_object_resource_version = Mock() - 
self.host_definition_watcher._get_k8s_object_resource_version.return_value = test_settings.FAKE_RESOURCE_VERSION - - def test_events_on_host_definition_in_ready_state(self): - self.host_definition_watcher._define_host_definition_after_pending_state = Mock() - self.host_definition_watcher.host_definitions_api.watch.return_value = iter( - [test_utils.get_fake_host_definition_watch_event(test_settings.MODIFIED_EVENT_TYPE, - test_settings.READY_PHASE)]) - test_utils.run_function_with_timeout(self.host_definition_watcher.watch_host_definitions_resources, 0.5) - self.host_definition_watcher._define_host_definition_after_pending_state.assert_not_called() - - def test_pending_deletion_that_managed_to_be_deleted_log_messages(self): - self._prepare_default_mocks_for_pending_deletion() - test_utils.run_function_with_timeout(self.host_definition_watcher.watch_host_definitions_resources, 0.5) - self.host_definition_watcher.csi_nodes_api.get.assert_called_with(name=test_settings.FAKE_NODE_NAME) - - def test_set_error_event_on_pending_deletion(self): - self._prepare_default_mocks_for_pending_deletion() - self.host_definition_watcher.storage_host_servicer.undefine_host.return_value = DefineHostResponse( - error_message=test_settings.FAIL_MESSAGE_FROM_STORAGE) - test_utils.patch_pending_variables() - test_utils.run_function_with_timeout(self.host_definition_watcher.watch_host_definitions_resources, 0.5) - self.assertEqual(self.host_definition_watcher.storage_host_servicer.undefine_host.call_count, - test_settings.HOST_DEFINITION_PENDING_VARS['HOST_DEFINITION_PENDING_RETRIES']) - - def _prepare_default_mocks_for_pending_deletion(self): - self.host_definition_watcher.host_definitions_api.watch.return_value = iter( - [test_utils.get_fake_host_definition_watch_event(test_settings.MODIFIED_EVENT_TYPE, - test_settings.PENDING_DELETION_PHASE)]) - self._prepare_default_mocks_for_pending() - self.os.getenv.return_value = test_settings.TRUE_STRING - 
self.host_definition_watcher.core_api.read_node.return_value = self.k8s_node_with_manage_node_label - self.host_definition_watcher.storage_host_servicer.undefine_host.return_value = DefineHostResponse() - self.host_definition_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node( - test_settings.CSI_PROVISIONER_NAME) - - def test_handle_pending_host_definition_that_became_ready(self): - self._prepare_default_mocks_for_pending_creation() - self.host_definition_watcher.host_definitions_api.get.return_value = self.ready_k8s_host_definitions - test_utils.patch_pending_variables() - self.host_definition_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - test_utils.run_function_with_timeout(self.host_definition_watcher.watch_host_definitions_resources, 0.5) - self.host_definition_watcher.storage_host_servicer.define_host.assert_called_once_with( - test_utils.get_define_request(node_id_from_host_definition=test_settings.FAKE_NODE_ID)) - - def test_pending_creation_that_managed_to_be_created(self): - self._prepare_default_mocks_for_pending_creation() - self.host_definition_watcher._loop_forever = Mock() - self.host_definition_watcher._loop_forever.side_effect = [True, False] - test_utils.run_function_with_timeout(self.host_definition_watcher.watch_host_definitions_resources, 0.5) - self.host_definition_watcher.custom_object_api.patch_cluster_custom_object_status.assert_called_once_with( - common_settings.CSI_IBM_GROUP, common_settings.VERSION, - common_settings.HOST_DEFINITION_PLURAL, test_settings.FAKE_NODE_NAME, - test_utils.get_ready_status_manifest()) - - def _prepare_default_mocks_for_pending_creation(self): - self.host_definition_watcher.host_definitions_api.watch.return_value = iter( - [test_utils.get_fake_host_definition_watch_event(test_settings.MODIFIED_EVENT_TYPE, - test_settings.PENDING_CREATION_PHASE)]) - self.host_definition_watcher.storage_host_servicer.define_host.return_value = DefineHostResponse() - 
self.os.getenv.return_value = '' - self.host_definition_watcher.core_api.read_node.return_value = self.k8s_node_with_manage_node_label - self._prepare_default_mocks_for_pending() - - def _prepare_default_mocks_for_pending(self): - self.host_definition_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.host_definition_watcher.host_definitions_api.get.return_value = \ - test_utils.get_fake_k8s_host_definitions_items(test_settings.PENDING_DELETION_PHASE) diff --git a/controllers/tests/controller_server/host_definer/k8s/__init__.py b/controllers/tests/controller_server/host_definer/k8s/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/tests/controller_server/host_definer/k8s/kubernetes_api_test.py b/controllers/tests/controller_server/host_definer/k8s/kubernetes_api_test.py new file mode 100644 index 000000000..fd3903524 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/k8s/kubernetes_api_test.py @@ -0,0 +1,257 @@ +import unittest +from unittest.mock import MagicMock, patch +from kubernetes.client.rest import ApiException +from kubernetes.watch import Watch + +from controllers.servers.host_definer.k8s.api import K8SApi +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as test_manifest_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings + + +class TestKubernetesApi(unittest.TestCase): + def setUp(self): + test_utils.patch_k8s_api_init() + self.k8s_api = K8SApi() + self.not_found_api_exception = ApiException(http_resp=test_utils.get_error_http_resp(404)) + self.general_api_exception = ApiException(http_resp=test_utils.get_error_http_resp(405)) + self.k8s_api.csi_nodes_api = MagicMock() + self.k8s_api.host_definitions_api = MagicMock() + 
self.k8s_api.custom_object_api = MagicMock() + self.k8s_api.core_api = MagicMock() + self.k8s_api.apps_api = MagicMock() + self.k8s_api.storage_api = MagicMock() + self.mock_stream = patch.object(Watch, 'stream').start() + + def test_get_csi_node_success(self): + self.k8s_api.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node() + result = self.k8s_api.get_csi_node(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, test_utils.get_fake_k8s_csi_node()) + self.k8s_api.csi_nodes_api.get.assert_called_once_with(name=test_settings.FAKE_NODE_NAME) + + def test_get_csi_node_not_found(self): + self.k8s_api.csi_nodes_api.get.side_effect = self.not_found_api_exception + result = self.k8s_api.get_csi_node(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, None) + + def test_get_csi_node_failure(self): + self.k8s_api.csi_nodes_api.get.side_effect = self.general_api_exception + result = self.k8s_api.get_csi_node(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, None) + + def test_list_host_definition_success(self): + self.k8s_api.host_definitions_api.get.return_value = test_utils.get_fake_k8s_host_definitions_items() + result = self.k8s_api.list_host_definition() + self.assertEqual(result, test_utils.get_fake_k8s_host_definitions_items()) + self.k8s_api.host_definitions_api.get.assert_called_once_with() + + def test_list_host_definition_failure(self): + self._test_list_k8s_resource_failure(self.k8s_api.list_host_definition, self.k8s_api.host_definitions_api.get) + + def test_create_host_definition_success(self): + self.k8s_api.host_definitions_api.create.return_value = test_utils.get_fake_empty_k8s_list() + result = self.k8s_api.create_host_definition(test_manifest_utils.get_fake_k8s_host_definition_manifest()) + self.assertEqual(result, test_utils.get_fake_empty_k8s_list()) + self.k8s_api.host_definitions_api.create.assert_called_once_with( + body=test_manifest_utils.get_fake_k8s_host_definition_manifest()) + + def 
test_create_host_definition_failure(self): + self.k8s_api.host_definitions_api.create.side_effect = self.general_api_exception + result = self.k8s_api.create_host_definition(test_manifest_utils.get_fake_k8s_host_definition_manifest()) + self.assertEqual(result, None) + + def test_patch_cluster_custom_object_status_success(self): + self.k8s_api.csi_nodes_api.get.return_value = None + self.k8s_api.patch_cluster_custom_object_status( + common_settings.CSI_IBM_GROUP, common_settings.VERSION, common_settings.HOST_DEFINITION_PLURAL, + test_settings.FAKE_NODE_NAME, common_settings.READY_PHASE) + self.k8s_api.custom_object_api.patch_cluster_custom_object_status.assert_called_with( + common_settings.CSI_IBM_GROUP, common_settings.VERSION, + common_settings.HOST_DEFINITION_PLURAL, test_settings.FAKE_NODE_NAME, + common_settings.READY_PHASE) + + def test_create_event_success(self): + self.k8s_api.core_api.create_namespaced_event.return_value = None + self.k8s_api.create_event(test_settings.FAKE_SECRET_NAMESPACE, test_utils.get_fake_empty_k8s_list()) + self.k8s_api.core_api.create_namespaced_event.assert_called_with( + test_settings.FAKE_SECRET_NAMESPACE, test_utils.get_fake_empty_k8s_list()) + + def test_delete_host_definition_success(self): + self.k8s_api.host_definitions_api.delete.return_value = test_utils.get_fake_k8s_host_definitions_items() + result = self.k8s_api.delete_host_definition(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, test_utils.get_fake_k8s_host_definitions_items()) + self.k8s_api.host_definitions_api.delete.assert_called_once_with(name=test_settings.FAKE_NODE_NAME, body={}) + + def test_delete_host_definition_failure(self): + self.k8s_api.host_definitions_api.delete.side_effect = self.general_api_exception + result = self.k8s_api.delete_host_definition(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, None) + + def test_patch_host_definition_success(self): + self.k8s_api.host_definitions_api.patch.return_value = None + result = 
self.k8s_api.patch_host_definition(test_manifest_utils.get_fake_k8s_host_definition_manifest()) + self.assertEqual(result, 200) + self.k8s_api.host_definitions_api.patch.assert_called_once_with( + name=test_settings.FAKE_NODE_NAME, body=test_manifest_utils.get_fake_k8s_host_definition_manifest(), + content_type='application/merge-patch+json') + + def test_patch_host_definition_failure(self): + self.k8s_api.host_definitions_api.patch.side_effect = self.not_found_api_exception + result = self.k8s_api.patch_host_definition(test_manifest_utils.get_fake_k8s_host_definition_manifest()) + self.assertEqual(result, 404) + + def test_patch_node_success(self): + self.k8s_api.core_api.patch_node.return_value = None + self.k8s_api.patch_node(test_settings.FAKE_NODE_NAME, test_manifest_utils.get_fake_k8s_node_manifest( + common_settings.MANAGE_NODE_LABEL)) + self.k8s_api.core_api.patch_node.assert_called_once_with( + test_settings.FAKE_NODE_NAME, test_manifest_utils.get_fake_k8s_node_manifest( + common_settings.MANAGE_NODE_LABEL)) + + def test_get_secret_data_success(self): + self.k8s_api.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() + result = self.k8s_api.get_secret_data(test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + self.assertEqual(result, test_utils.get_fake_k8s_secret().data) + self.k8s_api.core_api.read_namespaced_secret.assert_called_once_with( + name=test_settings.FAKE_SECRET, namespace=test_settings.FAKE_SECRET_NAMESPACE) + + def test_get_secret_data_failure(self): + self.k8s_api.core_api.read_namespaced_secret.side_effect = self.not_found_api_exception + result = self.k8s_api.get_secret_data(test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + self.assertEqual(result, {}) + + def test_read_node_success(self): + self.k8s_api.core_api.read_node.return_value = test_utils.get_fake_k8s_node(common_settings.MANAGE_NODE_LABEL) + result = self.k8s_api.read_node(test_settings.FAKE_NODE_NAME) + 
self.assertEqual(result, test_utils.get_fake_k8s_node(common_settings.MANAGE_NODE_LABEL)) + self.k8s_api.core_api.read_node.assert_called_once_with(name=test_settings.FAKE_NODE_NAME) + + def test_read_node_failure(self): + self.k8s_api.core_api.read_node.side_effect = self.not_found_api_exception + result = self.k8s_api.read_node(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, None) + + def test_list_daemon_set_for_all_namespaces_success(self): + self.k8s_api.apps_api.list_daemon_set_for_all_namespaces.return_value = \ + test_utils.get_fake_k8s_daemon_set_items(0, 0) + result = self.k8s_api.list_daemon_set_for_all_namespaces(common_settings.MANAGE_NODE_LABEL) + self.assertEqual(result, test_utils.get_fake_k8s_daemon_set_items(0, 0)) + self.k8s_api.apps_api.list_daemon_set_for_all_namespaces.assert_called_once_with( + label_selector=common_settings.MANAGE_NODE_LABEL) + + def test_list_daemon_set_for_all_namespaces_failure(self): + self.k8s_api.apps_api.list_daemon_set_for_all_namespaces.side_effect = self.general_api_exception + result = self.k8s_api.list_daemon_set_for_all_namespaces(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, None) + + def test_list_pod_for_all_namespaces_success(self): + self.k8s_api.core_api.list_pod_for_all_namespaces.return_value = \ + test_utils.get_fake_k8s_daemon_set_items(0, 0) + result = self.k8s_api.list_pod_for_all_namespaces(common_settings.MANAGE_NODE_LABEL) + self.assertEqual(result, test_utils.get_fake_k8s_daemon_set_items(0, 0)) + self.k8s_api.core_api.list_pod_for_all_namespaces.assert_called_once_with( + label_selector=common_settings.MANAGE_NODE_LABEL) + + def test_list_pod_for_all_namespaces_failure(self): + self.k8s_api.core_api.list_pod_for_all_namespaces.side_effect = self.general_api_exception + result = self.k8s_api.list_pod_for_all_namespaces(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, None) + + @patch('{}.utils'.format(test_settings.K8S_API_PATH)) + def 
test_get_storage_class_stream_success(self, mock_utils): + self._test_basic_resource_stream_success( + self.k8s_api.get_storage_class_stream, self.k8s_api.storage_api.list_storage_class, mock_utils) + + @patch('{}.utils'.format(test_settings.K8S_API_PATH)) + def test_get_node_stream_success(self, mock_utils): + self._test_basic_resource_stream_success(self.k8s_api.get_node_stream, + self.k8s_api.core_api.list_node, mock_utils) + + @patch('{}.utils'.format(test_settings.K8S_API_PATH)) + def test_get_secret_stream_success(self, mock_utils): + self._test_basic_resource_stream_success(self.k8s_api.get_secret_stream, + self.k8s_api.core_api.list_secret_for_all_namespaces, + mock_utils) + + def _test_basic_resource_stream_success(self, function_to_test, k8s_function, mock_utils): + mock_utils.get_k8s_object_resource_version.return_value = test_settings.FAKE_RESOURCE_VERSION + result = function_to_test() + k8s_function.assert_called_once() + self.mock_stream.assert_called_once_with(k8s_function, resource_version=test_settings.FAKE_RESOURCE_VERSION, + timeout_seconds=5) + self.assertEqual(result, self.mock_stream.return_value) + + def test_get_storage_class_stream_failure(self): + self._test_basic_resource_stream_failure(self.k8s_api.get_storage_class_stream) + + def test_get_node_stream_failure(self): + self._test_basic_resource_stream_failure(self.k8s_api.get_node_stream) + + def test_get_secret_stream_failure(self): + self._test_basic_resource_stream_failure(self.k8s_api.get_secret_stream) + + def _test_basic_resource_stream_failure(self, function_to_test): + self.mock_stream.side_effect = self.general_api_exception + with self.assertRaises(ApiException): + function_to_test() + + def test_list_storage_class_success(self): + self.k8s_api.storage_api.list_storage_class.return_value = \ + test_utils.get_fake_k8s_storage_class_items(common_settings.CSI_PROVISIONER_NAME) + result = self.k8s_api.list_storage_class() + self.assertEqual(result, 
test_utils.get_fake_k8s_storage_class_items(common_settings.CSI_PROVISIONER_NAME)) + + def test_list_storage_class_failure(self): + self._test_list_k8s_resource_failure(self.k8s_api.list_storage_class, + self.k8s_api.storage_api.list_storage_class) + + def test_list_node_success(self): + self.k8s_api.core_api.list_node.return_value = test_utils.get_fake_k8s_nodes_items() + result = self.k8s_api.list_node() + self.assertEqual(result, test_utils.get_fake_k8s_nodes_items()) + + def test_list_node_failure(self): + self._test_list_k8s_resource_failure(self.k8s_api.list_node, self.k8s_api.core_api.list_node) + + def test_list_csi_node_success(self): + self.k8s_api.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node( + common_settings.CSI_PROVISIONER_NAME) + result = self.k8s_api.list_csi_node() + self.assertEqual(result, test_utils.get_fake_k8s_csi_node(common_settings.CSI_PROVISIONER_NAME)) + + def test_list_csi_node_failure(self): + self._test_list_k8s_resource_failure(self.k8s_api.list_csi_node, self.k8s_api.csi_nodes_api.get) + + def _test_list_k8s_resource_failure(self, function_to_test, k8s_function): + k8s_function.side_effect = self.general_api_exception + result = function_to_test() + self.assertEqual(result, test_utils.get_fake_empty_k8s_list()) + + def test_get_host_definition_stream_success(self): + expected_output = iter([]) + self.k8s_api.host_definitions_api.watch.return_value = expected_output + result = self.k8s_api.get_host_definition_stream(test_settings.FAKE_RESOURCE_VERSION, 5) + self.k8s_api.host_definitions_api.watch.assert_called_once_with( + resource_version=test_settings.FAKE_RESOURCE_VERSION, timeout=5) + self.assertEqual(result, expected_output) + + def test_get_host_definition_stream_failure(self): + self.k8s_api.host_definitions_api.watch.side_effect = self.general_api_exception + with self.assertRaises(ApiException): + self.k8s_api.get_host_definition_stream(test_settings.FAKE_RESOURCE_VERSION, 5) + + 
@patch('{}.utils'.format(test_settings.K8S_API_PATH)) + def test_get_csi_node_stream_success(self, mock_utils): + expected_output = iter([]) + mock_utils.get_k8s_object_resource_version.return_value = test_settings.FAKE_RESOURCE_VERSION + self.k8s_api.csi_nodes_api.watch.return_value = expected_output + result = self.k8s_api.get_csi_node_stream() + self.k8s_api.csi_nodes_api.watch.assert_called_once_with( + resource_version=test_settings.FAKE_RESOURCE_VERSION, timeout=5) + self.assertEqual(result, expected_output) + + def test_get_csi_node_stream_failure(self): + self.k8s_api.csi_nodes_api.watch.side_effect = self.general_api_exception + with self.assertRaises(ApiException): + self.k8s_api.get_csi_node_stream() diff --git a/controllers/tests/controller_server/host_definer/node_watcher_test.py b/controllers/tests/controller_server/host_definer/node_watcher_test.py deleted file mode 100644 index cc37725f0..000000000 --- a/controllers/tests/controller_server/host_definer/node_watcher_test.py +++ /dev/null @@ -1,109 +0,0 @@ -from unittest.mock import Mock, patch - -import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils -import controllers.tests.controller_server.host_definer.settings as test_settings -from controllers.tests.controller_server.host_definer.common import BaseSetUp -from controllers.servers.host_definer.watcher.node_watcher import NodeWatcher - - -class NodeWatcherBase(BaseSetUp): - def setUp(self): - super().setUp() - self.node_watcher = test_utils.get_class_mock(NodeWatcher) - self.unmanaged_csi_nodes_with_driver = patch( - '{}.unmanaged_csi_nodes_with_driver'.format(test_settings.NODES_WATCHER_PATH), set()).start() - self.expected_unmanaged_csi_nodes_with_driver = set() - self.expected_unmanaged_csi_nodes_with_driver.add(test_settings.FAKE_NODE_NAME) - self.nodes_on_node_watcher = test_utils.patch_nodes_global_variable(test_settings.NODES_WATCHER_PATH) - - -class TestAddInitialNodes(NodeWatcherBase): - def 
test_host_definer_does_not_delete_host_definitions_on_node_with_csi_node(self): - self._prepare_default_mocks_for_node() - self.node_watcher.add_initial_nodes() - self.node_watcher.storage_host_servicer.undefine_host.assert_not_called() - - def test_host_definer_deletes_host_definitions_on_node_with_csi_node(self): - self._prepare_default_mocks_for_node() - self.node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node( - test_settings.FAKE_CSI_PROVISIONER) - self.os.getenv.side_effect = [test_settings.TRUE_STRING, test_settings.FAKE_PREFIX, - '', test_settings.TRUE_STRING, test_settings.TRUE_STRING] - self.node_watcher.add_initial_nodes() - self.node_watcher.storage_host_servicer.undefine_host.assert_called_once_with(test_utils.get_define_request( - prefix=test_settings.FAKE_PREFIX, node_id_from_host_definition=test_settings.FAKE_NODE_ID)) - - def test_if_detect_unmanaged_node_with_csi_node(self): - self._prepare_default_mocks_for_node() - self.os.getenv.return_value = '' - self.node_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - self.node_watcher.add_initial_nodes() - self.assertEqual(self.expected_unmanaged_csi_nodes_with_driver, self.unmanaged_csi_nodes_with_driver) - - def _prepare_default_mocks_for_node(self): - self.node_watcher.core_api.list_node.return_value = test_utils.get_fake_k8s_nodes_items() - self.node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node( - test_settings.CSI_PROVISIONER_NAME) - self.node_watcher.core_api.read_node.return_value = self.k8s_node_with_manage_node_label - self.node_watcher.host_definitions_api.get.return_value = self.ready_k8s_host_definitions - self.os.getenv.return_value = test_settings.TRUE_STRING - self.node_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - - -class TestWatchNodesResources(NodeWatcherBase): - def 
setUp(self): - super().setUp() - self.node_watcher._get_k8s_object_resource_version = Mock() - self.node_watcher._get_k8s_object_resource_version.return_value = test_settings.FAKE_RESOURCE_VERSION - self.nodes_stream = patch('{}.watch.Watch.stream'.format(test_settings.NODES_WATCHER_PATH)).start() - self.node_watcher._loop_forever = Mock() - self.node_watcher._loop_forever.side_effect = [True, False] - - def test_no_call_for_unmanaged_nodes_list_when_node_is_managed_already(self): - self._prepare_default_mocks_for_modified_event() - self.node_watcher.watch_nodes_resources() - self.expected_unmanaged_csi_nodes_with_driver.clear() - self.assertEqual(self.expected_unmanaged_csi_nodes_with_driver, self.unmanaged_csi_nodes_with_driver) - - def test_catch_node_with_new_manage_node_label(self): - self._prepare_default_mocks_for_modified_event() - self.managed_secrets_on_watcher_helper.append(test_utils.get_fake_secret_info()) - self.node_watcher.watch_nodes_resources() - self.assertEqual(1, len(self.nodes_on_watcher_helper)) - self.node_watcher.storage_host_servicer.define_host.assert_called_once_with( - test_utils.get_define_request(node_id_from_host_definition=test_settings.FAKE_NODE_ID)) - self.expected_unmanaged_csi_nodes_with_driver.clear() - self.assertEqual(self.expected_unmanaged_csi_nodes_with_driver, self.unmanaged_csi_nodes_with_driver) - - def test_do_not_create_host_definitions_on_modified_node_without_csi_node(self): - self._prepare_default_mocks_for_modified_event() - self.nodes_on_node_watcher[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.node_watcher.watch_nodes_resources() - self.node_watcher.storage_host_servicer.define_host.assert_not_called() - self.assertEqual(self.expected_unmanaged_csi_nodes_with_driver, self.unmanaged_csi_nodes_with_driver) - - def test_do_not_create_host_definitions_on_modified_node_when_dynamic_node_labeling_enabled(self): - self._prepare_default_mocks_for_modified_event() - 
self.os.getenv.return_value = test_settings.TRUE_STRING - self.node_watcher.watch_nodes_resources() - self.node_watcher.storage_host_servicer.define_host.assert_not_called() - self.assertEqual(self.expected_unmanaged_csi_nodes_with_driver, self.unmanaged_csi_nodes_with_driver) - - def test_do_not_create_host_definitions_on_modified_node_with_no_manage_node_label(self): - self._prepare_default_mocks_for_modified_event() - self.node_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - self.node_watcher.watch_nodes_resources() - self.node_watcher.storage_host_servicer.define_host.assert_not_called() - self.assertEqual(self.expected_unmanaged_csi_nodes_with_driver, self.unmanaged_csi_nodes_with_driver) - - def _prepare_default_mocks_for_modified_event(self): - self.nodes_stream.return_value = iter([test_utils.get_fake_node_watch_event( - test_settings.MODIFIED_EVENT_TYPE)]) - self.node_watcher.csi_nodes_api.get.return_value = test_utils.get_fake_k8s_csi_node( - test_settings.CSI_PROVISIONER_NAME) - self.os.getenv.return_value = '' - self.node_watcher.core_api.read_node.return_value = self.k8s_node_with_manage_node_label - self.node_watcher.host_definitions_api.get.return_value = test_utils.get_empty_k8s_host_definitions() - self.node_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.unmanaged_csi_nodes_with_driver.add(test_settings.FAKE_NODE_NAME) diff --git a/controllers/tests/controller_server/host_definer/resource_manager/__init__.py b/controllers/tests/controller_server/host_definer/resource_manager/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/tests/controller_server/host_definer/resource_manager/base_resource_manager.py b/controllers/tests/controller_server/host_definer/resource_manager/base_resource_manager.py new file mode 100644 index 000000000..00241a92d --- /dev/null +++ 
b/controllers/tests/controller_server/host_definer/resource_manager/base_resource_manager.py @@ -0,0 +1,22 @@ +import unittest + +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils + + +class BaseResourceManager(unittest.TestCase): + def setUp(self): + test_utils.patch_k8s_api_init() + + def _test_get_k8s_resources_info_success(self, function_to_test, k8s_function, + get_info_function, fake_resource_info, fake_k8s_items): + k8s_function.return_value = fake_k8s_items + get_info_function.return_value = fake_resource_info + result = function_to_test() + self.assertEqual(result, [fake_resource_info]) + get_info_function.assert_called_once_with(fake_k8s_items.items[0]) + + def _test_get_k8s_resources_info_empty_list_success(self, function_to_test, k8s_function, info_function): + k8s_function.return_value = test_utils.get_fake_empty_k8s_list() + result = function_to_test() + self.assertEqual(result, []) + info_function.assert_not_called() diff --git a/controllers/tests/controller_server/host_definer/resource_manager/csi_node_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/csi_node_manager_test.py new file mode 100644 index 000000000..c57b0e5ba --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/csi_node_manager_test.py @@ -0,0 +1,82 @@ +from unittest.mock import MagicMock +from copy import deepcopy + +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager +from controllers.servers.host_definer.resource_manager.csi_node import CSINodeManager +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings + + +class TestCSINodeManager(BaseResourceManager): + def setUp(self): + super().setUp() + self.csi_node = CSINodeManager() + 
self.csi_node.k8s_api = MagicMock() + self.csi_node.daemon_set_manager = MagicMock() + self.csi_node.resource_info_manager = MagicMock() + self.fake_csi_node_info = test_utils.get_fake_csi_node_info() + self.fake_pod_info = test_utils.get_fake_pod_info() + self.fake_k8s_csi_nodes_with_ibm_driver = test_utils.get_fake_k8s_csi_nodes( + common_settings.CSI_PROVISIONER_NAME, 1) + self.fake_k8s_csi_nodes_with_non_ibm_driver = test_utils.get_fake_k8s_csi_nodes( + test_settings.FAKE_CSI_PROVISIONER, 1) + + def test_get_csi_nodes_info_with_driver_success(self): + self._test_get_k8s_resources_info_success( + self.csi_node.get_csi_nodes_info_with_driver, self.csi_node.k8s_api.list_csi_node, + self.csi_node.resource_info_manager.generate_csi_node_info, self.fake_csi_node_info, + self.fake_k8s_csi_nodes_with_ibm_driver) + + def test_get_csi_nodes_info_with_driver_empty_list_success(self): + self._test_get_k8s_resources_info_empty_list_success( + self.csi_node.get_csi_nodes_info_with_driver, self.csi_node.k8s_api.list_csi_node, + self.csi_node.resource_info_manager.generate_csi_node_info) + + def test_get_csi_nodes_info_with_driver_non_ibm_driver_success(self): + self.csi_node.k8s_api.list_csi_node.return_value = self.fake_k8s_csi_nodes_with_non_ibm_driver + self.csi_node.resource_info_manager.generate_csi_node_info.return_value = self.fake_csi_node_info + result = self.csi_node.get_csi_nodes_info_with_driver() + self.assertEqual(result, []) + self.csi_node.resource_info_manager.generate_csi_node_info.assert_not_called() + + def test_host_is_part_of_update_when_the_node_has_matching_csi_node_pod(self): + self._test_is_host_part_of_update(True, test_settings.FAKE_DAEMON_SET_NAME, [self.fake_pod_info]) + self.csi_node.resource_info_manager.get_csi_pods_info.assert_called_once_with() + + def test_host_is_not_part_of_update_when_the_node_do_not_have_csi_node_pod_on_it(self): + pod_info = deepcopy(self.fake_pod_info) + pod_info.node_name = 'bad_node_name' + 
self._test_is_host_part_of_update(False, test_settings.FAKE_DAEMON_SET_NAME, [pod_info]) + self.csi_node.resource_info_manager.get_csi_pods_info.assert_called_once_with() + + def test_host_is_not_part_of_update_when_non_of_the_pods_has_the_daemon_set_name_in_their_name(self): + self._test_is_host_part_of_update(False, 'bad_daemon_set_name', [self.fake_pod_info, self.fake_pod_info]) + self.csi_node.resource_info_manager.get_csi_pods_info.assert_called_once_with() + + def test_host_is_not_part_of_update_when_fail_to_get_daemon_set(self): + self._test_is_host_part_of_update(False, None) + self.csi_node.resource_info_manager.get_csi_pods_info.assert_not_called() + + def _test_is_host_part_of_update(self, expected_result, daemon_set_name, pods_info=None): + self.csi_node.daemon_set_manager.wait_until_all_daemon_set_pods_are_up_to_date.return_value = daemon_set_name + self.csi_node.resource_info_manager.get_csi_pods_info.return_value = pods_info + result = self.csi_node.is_host_part_of_update(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, expected_result) + self.csi_node.daemon_set_manager.wait_until_all_daemon_set_pods_are_up_to_date.assert_called_once_with() + + def test_return_true_when_node_id_changed(self): + self._test_is_node_id_changed(True, test_settings.FAKE_NODE_ID, 'different_node_id') + + def test_return_false_when_node_id_did_not_change(self): + self._test_is_node_id_changed(False, test_settings.FAKE_NODE_ID, test_settings.FAKE_NODE_ID) + + def test_return_false_when_host_definition_node_id_is_none(self): + self._test_is_node_id_changed(False, None, test_settings.FAKE_NODE_ID) + + def test_return_false_when_csi_node_node_id_is_none(self): + self._test_is_node_id_changed(False, test_settings.FAKE_NODE_ID, None) + + def _test_is_node_id_changed(self, expected_result, host_definition_node_id, csi_node_node_id): + result = self.csi_node.is_node_id_changed(host_definition_node_id, csi_node_node_id) + self.assertEqual(result, expected_result) diff --git 
a/controllers/tests/controller_server/host_definer/resource_manager/daemon_set_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/daemon_set_manager_test.py new file mode 100644 index 000000000..f32dc16da --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/daemon_set_manager_test.py @@ -0,0 +1,44 @@ +from unittest.mock import MagicMock + +import controllers.common.settings as common_settings +from controllers.servers.host_definer.resource_manager.daemon_set import DaemonSetManager +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager + + +class TestEventManagerTest(BaseResourceManager): + def setUp(self): + super().setUp() + self.daemon_set_manager = DaemonSetManager() + self.daemon_set_manager.k8s_api = MagicMock() + self.fake_updated_daemon_set_items = test_utils.get_fake_k8s_daemon_set_items(1, 1) + self.fake_not_updated_daemon_set_items = test_utils.get_fake_k8s_daemon_set_items(0, 1) + self.fake_updated_daemon_set = test_utils.get_fake_k8s_daemon_set(1, 1) + + def test_get_updated_daemon_set_so_it_will_return_the_daemon_set_name(self): + daemon_set_name = self.fake_updated_daemon_set.metadata.name + self._test_wait_for_updated_daemon_set_called_once(self.fake_updated_daemon_set_items, daemon_set_name) + + def test_get_none_when_fail_to_get_csi_daemon_set(self): + self._test_wait_for_updated_daemon_set_called_once(None, None) + + def _test_wait_for_updated_daemon_set_called_once(self, daemon_set, expected_result): + self.daemon_set_manager.k8s_api.list_daemon_set_for_all_namespaces.return_value = daemon_set + result = self.daemon_set_manager.wait_until_all_daemon_set_pods_are_up_to_date() + self.assertEqual(result, expected_result) + self.daemon_set_manager.k8s_api.list_daemon_set_for_all_namespaces.assert_called_once_with( + 
common_settings.DRIVER_PRODUCT_LABEL) + + def test_get_not_updated_daemon_and_wait_until_it_will_be_updated(self): + daemon_set_name = self.fake_updated_daemon_set.metadata.name + self._test_wait_for_updated_daemon_set_called_twice( + [self.fake_not_updated_daemon_set_items, self.fake_updated_daemon_set_items], daemon_set_name) + + def test_get_none_when_fail_to_get_csi_daemon_set_in_the_second_time(self): + self._test_wait_for_updated_daemon_set_called_twice([self.fake_not_updated_daemon_set_items, None], None) + + def _test_wait_for_updated_daemon_set_called_twice(self, daemon_sets, expected_result): + self.daemon_set_manager.k8s_api.list_daemon_set_for_all_namespaces.side_effect = daemon_sets + result = self.daemon_set_manager.wait_until_all_daemon_set_pods_are_up_to_date() + self.assertEqual(result, expected_result) + self.assertEqual(self.daemon_set_manager.k8s_api.list_daemon_set_for_all_namespaces.call_count, 2) diff --git a/controllers/tests/controller_server/host_definer/resource_manager/event_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/event_manager_test.py new file mode 100644 index 000000000..4383c8fd4 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/event_manager_test.py @@ -0,0 +1,31 @@ +import controllers.common.settings as common_settings +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager +from controllers.servers.host_definer.resource_manager.event import EventManager +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings + + +class TestEventManagerTest(BaseResourceManager): + def setUp(self): + self.event_manager = EventManager() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + + def test_generate_k8s_normal_event_success(self): + 
self._test_generate_k8s_event_success(common_settings.SUCCESSFUL_MESSAGE_TYPE, + common_settings.NORMAL_EVENT_TYPE) + + def test_generate_k8s_warning_event_success(self): + self._test_generate_k8s_event_success('unsuccessful message type', common_settings.WARNING_EVENT_TYPE) + + def _test_generate_k8s_event_success(self, message_type, expected_event_type): + result = self.event_manager.generate_k8s_event( + self.fake_host_definition_info, test_settings.MESSAGE, + common_settings.DEFINE_ACTION, message_type) + self.assertEqual(result.metadata, test_utils.get_event_object_metadata()) + self.assertEqual(result.reporting_component, common_settings.HOST_DEFINER) + self.assertEqual(result.reporting_instance, common_settings.HOST_DEFINER) + self.assertEqual(result.action, common_settings.DEFINE_ACTION) + self.assertEqual(result.type, expected_event_type) + self.assertEqual(result.reason, message_type + common_settings.DEFINE_ACTION) + self.assertEqual(result.message, test_settings.MESSAGE) + self.assertEqual(result.involved_object, test_utils.get_object_reference()) diff --git a/controllers/tests/controller_server/host_definer/resource_manager/host_definition_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/host_definition_manager_test.py new file mode 100644 index 000000000..573dca516 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/host_definition_manager_test.py @@ -0,0 +1,341 @@ +from copy import deepcopy +from unittest.mock import MagicMock, Mock, patch + +import controllers.common.settings as common_settings +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager +from controllers.servers.host_definer.resource_manager.host_definition import HostDefinitionManager +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils 
as test_manifest_utils +import controllers.tests.controller_server.host_definer.settings as test_settings + + +class TestHostDefinitionManager(BaseResourceManager): + def setUp(self): + self.host_definition_manager = HostDefinitionManager() + self.host_definition_manager.k8s_api = MagicMock() + self.host_definition_manager.event_manager = MagicMock() + self.host_definition_manager.resource_info_manager = MagicMock() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.fake_secret_info = test_utils.get_fake_secret_info() + self.mock_global_managed_nodes = test_utils.patch_nodes_global_variable( + test_settings.HOST_DEFINITION_MANAGER_PATH) + self.mock_global_managed_nodes[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() + self.define_response = test_utils.get_fake_define_host_response() + + def test_get_host_definition_info_from_secret_success(self): + result = self.host_definition_manager.get_host_definition_info_from_secret(self.fake_secret_info) + self.assertEqual(result.secret_name, test_settings.FAKE_SECRET) + self.assertEqual(result.secret_namespace, test_settings.FAKE_SECRET_NAMESPACE) + + def test_get_matching_host_definition_info_success(self): + self._test_get_matching_host_definition_info_success( + test_settings.FAKE_NODE_NAME, test_settings.FAKE_SECRET, + test_settings.FAKE_SECRET_NAMESPACE, self.fake_host_definition_info) + + def test_get_none_when_host_definition_node_name_is_not_matched(self): + self._test_get_matching_host_definition_info_success( + 'bad_node_name', test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + + def test_get_none_when_host_definition_secret_name_is_not_matched(self): + self._test_get_matching_host_definition_info_success( + test_settings.FAKE_NODE_NAME, 'bad_secret_name', test_settings.FAKE_SECRET_NAMESPACE) + + def test_get_none_when_host_definition_secret_namespace_is_not_matched(self): + self._test_get_matching_host_definition_info_success( + 
test_settings.FAKE_NODE_NAME, test_settings.FAKE_SECRET, 'bad_secret_namespace') + + def _test_get_matching_host_definition_info_success( + self, node_name, secret_name, secret_namespace, expected_result=None): + self.host_definition_manager.k8s_api.list_host_definition.return_value = \ + test_utils.get_fake_k8s_host_definitions_items() + self.host_definition_manager.resource_info_manager.generate_host_definition_info.return_value = \ + self.fake_host_definition_info + result = self.host_definition_manager.get_matching_host_definition_info( + node_name, secret_name, secret_namespace) + self.assertEqual(result, expected_result) + self.host_definition_manager.resource_info_manager.generate_host_definition_info.assert_called_once_with( + test_utils.get_fake_k8s_host_definitions_items().items[0]) + self.host_definition_manager.k8s_api.list_host_definition.assert_called_once_with() + + def test_get_none_when_host_definition_list_empty_success(self): + self.host_definition_manager.k8s_api.list_host_definition.return_value = test_utils.get_fake_empty_k8s_list() + self.host_definition_manager.resource_info_manager.generate_host_definition_info.return_value = \ + self.fake_host_definition_info + result = self.host_definition_manager.get_matching_host_definition_info('', '', '') + self.assertEqual(result, None) + self.host_definition_manager.resource_info_manager.generate_host_definition_info.assert_not_called() + self.host_definition_manager.k8s_api.list_host_definition.assert_called_once_with() + + @patch('{}.manifest_utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_create_host_definition_success(self, mock_manifest): + finalizers_manifest = test_manifest_utils.get_finalizers_manifest([common_settings.CSI_IBM_FINALIZER, ]) + self.host_definition_manager.k8s_api.create_host_definition.return_value = \ + test_utils.get_fake_k8s_host_definition(common_settings.READY_PHASE) + mock_manifest.get_finalizer_manifest.return_value = finalizers_manifest + 
self.host_definition_manager.resource_info_manager.generate_host_definition_info.return_value = \ + self.fake_host_definition_info + result = self.host_definition_manager.create_host_definition( + test_manifest_utils.get_fake_k8s_host_definition_manifest()) + self.assertEqual(result, self.fake_host_definition_info) + self.host_definition_manager.resource_info_manager.generate_host_definition_info.assert_called_once_with( + test_utils.get_fake_k8s_host_definition(common_settings.READY_PHASE)) + mock_manifest.get_finalizer_manifest.assert_called_once_with( + test_settings.FAKE_NODE_NAME, [common_settings.CSI_IBM_FINALIZER, ]) + self.host_definition_manager.k8s_api.patch_host_definition.assert_called_once_with(finalizers_manifest) + self.host_definition_manager.k8s_api.create_host_definition.assert_called_once_with( + test_manifest_utils.get_fake_k8s_host_definition_manifest()) + + @patch('{}.manifest_utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_create_host_definition_failure(self, mock_manifest_utils): + self.host_definition_manager.k8s_api.create_host_definition.return_value = None + self.host_definition_manager.resource_info_manager.generate_host_definition_info.return_value = \ + self.fake_host_definition_info + result = self.host_definition_manager.create_host_definition( + test_manifest_utils.get_fake_k8s_host_definition_manifest()) + self.assertEqual(result.name, "") + self.assertEqual(result.node_id, "") + mock_manifest_utils.get_finalizer_manifest.assert_not_called() + self.host_definition_manager.resource_info_manager.generate_host_definition_info.assert_not_called() + self.host_definition_manager.k8s_api.patch_host_definition.assert_not_called() + self.host_definition_manager.k8s_api.create_host_definition.assert_called_once_with( + test_manifest_utils.get_fake_k8s_host_definition_manifest()) + + @patch('{}.manifest_utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_delete_host_definition_success(self, 
mock_manifest_utils): + self._test_delete_host_definition(200, mock_manifest_utils) + self.host_definition_manager.k8s_api.delete_host_definition.assert_called_once_with( + test_settings.FAKE_NODE_NAME) + + @patch('{}.manifest_utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_fail_to_delete_host_definition_because_the_finalizers_fails_to_be_deleted(self, mock_manifest_utils): + self._test_delete_host_definition(405, mock_manifest_utils) + self.host_definition_manager.k8s_api.delete_host_definition.assert_not_called() + + def _test_delete_host_definition(self, finalizers_status_code, mock_manifest_utils): + mock_manifest_utils.get_finalizer_manifest.return_value = test_manifest_utils.get_finalizers_manifest([]) + self.host_definition_manager.k8s_api.patch_host_definition.return_value = finalizers_status_code + self.host_definition_manager.delete_host_definition(test_settings.FAKE_NODE_NAME) + self.host_definition_manager.k8s_api.patch_host_definition.assert_called_once_with( + test_manifest_utils.get_finalizers_manifest([])) + + @patch('{}.manifest_utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_set_host_definition_status_success(self, mock_manifest_utils): + status_phase_manifest = test_manifest_utils.get_status_phase_manifest(common_settings.READY_PHASE) + mock_manifest_utils.get_host_definition_status_manifest.return_value = status_phase_manifest + self.host_definition_manager.set_host_definition_status( + test_settings.FAKE_NODE_NAME, common_settings.READY_PHASE) + mock_manifest_utils.get_host_definition_status_manifest.assert_called_once_with(common_settings.READY_PHASE) + self.host_definition_manager.k8s_api.patch_cluster_custom_object_status.assert_called_once_with( + common_settings.CSI_IBM_GROUP, common_settings.VERSION, common_settings.HOST_DEFINITION_PLURAL, + test_settings.FAKE_NODE_NAME, status_phase_manifest) + + def test_get_host_definition_info_from_secret_and_node_name_success(self): + 
self.host_definition_manager.get_host_definition_info_from_secret = Mock() + self.host_definition_manager.add_name_to_host_definition_info = Mock() + self.host_definition_manager.get_host_definition_info_from_secret.return_value = self.fake_host_definition_info + self.host_definition_manager.add_name_to_host_definition_info.return_value = self.fake_host_definition_info + result = self.host_definition_manager.get_host_definition_info_from_secret_and_node_name( + test_settings.FAKE_NODE_NAME, self.fake_secret_info) + self.assertEqual(result, self.fake_host_definition_info) + self.host_definition_manager.get_host_definition_info_from_secret.assert_called_once_with( + self.fake_secret_info) + self.host_definition_manager.add_name_to_host_definition_info.assert_called_once_with( + test_settings.FAKE_NODE_NAME, self.fake_host_definition_info) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_add_name_to_host_definition_info_success(self, mock_utils): + random_string = '2049530945i3094i' + mock_utils.get_random_string.return_value = random_string + result = self.host_definition_manager.add_name_to_host_definition_info( + test_settings.FAKE_NODE_NAME, test_utils.get_fake_empty_host_definition_info()) + self.assertEqual(result.name, '{0}-{1}'.format(test_settings.FAKE_NODE_NAME, random_string).replace('_', '.')) + self.assertEqual(result.node_name, test_settings.FAKE_NODE_NAME) + self.assertEqual(result.node_id, test_settings.FAKE_NODE_ID) + + def test_update_host_definition_info_success(self): + result = self._test_update_host_definition_info(self.fake_host_definition_info) + self.assertEqual(result.connectivity_type, self.fake_host_definition_info.connectivity_type) + self.assertEqual(result.node_id, self.fake_host_definition_info.node_id) + + def test_do_not_update_not_found_host_definition_info_success(self): + result = self._test_update_host_definition_info(None) + self.assertEqual(result.connectivity_type, 'some_connectivity') + 
self.assertEqual(result.node_id, 'some_node_id') + + def _test_update_host_definition_info(self, matching_host_definition_info): + host_definition_info = deepcopy(self.fake_host_definition_info) + host_definition_info.connectivity_type = 'some_connectivity' + host_definition_info.node_id = 'some_node_id' + self._prepare_get_matching_host_definition_info_function_as_mock(matching_host_definition_info) + result = self.host_definition_manager.update_host_definition_info(host_definition_info) + self._assert_get_matching_host_definition_called_once_with() + return result + + @patch('{}.manifest_utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_create_exist_host_definition_success(self, mock_manifest_utils): + host_definition_manifest = self._test_create_host_definition_if_not_exist( + 'different_name', self.fake_host_definition_info, None, mock_manifest_utils) + host_definition_manifest[common_settings.METADATA_FIELD][common_settings.NAME_FIELD] = \ + test_settings.FAKE_NODE_NAME + self.host_definition_manager.k8s_api.patch_host_definition.assert_called_once_with(host_definition_manifest) + self.host_definition_manager.create_host_definition.assert_not_called() + + @patch('{}.manifest_utils'.format(test_settings.HOST_DEFINITION_MANAGER_PATH)) + def test_create_new_host_definition_success(self, mock_manifest_utils): + host_definition_manifest = self._test_create_host_definition_if_not_exist( + test_settings.FAKE_NODE_NAME, None, self.fake_host_definition_info, mock_manifest_utils) + self.host_definition_manager.k8s_api.patch_host_definition.assert_not_called() + self.host_definition_manager.create_host_definition.assert_called_once_with(host_definition_manifest) + + def _test_create_host_definition_if_not_exist(self, new_host_definition_name, matching_host_definition, + created_host_definition, mock_manifest_utils): + host_definition_manifest = deepcopy(test_manifest_utils.get_fake_k8s_host_definition_manifest()) + 
host_definition_manifest[common_settings.METADATA_FIELD][common_settings.NAME_FIELD] = new_host_definition_name + mock_manifest_utils.get_host_definition_manifest.return_value = host_definition_manifest + self._prepare_get_matching_host_definition_info_function_as_mock(matching_host_definition) + self.host_definition_manager.create_host_definition = Mock() + self.host_definition_manager.create_host_definition.return_value = created_host_definition + result = self.host_definition_manager.create_host_definition_if_not_exist( + self.fake_host_definition_info, self.define_response) + + self.assertEqual(result, self.fake_host_definition_info) + mock_manifest_utils.get_host_definition_manifest.assert_called_once_with( + self.fake_host_definition_info, self.define_response, test_settings.FAKE_NODE_ID) + self._assert_get_matching_host_definition_called_once_with() + return host_definition_manifest + + def test_set_host_definition_status_to_ready_success(self): + self.host_definition_manager.set_host_definition_status = Mock() + self.host_definition_manager.create_k8s_event_for_host_definition = Mock() + self.host_definition_manager.set_host_definition_status_to_ready(self.fake_host_definition_info) + self.host_definition_manager.set_host_definition_status.assert_called_once_with( + self.fake_host_definition_info.name, common_settings.READY_PHASE) + self.host_definition_manager.create_k8s_event_for_host_definition.assert_called_once_with( + self.fake_host_definition_info, test_settings.MESSAGE, + common_settings.DEFINE_ACTION, common_settings.SUCCESSFUL_MESSAGE_TYPE) + + def test_create_k8s_event_for_host_definition_success(self): + k8s_event = test_utils.get_event_object_metadata() + self.host_definition_manager.event_manager.generate_k8s_event.return_value = k8s_event + self.host_definition_manager.create_k8s_event_for_host_definition( + self.fake_host_definition_info, test_settings.MESSAGE, + common_settings.DEFINE_ACTION, common_settings.SUCCESSFUL_MESSAGE_TYPE) + 
self.host_definition_manager.event_manager.generate_k8s_event.assert_called_once_with( + self.fake_host_definition_info, test_settings.MESSAGE, + common_settings.DEFINE_ACTION, common_settings.SUCCESSFUL_MESSAGE_TYPE) + self.host_definition_manager.k8s_api.create_event.assert_called_once_with(common_settings.DEFAULT_NAMESPACE, + k8s_event) + + def test_set_host_definition_status_to_pending_and_create_event_after_failed_definition(self): + self._prepare_set_status_to_host_definition_after_definition(test_settings.MESSAGE) + self.host_definition_manager.set_host_definition_status.assert_called_once_with( + self.fake_host_definition_info.name, common_settings.PENDING_CREATION_PHASE) + self.host_definition_manager.create_k8s_event_for_host_definition.assert_called_once_with( + self.fake_host_definition_info, test_settings.MESSAGE, + common_settings.DEFINE_ACTION, common_settings.FAILED_MESSAGE_TYPE) + self.host_definition_manager.set_host_definition_status_to_ready.assert_not_called() + + def test_set_host_definition_status_to_ready_after_successful_definition(self): + self._prepare_set_status_to_host_definition_after_definition('') + self.host_definition_manager.set_host_definition_status.assert_not_called() + self.host_definition_manager.create_k8s_event_for_host_definition.assert_not_called() + self.host_definition_manager.set_host_definition_status_to_ready.assert_called_once_with( + self.fake_host_definition_info) + + def _prepare_set_status_to_host_definition_after_definition(self, message_from_storage): + self.host_definition_manager.set_host_definition_status = Mock() + self.host_definition_manager.create_k8s_event_for_host_definition = Mock() + self.host_definition_manager.set_host_definition_status_to_ready = Mock() + self.host_definition_manager.set_status_to_host_definition_after_definition( + message_from_storage, self.fake_host_definition_info) + + def test_handle_host_definition_after_failed_undefine_action_and_when_host_definition_exist(self): + 
self._test_handle_k8s_host_definition_after_undefine_action_if_exist( + self.fake_host_definition_info, self.define_response) + self.host_definition_manager.set_host_definition_status.assert_called_once_with( + self.fake_host_definition_info.name, common_settings.PENDING_DELETION_PHASE) + self.host_definition_manager.create_k8s_event_for_host_definition.assert_called_once_with( + self.fake_host_definition_info, self.define_response.error_message, common_settings.UNDEFINE_ACTION, + common_settings.FAILED_MESSAGE_TYPE) + self.host_definition_manager.delete_host_definition.assert_not_called() + + def test_handle_host_definition_after_successful_undefine_action_and_when_host_definition_exist(self): + define_response = deepcopy(self.define_response) + define_response.error_message = '' + self._test_handle_k8s_host_definition_after_undefine_action_if_exist( + self.fake_host_definition_info, define_response) + self.host_definition_manager.set_host_definition_status.assert_not_called() + self.host_definition_manager.create_k8s_event_for_host_definition.assert_not_called() + self.host_definition_manager.delete_host_definition.assert_called_once_with( + self.fake_host_definition_info.name) + + def test_handle_k8s_host_definition_after_undefine_action_when_not_exist(self): + self._test_handle_k8s_host_definition_after_undefine_action_if_exist(None, self.define_response) + self.host_definition_manager.set_host_definition_status.assert_not_called() + self.host_definition_manager.create_k8s_event_for_host_definition.assert_not_called() + self.host_definition_manager.delete_host_definition.assert_not_called() + + def _test_handle_k8s_host_definition_after_undefine_action_if_exist( + self, matching_host_definition, define_response): + self.host_definition_manager.set_host_definition_status = Mock() + self.host_definition_manager.create_k8s_event_for_host_definition = Mock() + self.host_definition_manager.delete_host_definition = Mock() + 
self._prepare_get_matching_host_definition_info_function_as_mock(matching_host_definition) + self.host_definition_manager.handle_k8s_host_definition_after_undefine_action( + self.fake_host_definition_info, define_response) + self._assert_get_matching_host_definition_called_once_with() + + def test_return_true_when_host_definition_phase_is_pending(self): + result = self.host_definition_manager.is_host_definition_in_pending_phase( + common_settings.PENDING_CREATION_PHASE) + self.assertTrue(result) + + def test_return_false_when_host_definition_phase_is_not_pending(self): + result = self.host_definition_manager.is_host_definition_in_pending_phase(common_settings.READY_PHASE) + self.assertFalse(result) + + def test_set_host_definition_status_to_error_success(self): + self.host_definition_manager.set_host_definition_status = Mock() + self.host_definition_manager.set_host_definition_phase_to_error(self.fake_host_definition_info) + self.host_definition_manager.set_host_definition_status.assert_called_once_with( + self.fake_host_definition_info.name, common_settings.ERROR_PHASE) + + def test_return_true_when_host_definition_is_not_pending_and_exist(self): + result = self._test_is_host_definition_not_pending(self.fake_host_definition_info) + self.assertTrue(result) + + def test_return_true_when_host_definition_is_not_exist_after_it_was_pending(self): + result = self._test_is_host_definition_not_pending(None) + self.assertTrue(result) + + def test_return_false_when_host_definition_exist_but_still_pending(self): + host_definition_info = deepcopy(self.fake_host_definition_info) + host_definition_info.phase = common_settings.PENDING_CREATION_PHASE + result = self._test_is_host_definition_not_pending(host_definition_info) + self.assertFalse(result) + + def _test_is_host_definition_not_pending(self, matching_host_definition): + self._prepare_get_matching_host_definition_info_function_as_mock(matching_host_definition) + result = 
self.host_definition_manager.is_host_definition_not_pending(self.fake_host_definition_info) + self._assert_get_matching_host_definition_called_once_with() + return result + + def _prepare_get_matching_host_definition_info_function_as_mock(self, matching_host_definition): + self.host_definition_manager.get_matching_host_definition_info = Mock() + self.host_definition_manager.get_matching_host_definition_info.return_value = matching_host_definition + + def _assert_get_matching_host_definition_called_once_with(self): + self.host_definition_manager.get_matching_host_definition_info.assert_called_once_with( + self.fake_host_definition_info.name, self.fake_host_definition_info.secret_name, + self.fake_host_definition_info.secret_namespace) + + def test_get_all_host_definitions_info_of_the_node(self): + self.host_definition_manager.k8s_api.list_host_definition.return_value = \ + test_utils.get_fake_k8s_host_definitions_items() + self.host_definition_manager.resource_info_manager.generate_host_definition_info.return_value = \ + self.fake_host_definition_info + result = self.host_definition_manager.get_all_host_definitions_info_of_the_node(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, [self.fake_host_definition_info]) + self.host_definition_manager.k8s_api.list_host_definition.assert_called_once_with() + self.host_definition_manager.resource_info_manager.generate_host_definition_info.assert_called_once_with( + test_utils.get_fake_k8s_host_definitions_items().items[0]) diff --git a/controllers/tests/controller_server/host_definer/resource_manager/node_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/node_manager_test.py new file mode 100644 index 000000000..61c26b740 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/node_manager_test.py @@ -0,0 +1,481 @@ +from copy import deepcopy +from unittest.mock import MagicMock, Mock, patch + +from controllers.servers.host_definer.types import ManagedNode 
+from controllers.servers.errors import ValidationException +from controllers.servers.host_definer.resource_manager.node import NodeManager +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as test_manifest_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings + + +class TestNodeManager(BaseResourceManager): + def setUp(self): + super().setUp() + self.node_manager = NodeManager() + self.node_manager.k8s_api = MagicMock() + self.node_manager.host_definition_manager = MagicMock() + self.node_manager.secret_manager = MagicMock() + self.node_manager.definition_manager = MagicMock() + self.node_manager.resource_info_manager = MagicMock() + self.fake_node_info = test_utils.get_fake_node_info() + self.fake_csi_node_info = test_utils.get_fake_csi_node_info() + self.fake_managed_node = test_utils.get_fake_managed_node() + self.fake_host_definitions_info = test_utils.get_fake_k8s_host_definitions_items() + self.fake_secret_config = 'fake_secret_config' + self.fake_secret_data = test_utils.get_fake_k8s_secret().data + self.fake_secret_info = test_utils.get_fake_secret_info() + self.global_managed_nodes = test_utils.patch_nodes_global_variable(test_settings.NODE_MANAGER_PATH) + self.global_managed_secrets = test_utils.patch_managed_secrets_global_variable( + test_settings.NODE_MANAGER_PATH) + self.manage_node_labels_manifest = test_manifest_utils.get_metadata_with_manage_node_labels_manifest( + common_settings.MANAGE_NODE_LABEL) + self.mock_get_system_info_for_topologies = patch('{}.get_system_info_for_topologies'.format( + test_settings.NODE_MANAGER_PATH)).start() + + def test_get_nodes_info_success(self): + self._test_get_k8s_resources_info_success( 
+ self.node_manager.get_nodes_info, self.node_manager.k8s_api.list_node, + self.node_manager.resource_info_manager.generate_node_info, self.fake_node_info, + test_utils.get_fake_k8s_nodes_items()) + + def test_get_nodes_info_empty_list_success(self): + self._test_get_k8s_resources_info_empty_list_success( + self.node_manager.get_nodes_info, self.node_manager.k8s_api.list_node, + self.node_manager.resource_info_manager.generate_node_info) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_node_can_be_defined_when_dynamic_node_labeling_allowed(self, manifest_utils): + self._prepare_is_node_can_be_defined(True, manifest_utils) + self._test_is_node_can_be_defined(True, manifest_utils) + self.node_manager.is_node_has_manage_node_label.assert_not_called() + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_node_can_be_defined_when_node_has_manage_node_label(self, manifest_utils): + self._prepare_is_node_can_be_defined(False, manifest_utils) + self.node_manager.is_node_has_manage_node_label.return_value = True + self._test_is_node_can_be_defined(True, manifest_utils) + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(test_settings.FAKE_NODE_NAME) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_node_cannot_be_defined(self, manifest_utils): + self._prepare_is_node_can_be_defined(False, manifest_utils) + self.node_manager.is_node_has_manage_node_label.return_value = False + self._test_is_node_can_be_defined(False, manifest_utils) + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(test_settings.FAKE_NODE_NAME) + + def _prepare_is_node_can_be_defined(self, is_dynamic_node_labeling_allowed, manifest_utils): + manifest_utils.is_dynamic_node_labeling_allowed.return_value = is_dynamic_node_labeling_allowed + self.node_manager.is_node_has_manage_node_label = Mock() + + def _test_is_node_can_be_defined(self, expected_result, manifest_utils): + result = 
self.node_manager.is_node_can_be_defined(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, expected_result) + manifest_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_node_can_be_undefined(self, mock_utils): + self._prepare_is_node_can_be_undefined(mock_utils, True, True, False) + self._test_is_node_can_be_undefined(mock_utils, True) + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.node_manager.is_node_has_forbid_deletion_label.assert_called_once_with(test_settings.FAKE_NODE_NAME) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_node_cannot_be_undefined_when_host_definer_cannot_delete_hosts(self, mock_utils): + self._prepare_is_node_can_be_undefined(mock_utils, False) + self._test_is_node_can_be_undefined(mock_utils, False) + self.node_manager.is_node_has_manage_node_label.assert_not_called() + self.node_manager.is_node_has_forbid_deletion_label.assert_not_called() + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_node_cannot_be_undefined_when_node_not_has_manage_node_label(self, mock_utils): + self._prepare_is_node_can_be_undefined(mock_utils, True, False) + self._test_is_node_can_be_undefined(mock_utils, False) + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.node_manager.is_node_has_forbid_deletion_label.assert_not_called() + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_node_cannot_be_undefined_when_node_has_forbid_deletion_label(self, mock_utils): + self._prepare_is_node_can_be_undefined(mock_utils, True, True, True) + self._test_is_node_can_be_undefined(mock_utils, False) + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(test_settings.FAKE_NODE_NAME) + 
self.node_manager.is_node_has_forbid_deletion_label.assert_called_once_with(test_settings.FAKE_NODE_NAME) + + def _prepare_is_node_can_be_undefined( + self, mock_utils, is_dynamic_node_labeling_allowed=False, is_node_has_manage_node_label=False, + is_node_has_forbid_deletion_label=False): + mock_utils.is_host_definer_can_delete_hosts.return_value = is_dynamic_node_labeling_allowed + self._prepare_is_node_has_manage_node_label_mock(is_node_has_manage_node_label) + self.node_manager.is_node_has_forbid_deletion_label = Mock() + self.node_manager.is_node_has_forbid_deletion_label.return_value = is_node_has_forbid_deletion_label + + def _test_is_node_can_be_undefined(self, mock_utils, expected_result): + result = self.node_manager.is_node_can_be_undefined(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, expected_result) + mock_utils.is_host_definer_can_delete_hosts.assert_called_once_with() + + def test_node_has_manage_node_label(self): + self._test_is_node_has_label(self.fake_node_info, True, self.node_manager.is_node_has_manage_node_label) + + def test_node_do_not_has_manage_node_label(self): + node_info = deepcopy(self.fake_node_info) + node_info.labels.pop(common_settings.MANAGE_NODE_LABEL) + self._test_is_node_has_label(node_info, False, self.node_manager.is_node_has_manage_node_label) + + def test_node_has_forbid_deletion_label(self): + node_info = deepcopy(self.fake_node_info) + node_info.labels[common_settings.FORBID_DELETION_LABEL] = common_settings.TRUE_STRING + self._test_is_node_has_label(node_info, True, self.node_manager.is_node_has_forbid_deletion_label) + + def test_node_do_not_has_forbid_deletion_label(self): + node_info_without_forbid_deletion_label = deepcopy(self.fake_node_info) + self._test_is_node_has_label(node_info_without_forbid_deletion_label, False, + self.node_manager.is_node_has_forbid_deletion_label) + + def _test_is_node_has_label(self, node_info, expected_result, function_to_run): + 
self.node_manager.resource_info_manager.get_node_info.return_value = node_info + result = function_to_run(test_settings.FAKE_NODE_NAME) + self.node_manager.resource_info_manager.get_node_info.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, expected_result) + + @patch('{}.manifest_utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_add_node_to_nodes_when_node_do_not_has_manage_node_label_success(self, mock_manifest_utils): + excepted_managed_node = {self.fake_csi_node_info.name: self.fake_managed_node} + self._prepare_add_node_to_nodes(False, mock_manifest_utils) + self.node_manager.add_node_to_nodes(self.fake_csi_node_info) + self._assert_add_node_to_nodes(excepted_managed_node) + self._assert_update_manage_node_label_called( + mock_manifest_utils, self.manage_node_labels_manifest, common_settings.TRUE_STRING) + + @patch('{}.manifest_utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_add_node_to_nodes_when_node_has_manage_node_label_success(self, mock_manifest_utils): + excepted_managed_node = {self.fake_csi_node_info.name: self.fake_managed_node} + self._prepare_add_node_to_nodes(True, mock_manifest_utils) + self.node_manager.add_node_to_nodes(self.fake_csi_node_info) + self._assert_add_node_to_nodes(excepted_managed_node) + self._assert_update_manage_node_label_not_called(mock_manifest_utils) + + def _prepare_add_node_to_nodes(self, is_node_has_manage_node_label, mock_manifest_utils): + self._prepare_is_node_has_manage_node_label_mock(is_node_has_manage_node_label) + mock_manifest_utils.get_body_manifest_for_labels.return_value = self.manage_node_labels_manifest + self.node_manager.generate_managed_node = Mock() + self.node_manager.generate_managed_node.return_value = self.fake_managed_node + + def _prepare_is_node_has_manage_node_label_mock(self, is_node_has_manage_node_label): + self.node_manager.is_node_has_manage_node_label = Mock() + self.node_manager.is_node_has_manage_node_label.return_value = 
is_node_has_manage_node_label + + def _assert_add_node_to_nodes(self, excepted_managed_node): + self.assertEqual(self.global_managed_nodes, excepted_managed_node) + self.node_manager.generate_managed_node.assert_called_once_with(self.fake_csi_node_info) + + @patch('{}.utils'.format(test_settings.TYPES_PATH)) + def test_generate_managed_node(self, mock_utils): + self.node_manager.resource_info_manager.get_node_info.return_value = self.fake_node_info + mock_utils.generate_io_group_from_labels.return_value = test_settings.IO_GROUP_NAMES + result = self.node_manager.generate_managed_node(self.fake_csi_node_info) + self.assertEqual(result.name, self.fake_csi_node_info.name) + self.assertEqual(result.node_id, self.fake_csi_node_info.node_id) + self.assertEqual(result.io_group, test_settings.IO_GROUP_NAMES) + self.assertEqual(type(result), ManagedNode) + self.node_manager.resource_info_manager.get_node_info.assert_called_once_with(self.fake_csi_node_info.name) + mock_utils.generate_io_group_from_labels.assert_called_once_with(self.fake_node_info.labels) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + @patch('{}.manifest_utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_remove_manage_node_label_when_node_should_be_removed(self, mock_manifest_utils, mock_utils): + self._prepare_remove_manage_node_label(mock_manifest_utils, mock_utils, True, '', False) + self._test_remove_manage_node_label(mock_utils) + self.node_manager.resource_info_manager.get_csi_node_info.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.node_manager.is_node_has_host_definitions.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self._assert_update_manage_node_label_called(mock_manifest_utils, self.manage_node_labels_manifest, None) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + @patch('{}.manifest_utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_do_not_remove_manage_node_label_when_dynamic_node_labeling_is_not_allowed( + self, 
mock_manifest_utils, mock_utils): + self._prepare_remove_manage_node_label(mock_manifest_utils, mock_utils, False, '') + self._test_remove_manage_node_label(mock_utils) + self.node_manager.resource_info_manager.get_csi_node_info.assert_not_called() + self.node_manager.is_node_has_host_definitions.assert_not_called() + self._assert_update_manage_node_label_not_called(mock_manifest_utils) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + @patch('{}.manifest_utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_do_not_remove_manage_node_label_when_node_has_ibm_block_csi(self, mock_manifest_utils, mock_utils): + self._prepare_remove_manage_node_label(mock_manifest_utils, mock_utils, True, 'something') + self._test_remove_manage_node_label(mock_utils) + self.node_manager.resource_info_manager.get_csi_node_info.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.node_manager.is_node_has_host_definitions.assert_not_called() + self._assert_update_manage_node_label_not_called(mock_manifest_utils) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + @patch('{}.manifest_utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_do_not_remove_manage_node_label_when_node_has_host_definitions(self, mock_manifest_utils, mock_utils): + self._prepare_remove_manage_node_label(mock_manifest_utils, mock_utils, True, '', True) + self._test_remove_manage_node_label(mock_utils) + self.node_manager.resource_info_manager.get_csi_node_info.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.node_manager.is_node_has_host_definitions.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self._assert_update_manage_node_label_not_called(mock_manifest_utils) + + def _prepare_remove_manage_node_label(self, mock_manifest_utils, mock_utils, is_dynamic_node_labeling_allowed, + node_id, is_node_has_host_definitions=False): + csi_node_info = deepcopy(self.fake_csi_node_info) + csi_node_info.node_id = node_id + 
mock_utils.is_dynamic_node_labeling_allowed.return_value = is_dynamic_node_labeling_allowed + self.node_manager.resource_info_manager.get_csi_node_info.return_value = csi_node_info + self.node_manager.is_node_has_host_definitions = Mock() + self.node_manager.is_node_has_host_definitions.return_value = is_node_has_host_definitions + mock_manifest_utils.get_body_manifest_for_labels.return_value = self.manage_node_labels_manifest + + def _test_remove_manage_node_label(self, mock_utils): + self.node_manager.remove_manage_node_label(test_settings.FAKE_NODE_NAME) + mock_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + + def _assert_update_manage_node_label_called(self, mock_manifest_utils, excepted_body, expected_label_value): + mock_manifest_utils.get_body_manifest_for_labels.assert_called_once_with(expected_label_value) + self.node_manager.k8s_api.patch_node.assert_called_once_with(test_settings.FAKE_NODE_NAME, excepted_body) + + def _assert_update_manage_node_label_not_called(self, mock_manifest_utils): + mock_manifest_utils.get_body_manifest_for_labels.assert_not_called() + self.node_manager.k8s_api.patch_node.assert_not_called() + + def test_return_true_when_node_has_host_definitions(self): + self._test_is_node_has_host_definitions(self.fake_host_definitions_info.items, True) + + def test_return_false_when_node_has_host_definitions(self): + self._test_is_node_has_host_definitions([], False) + + def _test_is_node_has_host_definitions(self, host_definitions, expected_result): + self.node_manager.host_definition_manager.get_all_host_definitions_info_of_the_node.return_value = \ + host_definitions + result = self.node_manager.is_node_has_host_definitions(test_settings.FAKE_NODE_NAME) + self.assertEqual(result, expected_result) + self.node_manager.host_definition_manager.get_all_host_definitions_info_of_the_node.assert_called_with( + test_settings.FAKE_NODE_NAME) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def 
test_generate_single_node_with_system_id_success(self, mock_utils): + self._prepare_generate_nodes_with_system_id(mock_utils, [self.fake_node_info]) + result = self.node_manager.generate_nodes_with_system_id(self.fake_secret_data) + self.assertEqual(result, {self.fake_node_info.name: test_settings.FAKE_SYSTEM_ID}) + self._assert_generate_nodes_with_system_id(mock_utils) + self._assert_get_system_id_for_node_called_once() + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_generate_single_node_with_empty_system_id_when_fail_to_get_system_info(self, mock_utils): + self._prepare_generate_nodes_with_system_id(mock_utils, [self.fake_node_info]) + self.mock_get_system_info_for_topologies.side_effect = ValidationException('fail') + result = self.node_manager.generate_nodes_with_system_id(self.fake_secret_data) + self.assertEqual(result, {self.fake_node_info.name: ''}) + self._assert_generate_nodes_with_system_id(mock_utils) + self._assert_get_system_id_for_node_called_once() + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_generate_multiple_nodes_with_system_id_success(self, mock_utils): + second_node_info = deepcopy(self.fake_node_info) + second_node_info.name = 'second_node_info' + expected_result = {self.fake_node_info.name: test_settings.FAKE_SYSTEM_ID, + second_node_info.name: test_settings.FAKE_SYSTEM_ID} + self._prepare_generate_nodes_with_system_id(mock_utils, [self.fake_node_info, second_node_info]) + result = self.node_manager.generate_nodes_with_system_id(self.fake_secret_data) + self.assertEqual(result, expected_result) + self._assert_generate_nodes_with_system_id(mock_utils) + self.assertEqual(self.node_manager.secret_manager.get_topology_labels.call_count, 2) + self.assertEqual(self.mock_get_system_info_for_topologies.call_count, 2) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_get_empty_dict_when_there_are_no_nodes_success(self, mock_utils): + 
self._prepare_generate_nodes_with_system_id(mock_utils, []) + result = self.node_manager.generate_nodes_with_system_id(self.fake_secret_data) + self.assertEqual(result, {}) + self._assert_generate_nodes_with_system_id(mock_utils) + self.node_manager.secret_manager.get_topology_labels.assert_not_called() + self.mock_get_system_info_for_topologies.assert_not_called() + + def _prepare_generate_nodes_with_system_id(self, mock_utils, nodes_info): + mock_utils.get_secret_config.return_value = self.fake_secret_config + self.node_manager.get_nodes_info = Mock() + self.node_manager.get_nodes_info.return_value = nodes_info + self.node_manager.secret_manager.get_topology_labels.return_value = test_settings.FAKE_TOPOLOGY_LABELS + self.mock_get_system_info_for_topologies.return_value = (None, test_settings.FAKE_SYSTEM_ID) + + def _assert_generate_nodes_with_system_id(self, mock_utils): + mock_utils.get_secret_config.assert_called_once_with(self.fake_secret_data) + self.node_manager.get_nodes_info.assert_called_once_with() + + def _assert_get_system_id_for_node_called_once(self): + self.node_manager.secret_manager.get_topology_labels.assert_called_once_with(self.fake_node_info.labels) + self.mock_get_system_info_for_topologies.assert_called_once_with( + self.fake_secret_config, test_settings.FAKE_TOPOLOGY_LABELS) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_return_true_when_node_has_new_manage_node_label_success(self, manifest_utils): + self._prepare_is_node_has_new_manage_node_label(manifest_utils, False, True) + self._test_is_node_has_new_manage_node_label(True, self.fake_csi_node_info) + manifest_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(self.fake_csi_node_info.name) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_return_false_when_dynamic_node_labeling_allowed_success(self, manifest_utils): + 
self._prepare_is_node_has_new_manage_node_label(manifest_utils, True) + self._test_is_node_has_new_manage_node_label(False, self.fake_csi_node_info) + manifest_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + self.node_manager.is_node_has_manage_node_label.assert_not_called() + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_return_false_when_node_not_has_manage_node_label_success(self, manifest_utils): + self._prepare_is_node_has_new_manage_node_label(manifest_utils, False, False) + self._test_is_node_has_new_manage_node_label(False, self.fake_csi_node_info) + manifest_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(self.fake_csi_node_info.name) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_return_false_when_node_is_already_managed_success(self, manifest_utils): + self.global_managed_nodes[test_settings.FAKE_NODE_NAME] = self.fake_managed_node + self._prepare_is_node_has_new_manage_node_label(manifest_utils, False, True) + self._test_is_node_has_new_manage_node_label(False, self.fake_csi_node_info) + manifest_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(self.fake_csi_node_info.name) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_return_false_when_csi_node_do_not_have_node_id_success(self, manifest_utils): + csi_node_info = deepcopy(self.fake_csi_node_info) + csi_node_info.node_id = '' + self._prepare_is_node_has_new_manage_node_label(manifest_utils, False, True) + self._test_is_node_has_new_manage_node_label(False, csi_node_info) + manifest_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(csi_node_info.name) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def 
test_return_false_when_node_is_not_in_unmanaged_nodes_list_success(self, manifest_utils): + csi_node_info = deepcopy(self.fake_csi_node_info) + csi_node_info.name = 'bad-name' + self._prepare_is_node_has_new_manage_node_label(manifest_utils, False, True) + self._test_is_node_has_new_manage_node_label(False, csi_node_info) + manifest_utils.is_dynamic_node_labeling_allowed.assert_called_once_with() + self.node_manager.is_node_has_manage_node_label.assert_called_once_with(csi_node_info.name) + + def _prepare_is_node_has_new_manage_node_label( + self, manifest_utils, is_dynamic_node_labeling_allowed, is_node_has_manage_node_label=False): + manifest_utils.is_dynamic_node_labeling_allowed.return_value = is_dynamic_node_labeling_allowed + self.node_manager.is_node_has_manage_node_label = Mock() + self.node_manager.is_node_has_manage_node_label.return_value = is_node_has_manage_node_label + + def _test_is_node_has_new_manage_node_label(self, expected_result, csi_node_info): + result = self.node_manager.is_node_has_new_manage_node_label( + csi_node_info, [test_settings.FAKE_NODE_NAME]) + self.assertEqual(result, expected_result) + + def test_do_not_handle_node_topologies_when_node_is_not_managed(self): + self.global_managed_nodes = {} + self.node_manager.handle_node_topologies(self.fake_node_info, common_settings.MODIFIED_EVENT_TYPE) + self._assert_do_not_handle_node_topologies() + + def test_do_not_handle_node_topologies_when_watch_event_is_not_modified_type(self): + self.global_managed_nodes[test_settings.FAKE_NODE_NAME] = self.fake_managed_node + self.node_manager.handle_node_topologies(self.fake_node_info, common_settings.ADDED_EVENT_TYPE) + self._assert_do_not_handle_node_topologies() + + def _assert_do_not_handle_node_topologies(self): + self.node_manager.secret_manager.is_node_should_managed_on_secret_info.assert_not_called() + self.node_manager.secret_manager.is_node_labels_in_system_ids_topologies.assert_not_called() + 
self.node_manager.secret_manager.get_system_id_for_node_labels.assert_not_called() + self.node_manager.definition_manager.define_node_on_all_storages.assert_not_called() + + def test_do_not_handle_node_topologies_when_node_is_not_in_secret_topologies(self): + self._prepare_handle_node_topologies(self.fake_secret_info, False, False) + global_managed_secrets = deepcopy(self.global_managed_secrets) + self.node_manager.handle_node_topologies(self.fake_node_info, common_settings.MODIFIED_EVENT_TYPE) + self.assertEqual(global_managed_secrets[0].nodes_with_system_id, + self.global_managed_secrets[0].nodes_with_system_id) + self.node_manager.secret_manager.is_node_should_managed_on_secret_info.assert_called_once_with( + self.fake_node_info.name, self.fake_secret_info) + self.node_manager.secret_manager.is_node_labels_in_system_ids_topologies.assert_called_once_with( + self.fake_secret_info.system_ids_topologies, self.fake_node_info.labels) + self.node_manager.secret_manager.get_system_id_for_node_labels.assert_not_called() + self.node_manager.definition_manager.define_node_on_all_storages.assert_not_called() + + def test_remove_node_from_secret_topology_fields_if_topology_not_match_anymore(self): + fake_secret_info = deepcopy(self.fake_secret_info) + self._prepare_handle_node_topologies(fake_secret_info, True, False) + self.node_manager.handle_node_topologies(self.fake_node_info, common_settings.MODIFIED_EVENT_TYPE) + self._assert_global_secrets_changed_as_wanted({}) + self._assert_called_remove_node_if_topology_not_match(fake_secret_info) + + def _assert_called_remove_node_if_topology_not_match(self, fake_secret_info): + self.node_manager.secret_manager.is_node_should_managed_on_secret_info.assert_called_once_with( + self.fake_node_info.name, fake_secret_info) + self.node_manager.secret_manager.is_node_labels_in_system_ids_topologies.assert_called_once_with( + fake_secret_info.system_ids_topologies, self.fake_node_info.labels) + 
self.node_manager.secret_manager.get_system_id_for_node_labels.assert_not_called() + self.node_manager.definition_manager.define_node_on_all_storages.assert_not_called() + + def test_define_host_with_new_topology(self): + fake_secret_info = deepcopy(self.fake_secret_info) + fake_secret_info.nodes_with_system_id = {} + self._prepare_handle_node_topologies(fake_secret_info, False, True) + self.node_manager.handle_node_topologies(self.fake_node_info, common_settings.MODIFIED_EVENT_TYPE) + self._assert_called_define_host_with_new_topology(fake_secret_info) + self._assert_global_secrets_changed_as_wanted(self.fake_secret_info.nodes_with_system_id) + + def _prepare_handle_node_topologies(self, fake_secret_info, is_node_should_managed, + is_node_labels_in_system_ids_topologies): + self.global_managed_nodes[test_settings.FAKE_NODE_NAME] = self.fake_managed_node + self.global_managed_secrets.append(fake_secret_info) + self.node_manager.secret_manager.is_node_should_managed_on_secret_info.return_value = is_node_should_managed + self.node_manager.secret_manager.is_node_labels_in_system_ids_topologies.return_value = \ + is_node_labels_in_system_ids_topologies + self.node_manager.secret_manager.get_system_id_for_node_labels.return_value = test_settings.FAKE_SYSTEM_ID + + def _assert_called_define_host_with_new_topology(self, fake_secret_info): + self.node_manager.secret_manager.is_node_should_managed_on_secret_info.assert_called_once_with( + self.fake_node_info.name, fake_secret_info) + self.node_manager.secret_manager.is_node_labels_in_system_ids_topologies.assert_called_once_with( + fake_secret_info.system_ids_topologies, self.fake_node_info.labels) + self.node_manager.secret_manager.get_system_id_for_node_labels.assert_called_once_with( + fake_secret_info.system_ids_topologies, self.fake_node_info.labels) + self.node_manager.definition_manager.define_node_on_all_storages.assert_called_once_with( + test_settings.FAKE_NODE_NAME) + + def 
_assert_global_secrets_changed_as_wanted(self, expected_nodes_with_system_id): + managed_secret_info = self.global_managed_secrets[0] + self.assertEqual(managed_secret_info.name, self.fake_secret_info.name) + self.assertEqual(managed_secret_info.namespace, self.fake_secret_info.namespace) + self.assertEqual(managed_secret_info.nodes_with_system_id, expected_nodes_with_system_id) + self.assertEqual(managed_secret_info.system_ids_topologies, self.fake_secret_info.system_ids_topologies) + self.assertEqual(managed_secret_info.managed_storage_classes, self.fake_secret_info.managed_storage_classes) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_update_node_io_group_when_node_is_managed_and_his_io_group_was_changed(self, mock_utils): + fake_managed_node = deepcopy(self.fake_managed_node) + self._test_update_node_io_group_when_node_is_managed( + mock_utils, fake_managed_node, 'different_io_group', 'different_io_group') + self.node_manager.definition_manager.define_node_on_all_storages.assert_called_once_with( + test_settings.FAKE_NODE_NAME) + + @patch('{}.utils'.format(test_settings.NODE_MANAGER_PATH)) + def test_do_not_update_node_io_group_when_node_is_managed_and_his_io_group_was_not_changed(self, mock_utils): + fake_managed_node = deepcopy(self.fake_managed_node) + self._test_update_node_io_group_when_node_is_managed( + mock_utils, fake_managed_node, fake_managed_node.io_group, self.fake_managed_node.io_group) + + def _test_update_node_io_group_when_node_is_managed( + self, mock_utils, fake_managed_node, io_group_from_labels, expected_io_group): + self.global_managed_nodes[test_settings.FAKE_NODE_NAME] = fake_managed_node + mock_utils.generate_io_group_from_labels.return_value = io_group_from_labels + self.node_manager.update_node_io_group(self.fake_node_info) + managed_node = self.global_managed_nodes[test_settings.FAKE_NODE_NAME] + self.assertEqual(managed_node.name, self.fake_managed_node.name) + self.assertEqual(managed_node.node_id, 
self.fake_managed_node.node_id) + self.assertEqual(managed_node.io_group, expected_io_group) + + @patch('{}.utils'.format(test_settings.TYPES_PATH)) + def test_do_not_update_node_io_group_when_node_is_not_managed(self, mock_utils): + self.global_managed_nodes = [] + mock_utils.generate_io_group_from_labels.return_value = self.fake_managed_node.io_group + self.node_manager.update_node_io_group(self.fake_node_info) + self.assertEqual(len(self.global_managed_nodes), 0) + self.node_manager.definition_manager.define_node_on_all_storages.assert_not_called() diff --git a/controllers/tests/controller_server/host_definer/resource_manager/resource_info_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/resource_info_manager_test.py new file mode 100644 index 000000000..47bd7c6da --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/resource_info_manager_test.py @@ -0,0 +1,171 @@ +from unittest.mock import MagicMock, Mock, patch + +from controllers.servers.host_definer.types import SecretInfo +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager +from controllers.servers.host_definer.resource_manager.resource_info import ResourceInfoManager +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings + + +class TestCSINodeManager(BaseResourceManager): + def setUp(self): + super().setUp() + self.resource_info_manager = ResourceInfoManager() + self.resource_info_manager.k8s_api = MagicMock() + self.fake_node_info = test_utils.get_fake_node_info() + self.fake_csi_node_info = test_utils.get_fake_csi_node_info() + self.fake_storage_class_info = test_utils.get_fake_storage_class_info() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.fake_k8s_secret = 
test_utils.get_fake_k8s_secret() + + def test_get_node_info_seccess(self): + self.resource_info_manager.k8s_api.read_node.return_value = test_utils.get_fake_k8s_node( + common_settings.MANAGE_NODE_LABEL) + self.resource_info_manager.generate_node_info = Mock() + self.resource_info_manager.generate_node_info.return_value = self.fake_node_info + result = self.resource_info_manager.get_node_info(common_settings.MANAGE_NODE_LABEL) + self.assertEqual(result.name, self.fake_node_info.name) + self.assertEqual(result.labels, self.fake_node_info.labels) + self.resource_info_manager.k8s_api.read_node.assert_called_once_with(common_settings.MANAGE_NODE_LABEL) + self.resource_info_manager.generate_node_info.assert_called_once_with(test_utils.get_fake_k8s_node( + common_settings.MANAGE_NODE_LABEL)) + + def test_fail_to_get_node_info(self): + self.resource_info_manager.k8s_api.read_node.return_value = None + self.resource_info_manager.generate_node_info = Mock() + result = self.resource_info_manager.get_node_info(common_settings.MANAGE_NODE_LABEL) + self.assertEqual(result.name, '') + self.assertEqual(result.labels, {}) + self.resource_info_manager.k8s_api.read_node.assert_called_once_with(common_settings.MANAGE_NODE_LABEL) + self.resource_info_manager.generate_node_info.assert_not_called() + + def test_generate_node_info_success(self): + result = self.resource_info_manager.generate_node_info(test_utils.get_fake_k8s_node( + common_settings.MANAGE_NODE_LABEL)) + self.assertEqual(result.name, self.fake_node_info.name) + self.assertEqual(result.labels, test_utils.get_fake_k8s_node(common_settings.MANAGE_NODE_LABEL).metadata.labels) + + def test_get_csi_node_info_success(self): + self.resource_info_manager.k8s_api.get_csi_node.return_value = test_utils.get_fake_k8s_csi_node() + self.resource_info_manager.generate_csi_node_info = Mock() + self.resource_info_manager.generate_csi_node_info.return_value = self.fake_csi_node_info + result = 
self.resource_info_manager.get_csi_node_info(test_settings.FAKE_NODE_NAME) + self.assertEqual(result.name, self.fake_csi_node_info.name) + self.assertEqual(result.node_id, self.fake_csi_node_info.node_id) + self.resource_info_manager.k8s_api.get_csi_node.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.resource_info_manager.generate_csi_node_info.assert_called_once_with(test_utils.get_fake_k8s_csi_node()) + + def test_get_non_exist_csi_node_info_success(self): + self.resource_info_manager.k8s_api.get_csi_node.return_value = None + self.resource_info_manager.generate_csi_node_info = Mock() + result = self.resource_info_manager.get_csi_node_info(test_settings.FAKE_NODE_NAME) + self.assertEqual(result.name, "") + self.assertEqual(result.node_id, "") + self.resource_info_manager.k8s_api.get_csi_node.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.resource_info_manager.generate_csi_node_info.assert_not_called() + + def test_generate_csi_node_info_with_ibm_driver_success(self): + result = self.resource_info_manager.generate_csi_node_info( + test_utils.get_fake_k8s_csi_node(common_settings.CSI_PROVISIONER_NAME)) + self.assertEqual(result.name, self.fake_csi_node_info.name) + self.assertEqual(result.node_id, self.fake_csi_node_info.node_id) + + def test_generate_csi_node_info_with_non_ibm_driver_success(self): + result = self.resource_info_manager.generate_csi_node_info( + test_utils.get_fake_k8s_csi_node(test_settings.FAKE_CSI_PROVISIONER)) + self.assertEqual(result.name, self.fake_csi_node_info.name) + self.assertEqual(result.node_id, '') + + def test_get_storage_classes_info_success(self): + self.resource_info_manager.generate_storage_class_info = Mock() + self._test_get_k8s_resources_info_success( + self.resource_info_manager.get_storage_classes_info, self.resource_info_manager.k8s_api.list_storage_class, + self.resource_info_manager.generate_storage_class_info, self.fake_storage_class_info, + 
test_utils.get_fake_k8s_storage_class_items(common_settings.CSI_PROVISIONER_NAME)) + + def test_get_storage_classes_info_empty_list_success(self): + self.resource_info_manager.generate_storage_class_info = Mock() + self._test_get_k8s_resources_info_empty_list_success(self.resource_info_manager.get_storage_classes_info, + self.resource_info_manager.k8s_api.list_storage_class, + self.resource_info_manager.generate_storage_class_info) + + def test_generate_storage_class_info_success(self): + k8s_storage_class = test_utils.get_fake_k8s_storage_class(common_settings.CSI_PROVISIONER_NAME) + result = self.resource_info_manager.generate_storage_class_info(k8s_storage_class) + self.assertEqual(result.name, self.fake_storage_class_info.name) + self.assertEqual(result.provisioner, self.fake_storage_class_info.provisioner) + self.assertEqual(result.parameters, self.fake_storage_class_info.parameters) + + def test_get_csi_pods_info_success(self): + self._test_get_pods_info(1) + + def test_get_multiple_csi_pods_info_success(self): + self._test_get_pods_info(2) + + def _test_get_pods_info(self, number_of_pods): + self.resource_info_manager.k8s_api.list_pod_for_all_namespaces.return_value = \ + test_utils.get_fake_k8s_pods_items(number_of_pods) + result = self.resource_info_manager.get_csi_pods_info() + self.assertEqual(result[0].name, test_utils.get_fake_k8s_pods_items().items[0].metadata.name) + self.assertEqual(result[0].node_name, test_utils.get_fake_k8s_pods_items().items[0].spec.node_name) + self.assertEqual(len(result), number_of_pods) + self.resource_info_manager.k8s_api.list_pod_for_all_namespaces.assert_called_once_with( + common_settings.DRIVER_PRODUCT_LABEL) + + def test_get_none_when_fail_to_get_k8s_pods(self): + self.resource_info_manager.k8s_api.list_pod_for_all_namespaces.return_value = None + result = self.resource_info_manager.get_csi_pods_info() + self.assertEqual(result, []) + 
self.resource_info_manager.k8s_api.list_pod_for_all_namespaces.assert_called_once_with( + common_settings.DRIVER_PRODUCT_LABEL) + + @patch('{}.utils'.format(test_settings.RESOURCE_INFO_MANAGER_PATH)) + def test_generate_host_definition_info_success(self, mock_utils): + k8s_host_definition = test_utils.get_fake_k8s_host_definition(common_settings.READY_PHASE) + mock_utils.get_k8s_object_resource_version.return_value = self.fake_host_definition_info.resource_version + result = self.resource_info_manager.generate_host_definition_info(k8s_host_definition) + self.assertEqual(result.name, self.fake_host_definition_info.name) + self.assertEqual(result.resource_version, self.fake_host_definition_info.resource_version) + self.assertEqual(result.uid, self.fake_host_definition_info.uid) + self.assertEqual(result.phase, self.fake_host_definition_info.phase) + self.assertEqual(result.secret_name, self.fake_host_definition_info.secret_name) + self.assertEqual(result.secret_namespace, self.fake_host_definition_info.secret_namespace) + self.assertEqual(result.node_name, self.fake_host_definition_info.node_name) + self.assertEqual(result.node_id, self.fake_host_definition_info.node_id) + self.assertEqual(result.connectivity_type, self.fake_host_definition_info.connectivity_type) + mock_utils.get_k8s_object_resource_version.assert_called_once_with(k8s_host_definition) + + def test_generate_k8s_secret_to_secret_info_success(self): + result = self.resource_info_manager.generate_k8s_secret_to_secret_info(self.fake_k8s_secret, 'input1', 'input2') + self.assertEqual(result.name, test_settings.FAKE_SECRET) + self.assertEqual(result.namespace, test_settings.FAKE_SECRET_NAMESPACE) + self.assertEqual(result.nodes_with_system_id, 'input1') + self.assertEqual(result.system_ids_topologies, 'input2') + self.assertEqual(type(result), SecretInfo) + + def test_generate_k8s_secret_to_secret_info_defaults_success(self): + result = 
self.resource_info_manager.generate_k8s_secret_to_secret_info(self.fake_k8s_secret) + self.assertEqual(result.name, test_settings.FAKE_SECRET) + self.assertEqual(result.namespace, test_settings.FAKE_SECRET_NAMESPACE) + self.assertEqual(result.nodes_with_system_id, {}) + self.assertEqual(result.system_ids_topologies, {}) + self.assertEqual(type(result), SecretInfo) + + def test_generate_secret_info_success(self): + result = self.resource_info_manager.generate_secret_info( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE, 'input1', 'input2') + self.assertEqual(result.name, test_settings.FAKE_SECRET) + self.assertEqual(result.namespace, test_settings.FAKE_SECRET_NAMESPACE) + self.assertEqual(result.nodes_with_system_id, 'input1') + self.assertEqual(result.system_ids_topologies, 'input2') + self.assertEqual(type(result), SecretInfo) + + def test_generate_secret_info_defaults_success(self): + result = self.resource_info_manager.generate_secret_info(test_settings.FAKE_SECRET, + test_settings.FAKE_SECRET_NAMESPACE) + self.assertEqual(result.name, test_settings.FAKE_SECRET) + self.assertEqual(result.namespace, test_settings.FAKE_SECRET_NAMESPACE) + self.assertEqual(result.nodes_with_system_id, {}) + self.assertEqual(result.system_ids_topologies, {}) + self.assertEqual(type(result), SecretInfo) diff --git a/controllers/tests/controller_server/host_definer/resource_manager/secret_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/secret_manager_test.py new file mode 100644 index 000000000..a0304b05b --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/secret_manager_test.py @@ -0,0 +1,316 @@ +from copy import deepcopy +from unittest.mock import MagicMock, Mock, patch + +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager +from controllers.servers.host_definer.resource_manager.secret import SecretManager +import 
controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as test_manifest_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings + + +class TestSecretManager(BaseResourceManager): + def setUp(self): + self.secret_manager = SecretManager() + self.secret_manager.k8s_api = MagicMock() + self.secret_manager.resource_info_manager = MagicMock() + self.fake_secret_info = test_utils.get_fake_secret_info() + self.fake_secret_data = test_utils.get_fake_k8s_secret().data + self.fake_k8s_secret = test_utils.get_fake_k8s_secret() + self.secret_config_with_system_info = test_manifest_utils.get_fake_secret_config_with_system_info_manifest() + self.expected_decode_secret_config = 'decoded secret config' + self.managed_secrets = patch('{}.MANAGED_SECRETS'.format(test_settings.SECRET_MANAGER_PATH), + [self.fake_secret_info]).start() + self.mock_is_topology_match = patch('{}.is_topology_match'.format(test_settings.SECRET_MANAGER_PATH)).start() + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_get_secret_data_success(self, mock_utils): + result = self._test_get_secret_data(self.fake_secret_data, mock_utils) + mock_utils.change_decode_base64_secret_config.assert_called_once_with(self.fake_secret_data) + self.assertEqual(result, self.expected_decode_secret_config) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_get_empty_dict_when_there_is_no_secret_data(self, mock_utils): + result = self._test_get_secret_data({}, mock_utils) + mock_utils.change_decode_base64_secret_config.assert_not_called() + self.assertEqual(result, {}) + + def _test_get_secret_data(self, secret_data, mock_utils): + self.secret_manager.k8s_api.get_secret_data.return_value = secret_data + mock_utils.change_decode_base64_secret_config.return_value = 
self.expected_decode_secret_config + result = self.secret_manager.get_secret_data(test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + self.secret_manager.k8s_api.get_secret_data.assert_called_once_with(test_settings.FAKE_SECRET, + test_settings.FAKE_SECRET_NAMESPACE) + return result + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_return_true_when_node_should_be_managed_on_secret(self, manifest_utils): + self._prepare_is_node_should_be_managed_on_secret() + result = self._test_is_node_should_be_managed_on_secret(True, manifest_utils) + self.assertTrue(result) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_return_false_when_node_should_be_managed_on_secret(self, manifest_utils): + self._prepare_is_node_should_be_managed_on_secret() + result = self._test_is_node_should_be_managed_on_secret(False, manifest_utils) + self.assertFalse(result) + + def _prepare_is_node_should_be_managed_on_secret(self): + self._prepare_get_secret_data(self.fake_secret_data) + self.secret_manager.resource_info_manager.generate_secret_info.return_value = self.fake_secret_info + self._prepare_get_matching_managed_secret_info(0) + self.secret_manager.is_node_should_managed_on_secret_info = Mock() + + def _test_is_node_should_be_managed_on_secret(self, is_node_should_be_managed, manifest_utils): + self.secret_manager.is_node_should_managed_on_secret_info.return_value = is_node_should_be_managed + result = self.secret_manager.is_node_should_be_managed_on_secret( + test_settings.FAKE_NODE_NAME, test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + self.secret_manager.get_secret_data.assert_called_once_with( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + manifest_utils.validate_secret.assert_called_once_with(self.fake_secret_data) + self.secret_manager.resource_info_manager.generate_secret_info.assert_called_once_with( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + 
self.secret_manager.get_matching_managed_secret_info.assert_called_once_with(self.fake_secret_info) + self.secret_manager.is_node_should_managed_on_secret_info.assert_called_once_with( + test_settings.FAKE_NODE_NAME, self.fake_secret_info) + return result + + def test_node_should_be_managed_when_node_name_in_nodes_with_system_id(self): + result = self.secret_manager.is_node_should_managed_on_secret_info( + test_settings.FAKE_NODE_NAME, self.fake_secret_info) + self.assertTrue(result) + + def test_node_should_be_managed_when_nodes_with_system_id_is_empty(self): + secret_info = deepcopy(self.fake_secret_info) + secret_info.nodes_with_system_id = {} + result = self.secret_manager.is_node_should_managed_on_secret_info( + test_settings.FAKE_NODE_NAME, self.fake_secret_info) + self.assertTrue(result) + + def test_node_should_not_be_managed_when_node_not_in_nodes_with_system_id(self): + result = self.secret_manager.is_node_should_managed_on_secret_info('bad_node', self.fake_secret_info) + self.assertFalse(result) + + def test_node_should_not_be_managed_on_empty_secret_info(self): + result = self.secret_manager.is_node_should_managed_on_secret_info(test_settings.FAKE_NODE_NAME, None) + self.assertFalse(result) + + def test_get_matching_managed_secret_info_success(self): + result = self.secret_manager.get_matching_managed_secret_info(self.fake_secret_info) + self.assertEqual(result, (self.fake_secret_info, 0)) + + def test_do_not_find_matching_secret_info(self): + secret_info = deepcopy(self.fake_secret_info) + secret_info.name = 'bad_name' + result = self.secret_manager.get_matching_managed_secret_info(secret_info) + self.assertEqual(result, (secret_info, -1)) + + def test_get_second_matching_managed_secret_info_success(self): + secret_info = deepcopy(self.fake_secret_info) + secret_info.name = 'name' + self.managed_secrets.append(secret_info) + result = self.secret_manager.get_matching_managed_secret_info(secret_info) + self.assertEqual(result, (secret_info, 1)) + + def 
test_return_true_when_node_in_system_ids_topologies(self): + result = self._test_is_node_in_system_ids_topologies(test_settings.FAKE_SYSTEM_ID) + self.assertTrue(result) + + def test_return_false_when_node_not_in_system_ids_topologies(self): + result = self._test_is_node_in_system_ids_topologies('') + self.assertFalse(result) + + def _test_is_node_in_system_ids_topologies(self, system_id): + node_labels = [common_settings.MANAGE_NODE_LABEL] + self.secret_manager.get_system_id_for_node_labels = Mock() + self.secret_manager.get_system_id_for_node_labels.return_value = system_id + result = self.secret_manager.is_node_labels_in_system_ids_topologies(self.fake_secret_info, node_labels) + self.secret_manager.get_system_id_for_node_labels.assert_called_once_with(self.fake_secret_info, node_labels) + return result + + def test_get_system_id_when_system_ids_topologies_with_multiple_system_ids(self): + system_ids_topologies = { + test_settings.FAKE_SYSTEM_ID + '1': test_settings.FAKE_TOPOLOGY_LABELS, + test_settings.FAKE_SYSTEM_ID + '2': test_settings.FAKE_TOPOLOGY_LABELS} + result = self._test_get_system_id_for_node_labels([False, True], system_ids_topologies) + self.assertEqual(result, test_settings.FAKE_SYSTEM_ID + '2') + self.assertEqual(self.mock_is_topology_match.call_count, 2) + + def test_get_system_id_when_node_topology_labels_match(self): + result = self._test_get_system_id_for_node_labels([True], test_settings.FAKE_SYSTEM_IDS_TOPOLOGIES) + self.assertEqual(result, test_settings.FAKE_SYSTEM_ID) + self.mock_is_topology_match.assert_called_once_with(test_settings.FAKE_TOPOLOGY_LABELS, + test_settings.FAKE_TOPOLOGY_LABELS) + + def test_get_empty_string_when_node_topology_labels_do_not_match(self): + result = self._test_get_system_id_for_node_labels([False], test_settings.FAKE_SYSTEM_IDS_TOPOLOGIES) + self.assertEqual(result, '') + self.mock_is_topology_match.assert_called_once_with(test_settings.FAKE_TOPOLOGY_LABELS, + test_settings.FAKE_TOPOLOGY_LABELS) + + def 
test_get_empty_string_when_system_ids_topologies_is_empty(self): + result = self._test_get_system_id_for_node_labels([False], {}) + self.assertEqual(result, '') + self.mock_is_topology_match.assert_not_called() + + def _test_get_system_id_for_node_labels(self, is_topology_match, system_ids_topologies): + self._prepare_get_topology_labels() + self.mock_is_topology_match.side_effect = is_topology_match + result = self.secret_manager.get_system_id_for_node_labels(system_ids_topologies, + test_settings.FAKE_TOPOLOGY_LABELS) + self.secret_manager.get_topology_labels.assert_called_once_with(test_settings.FAKE_TOPOLOGY_LABELS) + return result + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_when_secret_have_config_field_it_is_considered_as_topology(self, mock_utils): + result = self._test_is_topology_secret('secret_config', mock_utils) + self.assertTrue(result) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_when_secret_do_not_have_config_field_it_is_do_not_considered_as_topology(self, mock_utils): + result = self._test_is_topology_secret({}, mock_utils) + self.assertFalse(result) + + def _test_is_topology_secret(self, secret_config, mock_utils): + mock_utils.get_secret_config.return_value = secret_config + result = self.secret_manager.is_topology_secret(self.fake_secret_data) + mock_utils.validate_secret.assert_called_once_with(self.fake_secret_data) + mock_utils.get_secret_config.assert_called_once_with(self.fake_secret_data) + return result + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_get_only_first_label_when_it_is_a_topology(self, manifest_utils): + expected_result = {test_settings.FAKE_TOPOLOGY_LABEL + '1': common_settings.TRUE_STRING} + self._test_get_topology_labels([True, False], test_settings.FAKE_TOPOLOGY_LABELS, + expected_result, 2, manifest_utils) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_get_both_labels_when_they_are_topology(self, 
manifest_utils): + expected_result = test_settings.FAKE_TOPOLOGY_LABELS + self._test_get_topology_labels([True, True], test_settings.FAKE_TOPOLOGY_LABELS, + expected_result, 2, manifest_utils) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_get_empty_dict_when_non_of_the_labels_are_topology(self, manifest_utils): + expected_result = {} + self._test_get_topology_labels([False, False], test_settings.FAKE_TOPOLOGY_LABELS, + expected_result, 2, manifest_utils) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_get_empty_dict_when_getting_empty_dict_to_check(self, manifest_utils): + expected_result = {} + self._test_get_topology_labels([], {}, expected_result, 0, manifest_utils) + + def _test_get_topology_labels(self, is_topology_label, labels_to_check, expected_result, + expected_is_topology_call_count, manifest_utils): + manifest_utils.is_topology_label.side_effect = is_topology_label + result = self.secret_manager.get_topology_labels(labels_to_check) + self.assertEqual(manifest_utils.is_topology_label.call_count, expected_is_topology_call_count) + self.assertEqual(result, expected_result) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_generate_secret_system_ids_topologies_success(self, manifest_utils): + expected_result = { + 'system_id_with_supported_topologies' + '1': [test_settings.FAKE_TOPOLOGY_LABEL], + 'system_id_with_no_supported_topologies' + '2': None + } + self._test_generate_secret_system_ids_topologies( + self.secret_config_with_system_info, expected_result, manifest_utils) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_generate_empty_secret_system_ids_topologies(self, manifest_utils): + self._test_generate_secret_system_ids_topologies({}, {}, manifest_utils) + + def _test_generate_secret_system_ids_topologies(self, secret_config, expected_result, mock_utils): + mock_utils.get_secret_config.return_value = secret_config + result = 
self.secret_manager.generate_secret_system_ids_topologies(self.fake_secret_data) + self.assertEqual(result, expected_result) + mock_utils.get_secret_config.assert_called_once_with(self.fake_secret_data) + + def test_return_true_when_parameter_is_secret_success(self): + self._test_is_secret_success(test_settings.STORAGE_CLASS_SECRET_FIELD, True) + + def test_return_false_when_parameter_has_bad_suffix_is_secret_success(self): + self._test_is_secret_success('csi.storage.k8s.io/bad_suffix', False) + + def test_return_false_when_parameter_has_bad_prefix_is_secret_success(self): + self._test_is_secret_success('bad_prefix/secret-name', False) + + def _test_is_secret_success(self, parameter, expected_result): + result = self.secret_manager.is_secret(parameter) + self.assertEqual(result, expected_result) + + def test_get_secret_name_and_namespace_from_storage_class_success(self): + result = self.secret_manager.get_secret_name_and_namespace( + test_utils.get_fake_storage_class_info(), test_settings.STORAGE_CLASS_SECRET_FIELD) + self.assertEqual(result, (test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE)) + + def test_add_unique_secret_info_to_list_success(self): + self._test_add_unique_secret_info_to_list([]) + + def test_do_not_change_list_when_secret_is_already_there_success(self): + self._test_add_unique_secret_info_to_list([self.fake_secret_info]) + + def _test_add_unique_secret_info_to_list(self, secrets_info_list): + result = self.secret_manager.add_unique_secret_info_to_list(self.fake_secret_info, secrets_info_list) + self.assertEqual(result, [self.fake_secret_info]) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_secret_can_be_changed_when_secret_is_managed_and_the_watch_event_type_is_not_deleted(self, + manifest_utils): + manifest_utils.is_watch_object_type_is_delete.return_value = False + self._test_is_secret_can_be_changed(0, True) + manifest_utils.is_watch_object_type_is_delete.assert_called_once_with('event_type') + + 
@patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_secret_cannot_be_changed_when_secret_is_not_managed(self, manifest_utils): + self._test_is_secret_can_be_changed(-1, False) + manifest_utils.is_watch_object_type_is_delete.assert_not_called() + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_secret_cannot_be_changed_when_watch_event_type_is_deleted(self, manifest_utils): + manifest_utils.is_watch_object_type_is_delete.return_value = True + self._test_is_secret_can_be_changed(0, False) + manifest_utils.is_watch_object_type_is_delete.assert_called_once_with('event_type') + + def _test_is_secret_can_be_changed(self, managed_secret_index, expected_result): + self._prepare_get_matching_managed_secret_info(managed_secret_index) + result = self.secret_manager.is_secret_can_be_changed(self.fake_secret_info, 'event_type') + self.secret_manager.get_matching_managed_secret_info.assert_called_once_with(self.fake_secret_info) + self.assertEqual(result, expected_result) + + def _prepare_get_matching_managed_secret_info(self, managed_secret_index): + self.secret_manager.get_matching_managed_secret_info = Mock() + self.secret_manager.get_matching_managed_secret_info.return_value = ( + self.fake_secret_info, managed_secret_index) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def test_get_array_connectivity_info_success(self, manifest_utils): + manifest_utils.get_array_connection_info_from_secret_data.return_value = 'fake_array_connectivity_info' + self._test_get_array_connectivity_info(self.fake_secret_data, 'fake_array_connectivity_info') + self.secret_manager.get_topology_labels.assert_called_once_with(test_settings.FAKE_TOPOLOGY_LABELS) + manifest_utils.get_array_connection_info_from_secret_data.assert_called_once_with( + self.fake_secret_data, test_settings.FAKE_TOPOLOGY_LABELS) + + @patch('{}.utils'.format(test_settings.SECRET_MANAGER_PATH)) + def 
test_get_empty_array_connectivity_info_when_secret_data_is_empty_success(self, manifest_utils): + self._test_get_array_connectivity_info(None, {}) + self.secret_manager.get_topology_labels.assert_not_called() + manifest_utils.get_array_connection_info_from_secret_data.assert_not_called() + + def _test_get_array_connectivity_info(self, secret_data, expected_result): + self._prepare_get_secret_data(secret_data) + self._prepare_get_topology_labels() + result = self.secret_manager.get_array_connection_info( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE, test_settings.FAKE_TOPOLOGY_LABELS) + self.assertEqual(result, expected_result) + self.secret_manager.get_secret_data.assert_called_once_with( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + + def _prepare_get_secret_data(self, secret_data): + self.secret_manager.get_secret_data = Mock() + self.secret_manager.get_secret_data.return_value = secret_data + + def _prepare_get_topology_labels(self): + self.secret_manager.get_topology_labels = Mock() + self.secret_manager.get_topology_labels.return_value = test_settings.FAKE_TOPOLOGY_LABELS diff --git a/controllers/tests/controller_server/host_definer/resource_manager/storage_class_manager_test.py b/controllers/tests/controller_server/host_definer/resource_manager/storage_class_manager_test.py new file mode 100644 index 000000000..b722f3606 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/resource_manager/storage_class_manager_test.py @@ -0,0 +1,23 @@ +from copy import deepcopy + +from controllers.tests.controller_server.host_definer.resource_manager.base_resource_manager import BaseResourceManager +from controllers.servers.host_definer.resource_manager.storage_class import StorageClassManager +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils + + +class TestStorageClassManager(BaseResourceManager): + def setUp(self): + self.storage_class_manager = StorageClassManager() + 
self.fake_storage_class_info = test_utils.get_fake_storage_class_info() + + def test_return_true_when_storage_class_has_csi_as_a_provisioner(self): + self._test_is_storage_class_has_csi_as_a_provisioner(self.fake_storage_class_info, True) + + def test_return_false_when_storage_class_does_not_have_csi_as_a_provisioner(self): + storage_class_info = deepcopy(self.fake_storage_class_info) + storage_class_info.provisioner = 'some_provisioner' + self._test_is_storage_class_has_csi_as_a_provisioner(storage_class_info, False) + + def _test_is_storage_class_has_csi_as_a_provisioner(self, storage_class_info, expected_result): + result = self.storage_class_manager.is_storage_class_has_csi_as_a_provisioner(storage_class_info) + self.assertEqual(result, expected_result) diff --git a/controllers/tests/controller_server/host_definer/secret_watcher_test.py b/controllers/tests/controller_server/host_definer/secret_watcher_test.py deleted file mode 100644 index fc851d36b..000000000 --- a/controllers/tests/controller_server/host_definer/secret_watcher_test.py +++ /dev/null @@ -1,60 +0,0 @@ -from unittest.mock import Mock, patch - -import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils -import controllers.tests.controller_server.host_definer.settings as test_settings -from controllers.tests.controller_server.host_definer.common import BaseSetUp -from controllers.servers.host_definer.watcher.secret_watcher import SecretWatcher - - -class SecretWatcherBase(BaseSetUp): - def setUp(self): - super().setUp() - self.secret_watcher = test_utils.get_class_mock(SecretWatcher) - self.managed_secrets_on_secret_watcher = test_utils.patch_managed_secrets_global_variable( - test_settings.SECRET_WATCHER_PATH) - - -class TestWatchSecretResources(SecretWatcherBase): - def setUp(self): - super().setUp() - self.secret_stream = patch('{}.watch.Watch.stream'.format(test_settings.SECRET_WATCHER_PATH)).start() - self.secret_watcher._loop_forever = Mock() - 
self.secret_watcher._loop_forever.side_effect = [True, False] - self.secret_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - - def test_create_definitions_managed_secret_was_modified(self): - self._prepare_default_mocks_for_secret() - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.managed_secrets_on_watcher_helper.append(test_utils.get_fake_secret_info()) - self.managed_secrets_on_secret_watcher.append(test_utils.get_fake_secret_info()) - self.secret_watcher.host_definitions_api.get.return_value = \ - test_utils.get_fake_k8s_host_definitions_items('not_ready') - self.secret_watcher.watch_secret_resources() - self.secret_watcher.storage_host_servicer.define_host.assert_called_once_with( - test_utils.get_define_request(node_id_from_host_definition=test_settings.FAKE_NODE_ID)) - - def test_ignore_deleted_events(self): - self._prepare_default_mocks_for_secret() - self.secret_stream.return_value = iter([test_utils.get_fake_secret_watch_event( - test_settings.DELETED_EVENT_TYPE)]) - self.secret_watcher.watch_secret_resources() - self.secret_watcher.storage_host_servicer.define_host.assert_not_called() - - def test_do_not_create_definitions_when_managed_secret_modified_but_no_managed_nodes(self): - self._prepare_default_mocks_for_secret() - self.secret_watcher.watch_secret_resources() - self.secret_watcher.storage_host_servicer.define_host.assert_not_called() - - def test_modified_secret_that_is_not_in_managed_secrets(self): - self._prepare_default_mocks_for_secret() - self.secret_watcher.watch_secret_resources() - self.secret_watcher.storage_host_servicer.define_host.assert_not_called() - - def _prepare_default_mocks_for_secret(self): - self.secret_stream.return_value = iter([test_utils.get_fake_secret_watch_event( - test_settings.MODIFIED_EVENT_TYPE)]) - self.secret_watcher.host_definitions_api.get.return_value = \ - test_utils.get_fake_k8s_host_definitions_items(test_settings.READY_PHASE) 
- self.secret_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.os.getenv.return_value = '' - self.secret_watcher.core_api.list_node.return_value = test_utils.get_fake_k8s_nodes_items() diff --git a/controllers/tests/controller_server/host_definer/settings.py b/controllers/tests/controller_server/host_definer/settings.py index 38ed3632b..2406071f9 100644 --- a/controllers/tests/controller_server/host_definer/settings.py +++ b/controllers/tests/controller_server/host_definer/settings.py @@ -1,12 +1,9 @@ +from controllers.common.settings import TOPOLOGY_IBM_BLOCK_PREFIX, CSI_PARAMETER_PREFIX, SECRET_NAME_SUFFIX from controllers.tests.common.test_settings import HOST_NAME import controllers.common.settings as common_settings -SPEC_FIELD = 'spec' -METADATA_FIELD = 'metadata' -STATUS_FIELD = 'status' STORAGE_CLASS_DRIVERS_FIELD = 'drivers' CSI_NODE_NODE_ID_FIELD = 'nodeID' -CSI_PROVISIONER_NAME = 'block.csi.ibm.com' FAKE_SECRET = 'fake_secret' FAKE_SECRET_NAMESPACE = 'fake_secret_namespace' FAKE_NODE_NAME = 'fake_node_name' @@ -16,23 +13,33 @@ FAKE_SECRET_PASSWORD = 'fake_password' FAKE_SECRET_USER_NAME = 'fake_user_name' FAKE_STORAGE_CLASS = 'fake_storage_class' +FAKE_CONNECTIVITY_TYPE = 'fake_connectivity_type' +FAKE_SYSTEM_ID = 'fake_system_id' IQN = 'iqn.1994-05.com.redhat:686358c930fe' WWPN = '34859340583048' NQN = 'nqn.2014-08.org.nvmexpress:uuid:b57708c7-5bb6-46a0-b2af-9d824bf539e1' FAKE_NODE_ID = '{};;;{}'.format(HOST_NAME, IQN) FAKE_CSI_PROVISIONER = 'fake_csi_provisioner' -TRUE_STRING = 'true' -NODE_LABELS_FIELD = 'labels' FAKE_LABEL = 'FAKE_LABEL' -MANAGE_NODE_LABEL = 'hostdefiner.block.csi.ibm.com/manage-node' -FORBID_DELETION_LABEL = 'hostdefiner.block.csi.ibm.com/do-not-delete-definition' -WATCHER_HELPER_PATH = 'controllers.servers.host_definer.watcher.watcher_helper' -NODES_WATCHER_PATH = 'controllers.servers.host_definer.watcher.node_watcher' -SECRET_WATCHER_PATH = 
'controllers.servers.host_definer.watcher.secret_watcher' -CSI_NODE_WATCHER_PATH = 'controllers.servers.host_definer.watcher.csi_node_watcher' -STORAGE_CLASS_WATCHER_PATH = 'controllers.servers.host_definer.watcher.storage_class_watcher' +FAKE_TOPOLOGY_LABEL = '{}/topology'.format(TOPOLOGY_IBM_BLOCK_PREFIX) +HOST_DEFINER_PATH = 'controllers.servers.host_definer' +HOST_DEFINER_WATCHER_PATH = '{}.watcher'.format(HOST_DEFINER_PATH) +HOST_DEFINER_RESOURCE_MANAGER_PATH = '{}.resource_manager'.format(HOST_DEFINER_PATH) +NODES_WATCHER_PATH = '{}.node_watcher'.format(HOST_DEFINER_WATCHER_PATH) +SECRET_WATCHER_PATH = '{}.secret_watcher'.format(HOST_DEFINER_WATCHER_PATH) +CSI_NODE_WATCHER_PATH = '{}.csi_node_watcher'.format(HOST_DEFINER_WATCHER_PATH) +STORAGE_CLASS_WATCHER_PATH = '{}.storage_class_watcher'.format(HOST_DEFINER_WATCHER_PATH) +HOST_DEFINITION_WATCHER_PATH = '{}.host_definition_watcher'.format(HOST_DEFINER_WATCHER_PATH) +UTILS_PATH = 'controllers.servers.host_definer.utils.utils' SETTINGS_PATH = 'controllers.servers.host_definer.settings' -METADATA_RESOURCE_VERSION_FIELD = 'resource_version' +HOST_DEFINITION_MANAGER_PATH = '{}.host_definition'.format(HOST_DEFINER_RESOURCE_MANAGER_PATH) +SECRET_MANAGER_PATH = '{}.secret'.format(HOST_DEFINER_RESOURCE_MANAGER_PATH) +NODE_MANAGER_PATH = '{}.node'.format(HOST_DEFINER_RESOURCE_MANAGER_PATH) +RESOURCE_INFO_MANAGER_PATH = '{}.resource_info'.format(HOST_DEFINER_RESOURCE_MANAGER_PATH) +TYPES_PATH = 'controllers.servers.host_definer.types' +REQUEST_MANAGER_PATH = 'controllers.servers.host_definer.definition_manager.request' +DEFINITION_MANAGER_PATH = 'controllers.servers.host_definer.definition_manager.definition' +K8S_API_PATH = 'controllers.servers.host_definer.k8s.api' FAKE_RESOURCE_VERSION = '495873498573' FAKE_UID = '50345093486093' EVENT_TYPE_FIELD = 'type' @@ -41,30 +48,38 @@ UPDATED_PODS = 'updated_number_scheduled' POD_NODE_NAME_FIELD = 'node_name' DESIRED_UPDATED_PODS = 'desired_number_scheduled' 
-DELETED_EVENT_TYPE = 'DELETED' -MODIFIED_EVENT_TYPE = 'MODIFIED' -ADDED_EVENT = 'ADDED' METADATA_UID_FIELD = 'uid' -STATUS_PHASE_FIELD = 'phase' -READY_PHASE = 'Ready' -HOST_DEFINITION_FIELD = 'hostDefinition' -SECRET_NAME_FIELD = 'secretName' -SECRET_NAMESPACE_FIELD = 'secretNamespace' -HOST_DEFINITION_NODE_NAME_FIELD = 'nodeName' SECRET_DATA_FIELD = 'data' FAIL_MESSAGE_FROM_STORAGE = 'fail_from_storage' -PENDING_CREATION_PHASE = 'PendingCreation' -PENDING_DELETION_PHASE = 'PendingDeletion' -SUCCESS_MESSAGE = 'Host defined successfully on the array' +MESSAGE = 'Host defined successfully on the array' HOST_DEFINITION_PENDING_VARS = {'HOST_DEFINITION_PENDING_RETRIES': 3, 'HOST_DEFINITION_PENDING_EXPONENTIAL_BACKOFF_IN_SECONDS': 0.2, 'HOST_DEFINITION_PENDING_DELAY_IN_SECONDS': 0.2} STORAGE_CLASS_PROVISIONER_FIELD = 'provisioner' STORAGE_CLASS_PARAMETERS_FIELD = 'parameters' -STORAGE_CLASS_SECRET_FIELD = 'csi.storage.k8s.io/secret-name' -STORAGE_CLASS_SECRET_NAMESPACE_FIELD = 'csi.storage.k8s.io/secret-namespace' +STORAGE_CLASS_SECRET_FIELD = '{}{}'.format(CSI_PARAMETER_PREFIX, SECRET_NAME_SUFFIX) +STORAGE_CLASS_SECRET_NAMESPACE_FIELD = '{}secret-namespace'.format(CSI_PARAMETER_PREFIX) FAKE_PREFIX = 'fake-prefix' IO_GROUP_ID_FIELD = 'id' IO_GROUP_IDS = ['0', '2'] IO_GROUP_NAMES = ['io_grp0', 'io_grp2'] FAKE_STRING_IO_GROUP = common_settings.IO_GROUP_DELIMITER.join(IO_GROUP_IDS) +FAKE_STORAGE_CLASS_PARAMETERS = { + STORAGE_CLASS_SECRET_FIELD: FAKE_SECRET, + STORAGE_CLASS_SECRET_NAMESPACE_FIELD: FAKE_SECRET_NAMESPACE +} +CONNECTIVITY_TYPE_FIELD = 'connectivityType' +FAKE_FC_PORTS = ['532453845345', '532453845345'] +IO_GROUP_LABEL_PREFIX = 'hostdefiner.block.csi.ibm.com/io-group-' +FAKE_SINGLE_IO_GROUP_STRING = '0' +FAKE_MULTIPLE_IO_GROUP_STRING = '0:1' +BASE64_STRING = 'eydmYWtlX2tleSc6ICdmYWtlX3ZhbHVlJ30K' +DECODED_BASE64_STRING = "{'fake_key': 'fake_value'}" +FAKE_ENCODED_CONFIG = {"config": BASE64_STRING} +FAKE_DECODED_CONFIG_STRING = {"config": 
DECODED_BASE64_STRING} +FAKE_DECODED_CONFIG = {"config": {'fake_key': 'fake_value'}} +ISCSI_CONNECTIVITY_TYPE = 'iscsi' +FAKE_TOPOLOGY_LABELS = {FAKE_TOPOLOGY_LABEL + '1': common_settings.TRUE_STRING, + FAKE_TOPOLOGY_LABEL + '2': common_settings.TRUE_STRING} +FAKE_SYSTEM_IDS_TOPOLOGIES = {FAKE_SYSTEM_ID: FAKE_TOPOLOGY_LABELS} +SECRET_SUPPORTED_TOPOLOGIES_PARAMETER = "supported_topologies" diff --git a/controllers/tests/controller_server/host_definer/storage_class_watcher_test.py b/controllers/tests/controller_server/host_definer/storage_class_watcher_test.py deleted file mode 100644 index 58a5619ee..000000000 --- a/controllers/tests/controller_server/host_definer/storage_class_watcher_test.py +++ /dev/null @@ -1,94 +0,0 @@ -from unittest.mock import Mock, patch - -import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils -import controllers.tests.controller_server.host_definer.settings as test_settings -from controllers.tests.controller_server.host_definer.common import BaseSetUp -from controllers.servers.host_definer.watcher.storage_class_watcher import StorageClassWatcher - - -class StorageClassWatcherBase(BaseSetUp): - def setUp(self): - super().setUp() - self.storage_class_watcher = test_utils.get_class_mock(StorageClassWatcher) - self.managed_secrets_on_storage_class_watcher = test_utils.patch_managed_secrets_global_variable( - test_settings.STORAGE_CLASS_WATCHER_PATH) - - -class TestAddInitialStorageClasses(StorageClassWatcherBase): - def setUp(self): - super().setUp() - self.storage_class_watcher.storage_api.list_storage_class.return_value = \ - test_utils.get_fake_k8s_storage_class_items(test_settings.CSI_PROVISIONER_NAME) - self.storage_class_watcher.host_definitions_api.get.return_value = \ - test_utils.get_fake_k8s_host_definitions_items(test_settings.READY_PHASE) - self.storage_class_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.os.getenv.return_value = '' - 
self.storage_class_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - - def test_add_new_storage_class_with_new_secret(self): - self.storage_class_watcher.host_definitions_api.get.return_value = \ - test_utils.get_fake_k8s_host_definitions_items('not_ready') - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.storage_class_watcher.add_initial_storage_classes() - self.storage_class_watcher.storage_host_servicer.define_host.assert_called_once_with( - test_utils.get_define_request(node_id_from_host_definition=test_settings.FAKE_NODE_ID)) - - def test_add_new_storage_class_with_existing_secret(self): - self.managed_secrets_on_storage_class_watcher.append(test_utils.get_fake_secret_info()) - self.managed_secrets_on_watcher_helper.append(test_utils.get_fake_secret_info()) - self.storage_class_watcher.add_initial_storage_classes() - self.storage_class_watcher.storage_host_servicer.define_host.assert_not_called() - self.assertEqual(2, self.managed_secrets_on_storage_class_watcher[0].managed_storage_classes) - - def test_add_new_storage_class_without_ibm_csi_provisioner(self): - self.storage_class_watcher.storage_api.list_storage_class.return_value = \ - test_utils.get_fake_k8s_storage_class_items(test_settings.FAKE_CSI_PROVISIONER) - self.storage_class_watcher.add_initial_storage_classes() - self.assertEqual(0, len(self.managed_secrets_on_storage_class_watcher)) - self.storage_class_watcher.storage_host_servicer.define_host.assert_not_called() - - -class TestWatchStorageClassResources(StorageClassWatcherBase): - def setUp(self): - super().setUp() - self.storage_class_stream = patch('{}.watch.Watch.stream'.format(test_settings.SECRET_WATCHER_PATH)).start() - self.storage_class_stream.return_value = iter([test_utils.get_fake_secret_storage_event( - test_settings.ADDED_EVENT, test_settings.CSI_PROVISIONER_NAME)]) - self.storage_class_watcher.host_definitions_api.get.return_value = \ - 
test_utils.get_fake_k8s_host_definitions_items(test_settings.READY_PHASE) - self.storage_class_watcher.core_api.read_namespaced_secret.return_value = test_utils.get_fake_k8s_secret() - self.os.getenv.return_value = '' - self.storage_class_watcher._loop_forever = Mock() - self.storage_class_watcher._loop_forever.side_effect = [True, False] - self.storage_class_watcher.core_api.read_node.return_value = self.k8s_node_with_fake_label - - def test_add_new_storage_class_with_new_secret(self): - self.storage_class_watcher.host_definitions_api.get.return_value = \ - test_utils.get_fake_k8s_host_definitions_items('not_ready') - self.nodes_on_watcher_helper[test_settings.FAKE_NODE_NAME] = test_utils.get_fake_managed_node() - self.storage_class_watcher.watch_storage_class_resources() - self.storage_class_watcher.storage_host_servicer.define_host.assert_called_once_with( - test_utils.get_define_request(node_id_from_host_definition=test_settings.FAKE_NODE_ID)) - self.assertEqual(1, len(self.managed_secrets_on_storage_class_watcher)) - - def test_add_new_storage_class_with_existing_secret(self): - self.managed_secrets_on_storage_class_watcher.append(test_utils.get_fake_secret_info()) - self.managed_secrets_on_watcher_helper.append(test_utils.get_fake_secret_info()) - self.storage_class_watcher.watch_storage_class_resources() - self.storage_class_watcher.storage_host_servicer.define_host.assert_not_called() - self.assertEqual(2, self.managed_secrets_on_storage_class_watcher[0].managed_storage_classes) - - def test_add_new_storage_class_without_ibm_csi_provisioner(self): - self.storage_class_stream.return_value = iter([test_utils.get_fake_secret_storage_event( - test_settings.ADDED_EVENT, test_settings.FAKE_CSI_PROVISIONER)]) - self.storage_class_watcher.watch_storage_class_resources() - self.assertEqual(0, len(self.managed_secrets_on_storage_class_watcher)) - self.storage_class_watcher.storage_host_servicer.define_host.assert_not_called() - - def 
test_deleted_managed_storage_class(self): - self.storage_class_stream.return_value = iter([test_utils.get_fake_secret_storage_event( - test_settings.DELETED_EVENT_TYPE, test_settings.CSI_PROVISIONER_NAME)]) - self.managed_secrets_on_storage_class_watcher.append(test_utils.get_fake_secret_info()) - self.managed_secrets_on_watcher_helper.append(test_utils.get_fake_secret_info()) - self.storage_class_watcher.watch_storage_class_resources() - self.assertEqual(0, self.managed_secrets_on_storage_class_watcher[0].managed_storage_classes) diff --git a/controllers/tests/controller_server/host_definer/utils/k8s_manifests_utils.py b/controllers/tests/controller_server/host_definer/utils/k8s_manifests_utils.py index 1764b200c..7755445eb 100644 --- a/controllers/tests/controller_server/host_definer/utils/k8s_manifests_utils.py +++ b/controllers/tests/controller_server/host_definer/utils/k8s_manifests_utils.py @@ -7,7 +7,7 @@ def get_k8s_csi_node_manifest(csi_provisioner_name, csi_node_suffix=''): k8s_csi_node_spec = { - test_settings.SPEC_FIELD: { + common_settings.SPEC_FIELD: { test_settings.STORAGE_CLASS_DRIVERS_FIELD: [{ common_settings.NAME_FIELD: csi_provisioner_name, test_settings.CSI_NODE_NODE_ID_FIELD: test_settings.FAKE_NODE_ID @@ -19,49 +19,69 @@ def get_k8s_csi_node_manifest(csi_provisioner_name, csi_node_suffix=''): def get_fake_k8s_daemon_set_manifest(updated_pods, desired_updated_pods): k8s_daemon_set_status = { - test_settings.STATUS_FIELD: { + common_settings.STATUS_FIELD: { test_settings.UPDATED_PODS: updated_pods, test_settings.DESIRED_UPDATED_PODS: desired_updated_pods, }} return _generate_manifest(test_settings.FAKE_NODE_PODS_NAME, k8s_daemon_set_status) -def get_fake_k8s_pod_manifest(): +def get_fake_k8s_pod_manifest(pod_suffix=''): k8s_pod_spec = { - test_settings.SPEC_FIELD: { + common_settings.SPEC_FIELD: { test_settings.POD_NODE_NAME_FIELD: test_settings.FAKE_NODE_NAME }} - return _generate_manifest(test_settings.FAKE_NODE_PODS_NAME, k8s_pod_spec) + 
return _generate_manifest(test_settings.FAKE_NODE_PODS_NAME + pod_suffix, k8s_pod_spec) -def get_fake_k8s_host_definition_manifest(host_definition_phase): +def get_fake_k8s_host_definition_manifest(host_definition_phase='ready'): status_phase_manifest = get_status_phase_manifest(host_definition_phase) + fields_manifest = get_fake_k8s_host_definition_response_fields_manifest() k8s_host_definition_body = { - test_settings.SPEC_FIELD: { - test_settings.HOST_DEFINITION_FIELD: { - test_settings.SECRET_NAME_FIELD: test_settings.FAKE_SECRET, - test_settings.SECRET_NAMESPACE_FIELD: test_settings.FAKE_SECRET_NAMESPACE, - test_settings.HOST_DEFINITION_NODE_NAME_FIELD: test_settings.FAKE_NODE_NAME, - common_settings.HOST_DEFINITION_NODE_ID_FIELD: test_settings.FAKE_NODE_ID + common_settings.API_VERSION_FIELD: common_settings.CSI_IBM_API_VERSION, + common_settings.KIND_FIELD: common_settings.HOST_DEFINITION_KIND, + common_settings.SPEC_FIELD: { + common_settings.HOST_DEFINITION_FIELD: { + common_settings.HOST_DEFINITION_NODE_NAME_FIELD: test_settings.FAKE_NODE_NAME, + common_settings.SECRET_NAME_FIELD: test_settings.FAKE_SECRET, + common_settings.SECRET_NAMESPACE_FIELD: test_settings.FAKE_SECRET_NAMESPACE, + common_settings.HOST_DEFINITION_NODE_ID_FIELD: test_settings.FAKE_NODE_ID, } }} + k8s_host_definition_body[common_settings.SPEC_FIELD][common_settings.HOST_DEFINITION_FIELD].update( + fields_manifest[common_settings.SPEC_FIELD][common_settings.HOST_DEFINITION_FIELD]) return _generate_manifest(test_settings.FAKE_NODE_NAME, status_phase_manifest, k8s_host_definition_body) +def get_fake_k8s_host_definition_response_fields_manifest(): + manifest = { + common_settings.SPEC_FIELD: { + common_settings.HOST_DEFINITION_FIELD: { + common_settings.NODE_NAME_ON_STORAGE_FIELD: test_settings.FAKE_NODE_NAME, + common_settings.CONNECTIVITY_TYPE_FIELD: test_settings.FAKE_CONNECTIVITY_TYPE, + common_settings.PORTS_FIELD: test_settings.FAKE_FC_PORTS, + common_settings.IO_GROUP_FIELD: 
test_settings.IO_GROUP_IDS, + common_settings.MANAGEMENT_ADDRESS_FIELD: test_settings.FAKE_SECRET_ARRAY + } + } + } + return _generate_manifest(test_settings.FAKE_NODE_NAME, manifest) + + def get_status_phase_manifest(phase): return { - test_settings.STATUS_FIELD: { - test_settings.STATUS_PHASE_FIELD: phase + common_settings.STATUS_FIELD: { + common_settings.STATUS_PHASE_FIELD: phase } } def get_fake_k8s_node_manifest(label): node_manifest = _generate_manifest(test_settings.FAKE_NODE_NAME) - node_manifest[test_settings.METADATA_FIELD][test_settings.NODE_LABELS_FIELD] = { - label: test_settings.TRUE_STRING, - common_settings.IO_GROUP_LABEL_PREFIX + str(0): test_settings.TRUE_STRING, - common_settings.IO_GROUP_LABEL_PREFIX + str(2): test_settings.TRUE_STRING} + node_manifest[common_settings.METADATA_FIELD][common_settings.LABELS_FIELD] = { + label: common_settings.TRUE_STRING, + common_settings.IO_GROUP_LABEL_PREFIX + str(0): common_settings.TRUE_STRING, + common_settings.IO_GROUP_LABEL_PREFIX + str(2): common_settings.TRUE_STRING} return node_manifest @@ -73,23 +93,21 @@ def get_fake_k8s_secret_manifest(): SECRET_USERNAME_PARAMETER: test_settings.FAKE_SECRET_USER_NAME }} secret_manifest = _generate_manifest(test_settings.FAKE_SECRET, secret_data_manifest) - secret_manifest[test_settings.METADATA_FIELD][common_settings.NAMESPACE_FIELD] = test_settings.FAKE_SECRET_NAMESPACE + secret_manifest[common_settings.METADATA_FIELD][common_settings.NAMESPACE_FIELD] = \ + test_settings.FAKE_SECRET_NAMESPACE return secret_manifest def get_fake_k8s_storage_class_manifest(provisioner): k8s_storage_class_body = { test_settings.STORAGE_CLASS_PROVISIONER_FIELD: provisioner, - test_settings.STORAGE_CLASS_PARAMETERS_FIELD: { - test_settings.STORAGE_CLASS_SECRET_FIELD: test_settings.FAKE_SECRET, - test_settings.STORAGE_CLASS_SECRET_NAMESPACE_FIELD: test_settings.FAKE_SECRET_NAMESPACE - }} + test_settings.STORAGE_CLASS_PARAMETERS_FIELD: test_settings.FAKE_STORAGE_CLASS_PARAMETERS} return 
_generate_manifest(test_settings.FAKE_STORAGE_CLASS, k8s_storage_class_body) def _generate_manifest(object_name, *extra_dicts): - metadata_manifest = _get_metadata_manifest() - metadata_manifest[test_settings.METADATA_FIELD][common_settings.NAME_FIELD] = object_name + metadata_manifest = get_metadata_manifest() + metadata_manifest[common_settings.METADATA_FIELD][common_settings.NAME_FIELD] = object_name if len(extra_dicts) > 0: merged_dicts = _merge_dicts(metadata_manifest, extra_dicts[0]) else: @@ -99,10 +117,10 @@ def _generate_manifest(object_name, *extra_dicts): return merged_dicts -def _get_metadata_manifest(): +def get_metadata_manifest(): return { - test_settings.METADATA_FIELD: { - test_settings.METADATA_RESOURCE_VERSION_FIELD: test_settings.FAKE_RESOURCE_VERSION, + common_settings.METADATA_FIELD: { + common_settings.RESOURCE_VERSION_FIELD: test_settings.FAKE_RESOURCE_VERSION, test_settings.METADATA_UID_FIELD: test_settings.FAKE_UID }} @@ -120,8 +138,8 @@ def generate_watch_event(event_type, object_function): def get_metadata_with_manage_node_labels_manifest(label_value): return { - test_settings.METADATA_FIELD: { - test_settings.NODE_LABELS_FIELD: {test_settings.MANAGE_NODE_LABEL: label_value} + common_settings.METADATA_FIELD: { + common_settings.LABELS_FIELD: {common_settings.MANAGE_NODE_LABEL: label_value} } } @@ -131,3 +149,38 @@ def get_host_io_group_manifest(): test_settings.IO_GROUP_ID_FIELD: test_settings.IO_GROUP_IDS, common_settings.NAME_FIELD: test_settings.IO_GROUP_NAMES } + + +def get_empty_k8s_list_manifest(): + return { + common_settings.ITEMS_FIELD: [], + common_settings.METADATA_FIELD: { + common_settings.RESOURCE_VERSION_FIELD + } + } + + +def get_finalizers_manifest(finalizers): + return { + common_settings.METADATA_FIELD: { + common_settings.NAME_FIELD: test_settings.FAKE_NODE_NAME, + common_settings.FINALIZERS_FIELD: finalizers, + } + } + + +def get_general_labels_manifest(labels): + return { + common_settings.METADATA_FIELD: { + 
common_settings.LABELS_FIELD: labels + } + } + + +def get_fake_secret_config_with_system_info_manifest(): + return { + 'system_id_with_supported_topologies' + '1': { + test_settings.SECRET_SUPPORTED_TOPOLOGIES_PARAMETER: [test_settings.FAKE_TOPOLOGY_LABEL] + }, + 'system_id_with_no_supported_topologies' + '2': [test_settings.FAKE_TOPOLOGY_LABEL] + } diff --git a/controllers/tests/controller_server/host_definer/utils/test_utils.py b/controllers/tests/controller_server/host_definer/utils/test_utils.py index afe09dd08..54fa18572 100644 --- a/controllers/tests/controller_server/host_definer/utils/test_utils.py +++ b/controllers/tests/controller_server/host_definer/utils/test_utils.py @@ -1,14 +1,16 @@ from dataclasses import dataclass, field import func_timeout from munch import Munch +from kubernetes import client from mock import patch, Mock -import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as manifest_utils -import controllers.tests.controller_server.host_definer.settings as test_settings -from controllers.tests.common.test_settings import HOST_NAME, SECRET_MANAGEMENT_ADDRESS_VALUE -from controllers.servers.host_definer.kubernetes_manager.manager import KubernetesManager from controllers.servers.host_definer.types import DefineHostRequest, DefineHostResponse from controllers.servers.csi.controller_types import ArrayConnectionInfo +from controllers.servers.host_definer.k8s.api import K8SApi +from controllers.tests.common.test_settings import HOST_NAME, SECRET_MANAGEMENT_ADDRESS_VALUE +import controllers.tests.controller_server.host_definer.settings as test_settings +import controllers.common.settings as common_settings +import controllers.tests.controller_server.host_definer.utils.k8s_manifests_utils as test_manifest_utils @dataclass @@ -29,84 +31,96 @@ def getheaders(self): def get_fake_k8s_csi_nodes(csi_provisioner_name, number_of_csi_nodes): k8s_csi_nodes = [] for csi_node_index in range(number_of_csi_nodes): - 
k8s_csi_node_manifest = manifest_utils.get_k8s_csi_node_manifest( + k8s_csi_node_manifest = test_manifest_utils.get_k8s_csi_node_manifest( csi_provisioner_name, '-{}'.format(csi_node_index)) k8s_csi_nodes.append(Munch.fromDict(k8s_csi_node_manifest)) return K8sResourceItems(k8s_csi_nodes) -def get_fake_k8s_csi_node(csi_provisioner_name): - csi_node_manifest = manifest_utils.get_k8s_csi_node_manifest(csi_provisioner_name) +def get_fake_k8s_csi_node(csi_provisioner_name=""): + csi_node_manifest = test_manifest_utils.get_k8s_csi_node_manifest(csi_provisioner_name) return Munch.fromDict(csi_node_manifest) def get_fake_csi_node_watch_event(event_type): - return manifest_utils.generate_watch_event(event_type, manifest_utils.get_k8s_csi_node_manifest( - test_settings.CSI_PROVISIONER_NAME)) + return test_manifest_utils.generate_watch_event(event_type, test_manifest_utils.get_k8s_csi_node_manifest( + common_settings.CSI_PROVISIONER_NAME)) def get_fake_k8s_node(label): - return Munch.fromDict(manifest_utils.get_fake_k8s_node_manifest(label)) + return Munch.fromDict(test_manifest_utils.get_fake_k8s_node_manifest(label)) def get_fake_k8s_daemon_set_items(updated_pods, desired_updated_pods): - k8s_daemon_set_manifest = manifest_utils.get_fake_k8s_daemon_set_manifest(updated_pods, desired_updated_pods) - return K8sResourceItems([Munch.fromDict(k8s_daemon_set_manifest)]) + return K8sResourceItems([get_fake_k8s_daemon_set(updated_pods, desired_updated_pods)]) + + +def get_fake_k8s_daemon_set(updated_pods, desired_updated_pods): + k8s_daemon_set_manifest = test_manifest_utils.get_fake_k8s_daemon_set_manifest(updated_pods, desired_updated_pods) + return Munch.fromDict(k8s_daemon_set_manifest) def get_empty_k8s_pods(): return K8sResourceItems() -def get_fake_k8s_pods_items(): - k8s_pod_manifest = manifest_utils.get_fake_k8s_pod_manifest() - return K8sResourceItems([Munch.fromDict(k8s_pod_manifest)]) +def get_fake_k8s_pods_items(number_of_pods=1): + k8s_pods = [] + for pod_index in 
range(number_of_pods): + k8s_pod_manifest = test_manifest_utils.get_fake_k8s_pod_manifest('-{}'.format(pod_index)) + k8s_pods.append(Munch.fromDict(k8s_pod_manifest)) + return K8sResourceItems(k8s_pods) def get_empty_k8s_host_definitions(): return K8sResourceItems() -def get_fake_k8s_host_definitions_items(host_definition_phase): - return K8sResourceItems([_get_fake_k8s_host_definitions(host_definition_phase)]) +def get_fake_k8s_host_definitions_items(host_definition_phase='ready'): + return K8sResourceItems([get_fake_k8s_host_definition(host_definition_phase)]) -def _get_fake_k8s_host_definitions(host_definition_phase): - return Munch.fromDict(manifest_utils.get_fake_k8s_host_definition_manifest(host_definition_phase)) +def get_fake_k8s_host_definition(host_definition_phase): + return Munch.fromDict(test_manifest_utils.get_fake_k8s_host_definition_manifest(host_definition_phase)) -def get_fake_host_definition_watch_event(event_type, host_definition_phase): - return manifest_utils.generate_watch_event( - event_type, manifest_utils.get_fake_k8s_host_definition_manifest(host_definition_phase)) +def get_fake_host_definition_watch_event(event_type): + return test_manifest_utils.generate_watch_event( + event_type, test_manifest_utils.get_fake_k8s_host_definition_manifest()) def get_fake_node_watch_event(event_type): - return manifest_utils.generate_watch_event(event_type, manifest_utils.get_fake_k8s_node_manifest( - test_settings.MANAGE_NODE_LABEL)) + return test_manifest_utils.generate_watch_event(event_type, test_manifest_utils.get_fake_k8s_node_manifest( + common_settings.MANAGE_NODE_LABEL)) def get_fake_k8s_nodes_items(): - k8s_node_manifest = manifest_utils.get_fake_k8s_node_manifest(test_settings.MANAGE_NODE_LABEL) + k8s_node_manifest = test_manifest_utils.get_fake_k8s_node_manifest(common_settings.MANAGE_NODE_LABEL) return K8sResourceItems([Munch.fromDict(k8s_node_manifest)]) def get_fake_secret_watch_event(event_type): - return 
manifest_utils.generate_watch_event(event_type, - manifest_utils.get_fake_k8s_secret_manifest()) + return test_manifest_utils.generate_watch_event(event_type, + test_manifest_utils.get_fake_k8s_secret_manifest()) def get_fake_k8s_secret(): - return Munch.fromDict(manifest_utils.get_fake_k8s_secret_manifest()) + return Munch.fromDict(test_manifest_utils.get_fake_k8s_secret_manifest()) def get_fake_k8s_storage_class_items(provisioner): - k8s_storage_classes_manifest = manifest_utils.get_fake_k8s_storage_class_manifest(provisioner) + k8s_storage_classes_manifest = test_manifest_utils.get_fake_k8s_storage_class_manifest(provisioner) return K8sResourceItems([Munch.fromDict(k8s_storage_classes_manifest)]) -def get_fake_secret_storage_event(event_type, provisioner): - return manifest_utils.generate_watch_event(event_type, - manifest_utils.get_fake_k8s_storage_class_manifest(provisioner)) +def get_fake_k8s_storage_class(provisioner): + k8s_storage_classes_manifest = test_manifest_utils.get_fake_k8s_storage_class_manifest(provisioner) + return Munch.fromDict(k8s_storage_classes_manifest) + + +def get_fake_storage_class_watch_event(event_type, provisioner='provisioner'): + return test_manifest_utils.generate_watch_event( + event_type, test_manifest_utils.get_fake_k8s_storage_class_manifest(provisioner)) def patch_pending_variables(): @@ -115,42 +129,16 @@ def patch_pending_variables(): test_settings.SETTINGS_PATH, pending_var), value).start() -def patch_kubernetes_manager_init(): +def patch_k8s_api_init(): for function_to_patch in test_settings.KUBERNETES_MANAGER_INIT_FUNCTIONS_TO_PATCH: - _patch_function(KubernetesManager, function_to_patch) + patch_function(K8SApi, function_to_patch) -def _patch_function(class_type, function): +def patch_function(class_type, function): patcher = patch.object(class_type, function) patcher.start() -def get_class_mock(class_type): - class_type_dict = _get_class_dict(class_type) - class_mock = _get_class(class_type, class_type_dict) - return 
_mock_class_vars(class_mock) - - -def _get_class_dict(class_type): - class_type_copy = class_type.__dict__.copy() - return class_type_copy - - -def _get_class(class_type, class_type_dict): - return type(_get_dummy_class_name(class_type), (class_type,), class_type_dict) - - -def _get_dummy_class_name(class_type): - return 'dummy_{}'.format(class_type.__name__) - - -def _mock_class_vars(class_type): - class_instance = class_type() - for method in vars(class_instance): - class_instance.__dict__[method] = Mock() - return class_instance - - def run_function_with_timeout(function, max_wait): try: func_timeout.func_timeout(max_wait, function) @@ -158,8 +146,8 @@ def run_function_with_timeout(function, max_wait): pass -def get_error_http_resp(): - return HttpResp(405, 'some problem', 'some reason') +def get_error_http_resp(status_code): + return HttpResp(status_code, 'some problem', 'some reason') def patch_nodes_global_variable(module_path): @@ -171,11 +159,11 @@ def patch_managed_secrets_global_variable(module_path): def get_pending_creation_status_manifest(): - return manifest_utils.get_status_phase_manifest(test_settings.PENDING_CREATION_PHASE) + return test_manifest_utils.get_status_phase_manifest(common_settings.PENDING_CREATION_PHASE) def get_ready_status_manifest(): - return manifest_utils.get_status_phase_manifest(test_settings.READY_PHASE) + return test_manifest_utils.get_status_phase_manifest(common_settings.READY_PHASE) def get_array_connection_info(): @@ -197,12 +185,14 @@ def get_define_response(connectivity_type, ports): SECRET_MANAGEMENT_ADDRESS_VALUE) -def get_fake_secret_info(): - secret_info = Mock(spec_set=['name', 'namespace', 'nodes_with_system_id', 'managed_storage_classes']) +def get_fake_secret_info(managed_storage_classes=0): + secret_info = Mock(spec_set=['name', 'namespace', 'nodes_with_system_id', + 'system_ids_topologies', 'managed_storage_classes']) secret_info.name = test_settings.FAKE_SECRET secret_info.namespace = 
test_settings.FAKE_SECRET_NAMESPACE - secret_info.nodes_with_system_id = {} - secret_info.managed_storage_classes = 1 + secret_info.nodes_with_system_id = {test_settings.FAKE_NODE_NAME: test_settings.FAKE_SYSTEM_ID} + secret_info.system_ids_topologies = {test_settings.FAKE_NODE_NAME: test_settings.FAKE_TOPOLOGY_LABELS} + secret_info.managed_storage_classes = managed_storage_classes return secret_info @@ -212,7 +202,13 @@ def get_fake_host_io_group_id(): def get_fake_host_io_group(): - return Munch.fromDict(manifest_utils.get_host_io_group_manifest()) + return Munch.fromDict(test_manifest_utils.get_host_io_group_manifest()) + + +def get_fake_empty_k8s_list(): + much_object = Munch.fromDict(test_manifest_utils.get_empty_k8s_list_manifest()) + much_object.items = [] + return much_object def get_fake_managed_node(): @@ -221,3 +217,102 @@ def get_fake_managed_node(): managed_node.node_id = test_settings.FAKE_NODE_ID managed_node.io_group = test_settings.FAKE_STRING_IO_GROUP return managed_node + + +def get_fake_csi_node_info(): + csi_node_info = Mock(spec_set=['name', 'node_id']) + csi_node_info.name = test_settings.FAKE_NODE_NAME + csi_node_info.node_id = test_settings.FAKE_NODE_ID + return csi_node_info + + +def get_fake_node_info(): + node_info = Mock(spec_set=['name', 'labels']) + node_info.name = test_settings.FAKE_NODE_NAME + node_info.labels = {common_settings.MANAGE_NODE_LABEL: common_settings.TRUE_STRING} + return node_info + + +def get_fake_storage_class_info(): + storage_class_info = Mock(spec_set=['name', 'provisioner', 'parameters']) + storage_class_info.name = test_settings.FAKE_STORAGE_CLASS + storage_class_info.provisioner = common_settings.CSI_PROVISIONER_NAME + storage_class_info.parameters = test_settings.FAKE_STORAGE_CLASS_PARAMETERS + return storage_class_info + + +def get_fake_host_definition_info(): + host_definition_info = Mock(spec_set=['name', 'resource_version', 'uid', 'phase', 'secret_name', + 'secret_namespace', 'node_name', 'node_id', 
'connectivity_type']) + host_definition_info.name = test_settings.FAKE_NODE_NAME + host_definition_info.resource_version = test_settings.FAKE_RESOURCE_VERSION + host_definition_info.uid = test_settings.FAKE_UID + host_definition_info.phase = common_settings.READY_PHASE + host_definition_info.secret_name = test_settings.FAKE_SECRET + host_definition_info.secret_namespace = test_settings.FAKE_SECRET_NAMESPACE + host_definition_info.node_name = test_settings.FAKE_NODE_NAME + host_definition_info.node_id = test_settings.FAKE_NODE_ID + host_definition_info.connectivity_type = test_settings.FAKE_CONNECTIVITY_TYPE + return host_definition_info + + +def get_fake_empty_host_definition_info(): + host_definition_info = Mock(spec_set=['name', 'node_name', 'node_id']) + host_definition_info.name = '' + host_definition_info.node_name = '' + host_definition_info.node_id = '' + return host_definition_info + + +def get_object_reference(): + return client.V1ObjectReference( + api_version=common_settings.CSI_IBM_API_VERSION, kind=common_settings.HOST_DEFINITION_KIND, + name=test_settings.FAKE_NODE_NAME, resource_version=test_settings.FAKE_RESOURCE_VERSION, + uid=test_settings.FAKE_UID, ) + + +def get_event_object_metadata(): + return client.V1ObjectMeta(generate_name='{}.'.format(test_settings.FAKE_NODE_NAME), ) + + +def get_fake_define_host_response(): + response = Mock(spec_set=['error_message', 'connectivity_type', 'ports', + 'node_name_on_storage', 'io_group', 'management_address']) + response.error_message = test_settings.MESSAGE + response.connectivity_type = test_settings.FAKE_CONNECTIVITY_TYPE + response.ports = test_settings.FAKE_FC_PORTS + response.node_name_on_storage = test_settings.FAKE_NODE_NAME + response.io_group = test_settings.IO_GROUP_IDS + response.management_address = test_settings.FAKE_SECRET_ARRAY + return response + + +def get_fake_io_group_labels(number_of_io_groups): + labels = {} + for index in range(number_of_io_groups): + 
labels[test_settings.IO_GROUP_LABEL_PREFIX + str(index)] = common_settings.TRUE_STRING + return labels + + +def get_fake_k8s_metadata(): + return Munch.fromDict(test_manifest_utils.get_metadata_manifest()) + + +def get_fake_array_connectivity_info(): + array_connectivity_info = Mock(spec_set=['array_addresses', 'user', 'password', 'system_id']) + array_connectivity_info.array_addresses = [test_settings.FAKE_SECRET_ARRAY] + array_connectivity_info.user = test_settings.FAKE_SECRET_USER_NAME + array_connectivity_info.password = test_settings.FAKE_SECRET_PASSWORD + array_connectivity_info.system_id = '2' + return array_connectivity_info + + +def get_fake_pod_info(): + pod_info = Mock(spec_set=['name', 'node_name']) + pod_info.name = test_settings.FAKE_NODE_PODS_NAME + pod_info.node_name = test_settings.FAKE_NODE_NAME + return pod_info + + +def convert_manifest_to_munch(manifest): + return Munch.fromDict(manifest) diff --git a/controllers/tests/controller_server/host_definer/watchers/__init__.py b/controllers/tests/controller_server/host_definer/watchers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/controllers/tests/controller_server/host_definer/watchers/csi_node_watcher_test.py b/controllers/tests/controller_server/host_definer/watchers/csi_node_watcher_test.py new file mode 100644 index 000000000..c3be5ebbd --- /dev/null +++ b/controllers/tests/controller_server/host_definer/watchers/csi_node_watcher_test.py @@ -0,0 +1,344 @@ +from unittest.mock import MagicMock, patch +from copy import deepcopy + +import controllers.common.settings as common_settings +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +from controllers.tests.controller_server.host_definer.watchers.watcher_base import WatcherBaseSetUp +from controllers.servers.host_definer.watcher.csi_node_watcher import CsiNodeWatcher + + +class 
CsiNodeWatcherBase(WatcherBaseSetUp): + def setUp(self): + super().setUp() + self.watcher = CsiNodeWatcher() + self.watcher.resource_info_manager = MagicMock() + self.watcher.node_manager = MagicMock() + self.watcher.definition_manager = MagicMock() + self.watcher.csi_node = MagicMock() + self.watcher.k8s_api = MagicMock() + self.watcher.host_definition_manager = MagicMock() + self.fake_csi_node_info = test_utils.get_fake_csi_node_info() + + +class TestAddInitialCsiNodes(CsiNodeWatcherBase): + def test_add_initial_csi_nodes(self): + self.watcher.csi_node.get_csi_nodes_info_with_driver.return_value = [self.fake_csi_node_info] + self.watcher.node_manager.is_node_can_be_defined.return_value = True + self.watcher.add_initial_csi_nodes() + self.watcher.csi_node.get_csi_nodes_info_with_driver.assert_called_once_with() + self.watcher.node_manager.is_node_can_be_defined.assert_called_once_with(self.fake_csi_node_info.name) + self.watcher.node_manager.add_node_to_nodes.assert_called_once_with(self.fake_csi_node_info) + + def test_do_not_add_initial_csi_nodes_that_cannot_be_defined(self): + self.watcher.csi_node.get_csi_nodes_info_with_driver.return_value = [self.fake_csi_node_info] + self.watcher.node_manager.is_node_can_be_defined.return_value = False + self.watcher.add_initial_csi_nodes() + self.watcher.csi_node.get_csi_nodes_info_with_driver.assert_called_once_with() + self.watcher.node_manager.is_node_can_be_defined.assert_called_once_with(self.fake_csi_node_info.name) + self.watcher.node_manager.add_node_to_nodes.assert_not_called() + + def test_do_not_add_empty_initial_csi_nodes(self): + self.watcher.csi_node.get_csi_nodes_info_with_driver.return_value = [] + self.watcher.add_initial_csi_nodes() + self.watcher.csi_node.get_csi_nodes_info_with_driver.assert_called_once_with() + self.watcher.node_manager.is_node_can_be_defined.assert_not_called() + self.watcher.node_manager.add_node_to_nodes.assert_not_called() + + +class TestWatchCsiNodesResources(CsiNodeWatcherBase): 
+ def setUp(self): + super().setUp() + self.fake_managed_nodes = test_utils.get_fake_managed_node() + self.managed_node_with_different_node_id = deepcopy(self.fake_managed_nodes) + self.managed_node_with_different_node_id.node_id = 'different_node_id' + self.fake_secret_info = test_utils.get_fake_secret_info() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.csi_node_modified_watch_manifest = test_utils.get_fake_node_watch_event( + common_settings.MODIFIED_EVENT_TYPE) + self.csi_node_modified_watch_munch = test_utils.convert_manifest_to_munch( + self.csi_node_modified_watch_manifest) + self.csi_node_deleted_watch_manifest = test_utils.get_fake_node_watch_event(common_settings.DELETED_EVENT_TYPE) + self.csi_node_deleted_watch_munch = test_utils.convert_manifest_to_munch(self.csi_node_deleted_watch_manifest) + self.global_managed_nodes = test_utils.patch_nodes_global_variable( + test_settings.CSI_NODE_WATCHER_PATH) + self.global_managed_secret = test_utils.patch_managed_secrets_global_variable( + test_settings.CSI_NODE_WATCHER_PATH) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_define_managed_csi_node_with_deleted_event_that_is_part_of_update_but_node_id_did_not_change( + self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, True) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, True) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_define_managed_csi_node_with_deleted_event_that_is_part_of_update_and_node_id_change(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + 
mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, True, True) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, True, True) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_undefine_managed_csi_node_with_deleted_event_that_is_not_part_of_update(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, False, False, True, False) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, False, False, True, False) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_undefine_managed_csi_node_with_deleted_event_when_node_has_forbid_deletion_label(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, False, False, True, True) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, False, False, True, True) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def 
test_do_not_undefine_managed_csi_node_with_deleted_event_when_host_definer_cannot_delete_hosts( + self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, False, False, False, False) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, False, False, False, False) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_handle_unmanaged_csi_node_with_deleted_event(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, False) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._assert_handle_deleted_csi_node_pod_called(mock_utils, False) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_handle_csi_node_with_deleted_event_and_it_is_not_in_global_nodes(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self.global_managed_nodes.pop(self.fake_csi_node_info.name, None) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_deleted_watch_manifest, self.csi_node_deleted_watch_munch) + self._assert_handle_deleted_csi_node_pod_not_called(mock_utils) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def 
test_define_new_csi_node_with_ibm_block_csi_driver_with_modified_event(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self.global_managed_nodes.pop(self.fake_csi_node_info.name, None) + self.watcher.node_manager.is_node_can_be_defined.return_value = True + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._assert_define_new_csi_node_called(mock_utils) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_handle_new_csi_node_with_ibm_block_csi_driver_with_modified_event_but_cannot_be_defined( + self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self.global_managed_nodes.pop(self.fake_csi_node_info.name, None) + self.watcher.node_manager.is_node_can_be_defined.return_value = False + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._assert_define_new_csi_node_not_called() + self._assert_handle_deleted_csi_node_pod_not_called(mock_utils) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_define_managed_csi_node_with_modified_event_that_is_part_of_update_but_node_id_did_not_change( + self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, True) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, 
self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._assert_define_new_csi_node_not_called() + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, True) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_define_managed_csi_node_with_modified_event_that_is_part_of_update_and_node_id_change(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, True, True) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._assert_define_new_csi_node_not_called() + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, True, True) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_undefine_managed_csi_node_with_modified_event_that_is_not_part_of_update(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, False, False, True, False) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._assert_define_new_csi_node_not_called() + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, False, False, True, False) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_undefine_managed_csi_node_with_modified_event_when_node_has_forbid_deletion_label(self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + 
self._prepare_handle_deleted_csi_node_pod(mock_utils, True, False, False, True, True) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._assert_define_new_csi_node_not_called() + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, False, False, True, True) + + @patch('{}.utils'.format(test_settings.CSI_NODE_WATCHER_PATH)) + def test_do_not_undefine_managed_csi_node_with_modified_event_when_host_definer_cannot_delete_hosts( + self, mock_utils): + self._prepare_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._prepare_handle_deleted_csi_node_pod(mock_utils, True, False, False, False, False) + test_utils.run_function_with_timeout(self.watcher.watch_csi_nodes_resources, 0.2) + self._assert_watch_csi_nodes_resources( + mock_utils, self.csi_node_modified_watch_manifest, self.csi_node_modified_watch_munch) + self._assert_define_new_csi_node_not_called() + self._assert_handle_deleted_csi_node_pod_called(mock_utils, True, False, False, False, False) + + def _prepare_watch_csi_nodes_resources(self, mock_utils, csi_node_watch_manifest, csi_node_watch_munch): + self.global_managed_nodes[self.fake_csi_node_info.name] = self.fake_managed_nodes + self.watcher.k8s_api.get_csi_node_stream.return_value = iter([csi_node_watch_manifest]) + mock_utils.munch.return_value = csi_node_watch_munch + self.watcher.resource_info_manager.generate_csi_node_info.return_value = self.fake_csi_node_info + + def _prepare_handle_deleted_csi_node_pod( + self, mock_utils, is_node_has_manage_node_label, is_host_part_of_update=False, is_node_id_changed=False, + is_host_definer_can_delete_hosts=False, is_node_has_forbid_deletion_label=False): + self.watcher.node_manager.is_node_has_manage_node_label.return_value = is_node_has_manage_node_label + if 
is_node_has_manage_node_label: + self._prepare_undefine_host_when_node_pod_is_deleted( + mock_utils, is_host_part_of_update, is_node_id_changed, is_host_definer_can_delete_hosts, + is_node_has_forbid_deletion_label) + + def _prepare_undefine_host_when_node_pod_is_deleted( + self, mock_utils, is_host_part_of_update, is_node_id_changed, is_host_definer_can_delete_hosts, + is_node_has_forbid_deletion_label): + self.watcher.csi_node.is_host_part_of_update.return_value = is_host_part_of_update + if is_host_part_of_update: + self._prepare_create_definitions_when_csi_node_changed(is_node_id_changed) + else: + mock_utils.is_host_definer_can_delete_hosts.return_value = is_host_definer_can_delete_hosts + self.watcher.node_manager.is_node_has_forbid_deletion_label.return_value = is_node_has_forbid_deletion_label + + def _prepare_create_definitions_when_csi_node_changed(self, is_node_id_changed): + self.global_managed_secret.append(self.fake_secret_info) + self.watcher.host_definition_manager.get_matching_host_definition_info.return_value = \ + self.fake_host_definition_info + self.watcher.csi_node.is_node_id_changed.return_value = is_node_id_changed + if is_node_id_changed: + self.watcher.node_manager.generate_managed_node.return_value = self.managed_node_with_different_node_id + + def _assert_watch_csi_nodes_resources(self, mock_utils, csi_node_watch_manifest, csi_node_watch_munch): + mock_utils.loop_forever.assert_called() + self.watcher.k8s_api.get_csi_node_stream.assert_called_with() + mock_utils.munch.assert_called_once_with(csi_node_watch_manifest) + self.watcher.resource_info_manager.generate_csi_node_info.assert_called_once_with( + csi_node_watch_munch.object) + + def _assert_define_new_csi_node_called(self, mock_utils): + self.watcher.node_manager.is_node_can_be_defined.assert_called_once_with(self.fake_csi_node_info.name) + self.watcher.node_manager.add_node_to_nodes.assert_called_once_with(self.fake_csi_node_info) + 
self.watcher.definition_manager.define_node_on_all_storages.assert_called_once_with( + self.fake_csi_node_info.name) + self._assert_handle_deleted_csi_node_pod_not_called(mock_utils) + + def _assert_define_new_csi_node_not_called(self): + self.watcher.node_manager.is_node_can_be_defined.assert_called_once_with(self.fake_csi_node_info.name) + self.watcher.node_manager.add_node_to_nodes.assert_not_called() + self.watcher.definition_manager.define_node_on_all_storages.assert_not_called() + + def _assert_handle_deleted_csi_node_pod_called( + self, mock_utils, is_node_has_manage_node_label, is_host_part_of_update=False, is_node_id_changed=False, + is_host_definer_can_delete_hosts=False, is_node_has_forbid_deletion_label=False): + self.watcher.node_manager.is_node_has_manage_node_label.assert_called_once_with(self.fake_csi_node_info.name) + if is_node_has_manage_node_label: + self.watcher.csi_node.is_host_part_of_update.assert_called_once_with(self.fake_csi_node_info.name) + self._assert_undefine_host_when_node_pod_is_deleted_called( + mock_utils, is_host_part_of_update, is_node_id_changed, is_host_definer_can_delete_hosts, + is_node_has_forbid_deletion_label) + else: + self._assert_undefine_host_when_node_pod_is_deleted_not_called(mock_utils) + + def _assert_undefine_host_when_node_pod_is_deleted_called( + self, mock_utils, is_host_part_of_update, is_node_id_changed, is_host_definer_can_delete_hosts, + is_node_has_forbid_deletion_label): + self.watcher.csi_node.is_host_part_of_update.assert_called_once_with(self.fake_csi_node_info.name) + if is_host_part_of_update: + self._assert_create_definitions_when_csi_node_changed_called(is_node_id_changed) + mock_utils.is_host_definer_can_delete_hosts.assert_not_called() + self.watcher.node_manager.is_node_has_forbid_deletion_label.assert_not_called() + self._assert_undefine_all_the_definitions_of_a_node_not_called() + else: + self._assert_create_definitions_when_csi_node_changed_not_called() + 
mock_utils.is_host_definer_can_delete_hosts.assert_called_once_with() + if is_host_definer_can_delete_hosts: + self.watcher.node_manager.is_node_has_forbid_deletion_label.assert_called_once_with( + self.fake_csi_node_info.name) + if is_host_definer_can_delete_hosts and not is_node_has_forbid_deletion_label: + self._assert_undefine_all_the_definitions_of_a_node_called() + else: + self._assert_undefine_all_the_definitions_of_a_node_not_called() + self.assertEqual(self.global_managed_nodes, {}) + + def _assert_create_definitions_when_csi_node_changed_called(self, is_node_id_changed): + self.watcher.host_definition_manager.get_matching_host_definition_info.assert_called_once_with( + self.fake_csi_node_info.name, self.fake_secret_info.name, self.fake_secret_info.namespace) + self.watcher.csi_node.is_node_id_changed.assert_called_once_with(self.fake_host_definition_info.node_id, + self.fake_csi_node_info.node_id) + if is_node_id_changed: + self.watcher.node_manager.generate_managed_node.assert_called_once_with(self.fake_csi_node_info) + self.watcher.definition_manager.create_definition.assert_called_once_with(self.fake_host_definition_info) + self.assertEqual(self.global_managed_nodes[self.fake_csi_node_info.name], + self.managed_node_with_different_node_id) + else: + self.watcher.node_manager.generate_managed_node.assert_not_called() + self.watcher.definition_manager.create_definition.assert_not_called() + + def _assert_undefine_all_the_definitions_of_a_node_called(self): + self.watcher.definition_manager.undefine_node_definitions.assert_called_once_with(self.fake_csi_node_info.name) + self.watcher.node_manager.remove_manage_node_label.assert_called_once_with(self.fake_csi_node_info.name) + self.assertEqual(self.global_managed_nodes, {}) + + def _assert_handle_deleted_csi_node_pod_not_called(self, mock_utils): + self.watcher.node_manager.is_node_has_manage_node_label.assert_not_called() + self._assert_undefine_host_when_node_pod_is_deleted_not_called(mock_utils) + + def 
_assert_undefine_host_when_node_pod_is_deleted_not_called(self, mock_utils): + self.watcher.csi_node.is_host_part_of_update.assert_not_called() + mock_utils.is_host_definer_can_delete_hosts.assert_not_called() + self.watcher.node_manager.is_node_has_forbid_deletion_label.assert_not_called() + self._assert_create_definitions_when_csi_node_changed_not_called() + self._assert_undefine_all_the_definitions_of_a_node_not_called() + + def _assert_create_definitions_when_csi_node_changed_not_called(self): + self.watcher.host_definition_manager.get_matching_host_definition_info.assert_not_called() + self.watcher.csi_node.is_node_id_changed.assert_not_called() + self.watcher.node_manager.generate_managed_node.assert_not_called() + self.watcher.definition_manager.create_definition.assert_not_called() + + def _assert_undefine_all_the_definitions_of_a_node_not_called(self): + self.watcher.definition_manager.undefine_node_definitions.assert_not_called() + self.watcher.node_manager.remove_manage_node_label.assert_not_called() diff --git a/controllers/tests/controller_server/host_definer/watchers/host_definition_watcher_test.py b/controllers/tests/controller_server/host_definer/watchers/host_definition_watcher_test.py new file mode 100644 index 000000000..e13e06f27 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/watchers/host_definition_watcher_test.py @@ -0,0 +1,201 @@ +from copy import deepcopy +from unittest.mock import patch, MagicMock + +import controllers.common.settings as common_settings +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +from controllers.tests.controller_server.host_definer.watchers.watcher_base import WatcherBaseSetUp +from controllers.servers.host_definer.watcher.host_definition_watcher import HostDefinitionWatcher + + +class TestWatchHostDefinitionsResources(WatcherBaseSetUp): + def setUp(self): + super().setUp() 
+ self.watcher = HostDefinitionWatcher() + self.watcher.k8s_api = MagicMock() + self.watcher.resource_info_manager = MagicMock() + self.watcher.host_definition_manager = MagicMock() + self.watcher.definition_manager = MagicMock() + self.watcher.node_manager = MagicMock() + test_utils.patch_pending_variables() + self.fake_define_response = test_utils.get_fake_define_host_response() + self.fake_define_response.error_message = '' + self.fake_action = common_settings.DEFINE_ACTION + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.fake_pending_deletion_host_definition_info = deepcopy(self.fake_host_definition_info) + self.fake_pending_deletion_host_definition_info.phase = common_settings.PENDING_DELETION_PHASE + self.fake_pending_creation_host_definition_info = deepcopy(self.fake_host_definition_info) + self.fake_pending_creation_host_definition_info.phase = common_settings.PENDING_CREATION_PHASE + self.host_definition_deleted_watch_manifest = test_utils.get_fake_host_definition_watch_event( + common_settings.DELETED_EVENT_TYPE) + self.host_definition_deleted_watch_munch = test_utils.convert_manifest_to_munch( + self.host_definition_deleted_watch_manifest) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_WATCHER_PATH)) + def test_define_pending_host_definition(self, mock_utils): + host_definition_info = self.fake_pending_creation_host_definition_info + self._prepare_watch_host_definition_resources(mock_utils, host_definition_info, True, False) + self._prepare_define_host_using_exponential_backoff(mock_utils, host_definition_info, [False, True]) + test_utils.run_function_with_timeout(self.watcher.watch_host_definitions_resources, 0.5) + self._assert_watch_host_definition_resources(mock_utils, host_definition_info, True) + self._assert_define_host_using_exponential_backoff_called(mock_utils, host_definition_info, 2, False) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_WATCHER_PATH)) + def 
test_undefine_pending_host_definition(self, mock_utils): + host_definition_info = self.fake_pending_deletion_host_definition_info + self._prepare_watch_host_definition_resources(mock_utils, host_definition_info, True, False) + self._prepare_define_host_using_exponential_backoff(mock_utils, host_definition_info, [False, True], True) + test_utils.run_function_with_timeout(self.watcher.watch_host_definitions_resources, 0.5) + self._assert_watch_host_definition_resources(mock_utils, host_definition_info, True) + self._assert_define_host_using_exponential_backoff_called(mock_utils, host_definition_info, 2, False, True) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_WATCHER_PATH)) + def test_do_not_handle_pending_deletion_host_definition_that_cannot_be_defined(self, mock_utils): + host_definition_info = self.fake_pending_deletion_host_definition_info + self._prepare_watch_host_definition_resources(mock_utils, host_definition_info, True, False) + self._prepare_define_host_using_exponential_backoff(mock_utils, host_definition_info, [False, True], False) + test_utils.run_function_with_timeout(self.watcher.watch_host_definitions_resources, 0.5) + self._assert_watch_host_definition_resources(mock_utils, host_definition_info, True) + self._assert_define_host_using_exponential_backoff_called(mock_utils, host_definition_info, 2, False, False) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_WATCHER_PATH)) + def test_pending_host_definition_phase_to_error(self, mock_utils): + host_definition_info = self.fake_pending_deletion_host_definition_info + self._prepare_watch_host_definition_resources(mock_utils, host_definition_info, True, False) + self._prepare_define_host_using_exponential_backoff(mock_utils, host_definition_info, [False, False, False]) + test_utils.run_function_with_timeout(self.watcher.watch_host_definitions_resources, 0.5) + self._assert_watch_host_definition_resources(mock_utils, host_definition_info, True) + 
self._assert_define_host_using_exponential_backoff_called(mock_utils, host_definition_info, 3, True) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_WATCHER_PATH)) + def test_create_event_when_failing_to_undefine_pending_deletion_host_definition(self, mock_utils): + host_definition_info = self.fake_pending_deletion_host_definition_info + self.fake_define_response.error_message = test_settings.MESSAGE + self._prepare_watch_host_definition_resources(mock_utils, host_definition_info, True, False) + self._prepare_define_host_using_exponential_backoff(mock_utils, host_definition_info, [False, True], True) + test_utils.run_function_with_timeout(self.watcher.watch_host_definitions_resources, 0.5) + self._assert_watch_host_definition_resources(mock_utils, host_definition_info, True) + self._assert_define_host_using_exponential_backoff_called( + mock_utils, host_definition_info, 2, False, True, True) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_WATCHER_PATH)) + def test_do_not_handle_not_pending_host_definition(self, mock_utils): + host_definition_info = self.fake_host_definition_info + self._prepare_watch_host_definition_resources(mock_utils, host_definition_info, False) + test_utils.run_function_with_timeout(self.watcher.watch_host_definitions_resources, 0.5) + self._assert_watch_host_definition_resources(mock_utils, host_definition_info, False) + self._assert_define_host_using_exponential_backoff_not_called(mock_utils) + + @patch('{}.utils'.format(test_settings.HOST_DEFINITION_WATCHER_PATH)) + def test_do_not_handle_pending_host_definition_when_event_type_is_deletion(self, mock_utils): + host_definition_info = self.fake_host_definition_info + self._prepare_watch_host_definition_resources(mock_utils, host_definition_info, True, True) + test_utils.run_function_with_timeout(self.watcher.watch_host_definitions_resources, 0.5) + self._assert_watch_host_definition_resources(mock_utils, host_definition_info, True) + 
self._assert_define_host_using_exponential_backoff_not_called(mock_utils) + + def _prepare_watch_host_definition_resources( + self, mock_utils, host_definition_info, is_host_definition_in_pending_phase, + is_watch_object_type_is_delete=False): + self.watcher.k8s_api.list_host_definition.return_value = [host_definition_info] + mock_utils.get_k8s_object_resource_version.return_value = test_settings.FAKE_RESOURCE_VERSION + self.watcher.k8s_api.get_host_definition_stream.return_value = iter( + [self.host_definition_deleted_watch_manifest]) + mock_utils.munch.return_value = self.host_definition_deleted_watch_munch + self.watcher.resource_info_manager.generate_host_definition_info.return_value = host_definition_info + self.watcher.host_definition_manager.is_host_definition_in_pending_phase.return_value = \ + is_host_definition_in_pending_phase + mock_utils.is_watch_object_type_is_delete.return_value = is_watch_object_type_is_delete + + def _prepare_define_host_using_exponential_backoff( + self, mock_utils, host_definition_info, is_host_definition_not_pending, is_node_can_be_undefined=False): + self.watcher.host_definition_manager.is_host_definition_not_pending.side_effect = \ + is_host_definition_not_pending + self._prepare_handle_pending_host_definition(mock_utils, host_definition_info, is_node_can_be_undefined) + + def _prepare_handle_pending_host_definition(self, mock_utils, host_definition_info, is_node_can_be_undefined): + mock_utils.get_action.return_value = self.fake_action + if host_definition_info.phase == common_settings.PENDING_CREATION_PHASE: + self.watcher.definition_manager.define_host_after_pending.return_value = self.fake_define_response + else: + self.watcher.node_manager.is_node_can_be_undefined.return_value = is_node_can_be_undefined + if is_node_can_be_undefined: + self.watcher.definition_manager.undefine_host_after_pending.return_value = self.fake_define_response + + def _assert_watch_host_definition_resources( + self, mock_utils, 
host_definition_info, is_host_definition_in_pending_phase): + self.watcher.k8s_api.list_host_definition.assert_called_with() + mock_utils.get_k8s_object_resource_version.assert_called_with([host_definition_info]) + mock_utils.loop_forever.assert_called_with() + self.watcher.k8s_api.get_host_definition_stream.assert_called_with(test_settings.FAKE_RESOURCE_VERSION, 5) + mock_utils.munch.assert_called_once_with(self.host_definition_deleted_watch_manifest) + self.watcher.resource_info_manager.generate_host_definition_info.assert_called_once_with( + self.host_definition_deleted_watch_munch.object) + self.watcher.host_definition_manager.is_host_definition_in_pending_phase.assert_called_once_with( + host_definition_info.phase) + if is_host_definition_in_pending_phase: + mock_utils.is_watch_object_type_is_delete.assert_called_once_with( + self.host_definition_deleted_watch_munch.type) + + def _assert_define_host_using_exponential_backoff_called( + self, mock_utils, host_definition_info, host_definition_not_pending_call_count, set_phase_to_error=False, + is_node_can_be_undefined=False, is_error_message=False): + self.watcher.host_definition_manager.is_host_definition_not_pending.assert_called_with( + host_definition_info) + self.assertEqual(self.watcher.host_definition_manager.is_host_definition_not_pending.call_count, + host_definition_not_pending_call_count) + if set_phase_to_error: + self.watcher.host_definition_manager.set_host_definition_phase_to_error.assert_called_once_with( + host_definition_info) + else: + self._assert_called_handle_pending_host_definition( + mock_utils, host_definition_info, is_node_can_be_undefined, is_error_message) + + def _assert_called_handle_pending_host_definition( + self, mock_utils, host_definition_info, is_node_can_be_undefined, is_error_message): + mock_utils.get_action.assert_called_once_with(host_definition_info.phase) + if host_definition_info.phase == common_settings.PENDING_CREATION_PHASE: + 
self.watcher.definition_manager.define_host_after_pending.assert_called_once_with(host_definition_info) + self.watcher.node_manager.is_node_can_be_undefined.assert_not_called() + self.watcher.definition_manager.undefine_host_after_pending.assert_not_called() + else: + if is_error_message: + self.watcher.node_manager.is_node_can_be_undefined.assert_called_once_with( + host_definition_info.node_name) + else: + self.watcher.node_manager.is_node_can_be_undefined.assert_called_with(host_definition_info.node_name) + self.assertEqual(self.watcher.node_manager.is_node_can_be_undefined.call_count, 2) + + self.watcher.definition_manager.define_host_after_pending.assert_not_called() + if is_node_can_be_undefined: + self.watcher.definition_manager.undefine_host_after_pending.assert_called_once_with( + host_definition_info) + else: + self.watcher.definition_manager.undefine_host_after_pending.assert_not_called() + + self._assert_handle_message_from_storage(host_definition_info, is_node_can_be_undefined, is_error_message) + + def _assert_handle_message_from_storage(self, host_definition_info, is_node_can_be_undefined, is_error_message): + if is_error_message: + self.watcher.host_definition_manager.create_k8s_event_for_host_definition.assert_called_once_with( + host_definition_info, self.fake_define_response.error_message, self.fake_action, + common_settings.FAILED_MESSAGE_TYPE) + elif host_definition_info.phase == common_settings.PENDING_CREATION_PHASE: + self.watcher.host_definition_manager.set_host_definition_status_to_ready.assert_called_once_with( + host_definition_info) + elif is_node_can_be_undefined: + self.watcher.host_definition_manager.delete_host_definition.assert_called_once_with( + host_definition_info.name) + self.watcher.node_manager.remove_manage_node_label.assert_called_once_with(host_definition_info.name) + + def _assert_define_host_using_exponential_backoff_not_called(self, mock_utils): + 
self.watcher.host_definition_manager.is_host_definition_not_pending.assert_not_called() + self.watcher.host_definition_manager.set_host_definition_phase_to_error.assert_not_called() + self.watcher.host_definition_manager.create_k8s_event_for_host_definition.assert_not_called() + self.watcher.host_definition_manager.set_host_definition_status_to_ready.assert_not_called() + self.watcher.host_definition_manager.delete_host_definition.assert_not_called() + mock_utils.get_action.assert_not_called() + self.watcher.definition_manager.define_host_after_pending.assert_not_called() + self.watcher.definition_manager.undefine_host_after_pending.assert_not_called() + self.watcher.node_manager.remove_manage_node_label.assert_not_called() + self.watcher.node_manager.is_node_can_be_undefined.assert_not_called() diff --git a/controllers/tests/controller_server/host_definer/watchers/node_watcher_test.py b/controllers/tests/controller_server/host_definer/watchers/node_watcher_test.py new file mode 100644 index 000000000..133c4a644 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/watchers/node_watcher_test.py @@ -0,0 +1,213 @@ +from copy import deepcopy +from unittest.mock import patch, MagicMock + +import controllers.common.settings as common_settings +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +from controllers.tests.controller_server.host_definer.watchers.watcher_base import WatcherBaseSetUp +from controllers.servers.host_definer.watcher.node_watcher import NodeWatcher + + +class NodeWatcherBase(WatcherBaseSetUp): + def setUp(self): + super().setUp() + self.watcher = NodeWatcher() + self.watcher.k8s_api = MagicMock() + self.watcher.resource_info_manager = MagicMock() + self.watcher.node_manager = MagicMock() + self.watcher.host_definition_manager = MagicMock() + self.watcher.definition_manager = MagicMock() + self.fake_node_info = 
test_utils.get_fake_node_info() + self.fake_csi_node_info = test_utils.get_fake_csi_node_info() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.unmanaged_csi_nodes_with_driver = patch( + '{}.unmanaged_csi_nodes_with_driver'.format(test_settings.NODES_WATCHER_PATH), set()).start() + self.expected_unmanaged_csi_nodes_with_driver = set() + + def _prepare_is_unmanaged_csi_node_has_driver(self, is_node_can_be_defined): + self.watcher.node_manager.is_node_can_be_defined.return_value = is_node_can_be_defined + + +class TestAddInitialNodes(NodeWatcherBase): + def test_add_initial_unmanaged_node_with_ibm_block_csi_driver(self): + self.expected_unmanaged_csi_nodes_with_driver.add(test_settings.FAKE_NODE_NAME) + self._prepare_add_initial_nodes(self.fake_csi_node_info) + self._prepare_is_unmanaged_csi_node_has_driver(False) + self.watcher.add_initial_nodes() + self._assert_add_initial_nodes() + self._assert_delete_host_definitions_not_called() + self.watcher.node_manager.is_node_can_be_defined.assert_called_once_with(self.fake_csi_node_info.name) + + def test_do_not_add_initial_unmanaged_node_with_ibm_block_csi_driver_because_it_can_be_defined(self): + self._prepare_add_initial_nodes(self.fake_csi_node_info) + self._prepare_is_unmanaged_csi_node_has_driver(True) + self.watcher.add_initial_nodes() + self._assert_add_initial_nodes() + self._assert_delete_host_definitions_not_called() + self.watcher.node_manager.is_node_can_be_defined.assert_called_once_with(self.fake_csi_node_info.name) + + def test_undefined_initial_managed_node_that_do_not_have_ibm_block_csi_driver_anymore(self): + csi_node_info = deepcopy(self.fake_csi_node_info) + csi_node_info.node_id = '' + self._prepare_add_initial_nodes(csi_node_info) + self._prepare_csi_node_pod_deleted_while_host_definer_was_down(True, True) + self._prepare_delete_host_definitions_called(True) + self.watcher.add_initial_nodes() + self._assert_add_initial_nodes() + 
self._assert_delete_host_definitions_called(True) + self.watcher.node_manager.is_node_has_manage_node_label.assert_called_once_with(csi_node_info.name) + self.watcher.node_manager.is_node_has_host_definitions.assert_called_once_with(csi_node_info.name) + self.watcher.node_manager.is_node_can_be_defined.assert_not_called() + + def _prepare_delete_host_definitions_called(self, is_node_can_be_undefined): + self.watcher.node_manager.is_node_can_be_undefined.return_value = is_node_can_be_undefined + if is_node_can_be_undefined: + self.watcher.host_definition_manager.get_all_host_definitions_info_of_the_node.return_value = \ + [self.fake_host_definition_info] + + def _assert_delete_host_definitions_called(self, is_node_can_be_undefined): + self.watcher.node_manager.is_node_can_be_undefined.assert_called_once_with(self.fake_node_info.name) + if is_node_can_be_undefined: + self.watcher.host_definition_manager.get_all_host_definitions_info_of_the_node.assert_called_once_with( + self.fake_node_info.name) + self.watcher.definition_manager.delete_definition.assert_called_once_with(self.fake_host_definition_info) + self.assertEqual(self.watcher.node_manager.remove_manage_node_label.call_count, 2) + else: + self.watcher.host_definition_manager.get_all_host_definitions_info_of_the_node.assert_not_called() + self.watcher.definition_manager.delete_definition.assert_not_called() + self.watcher.node_manager.remove_manage_node_label.assert_called_once_with(self.fake_node_info.name) + + def test_do_not_undefined_initial_that_do_not_have_ibm_block_csi_driver_because_it_is_not_managed(self): + csi_node_info = deepcopy(self.fake_csi_node_info) + csi_node_info.node_id = '' + self._prepare_add_initial_nodes(csi_node_info) + self._prepare_csi_node_pod_deleted_while_host_definer_was_down(False, True) + self.watcher.add_initial_nodes() + self._assert_add_initial_nodes() + self._assert_delete_host_definitions_not_called() + 
self.watcher.node_manager.is_node_has_manage_node_label.assert_called_once_with(csi_node_info.name) + self.watcher.node_manager.is_node_has_host_definitions.assert_not_called() + self.watcher.node_manager.is_node_can_be_defined.assert_not_called() + + def test_do_not_undefined_initial_that_do_not_have_ibm_block_csi_driver_because_it_is_not_have_host_definitions( + self): + csi_node_info = deepcopy(self.fake_csi_node_info) + csi_node_info.node_id = '' + self._prepare_add_initial_nodes(csi_node_info) + self._prepare_csi_node_pod_deleted_while_host_definer_was_down(True, False) + self.watcher.add_initial_nodes() + self._assert_add_initial_nodes() + self._assert_delete_host_definitions_not_called() + self.watcher.node_manager.is_node_has_manage_node_label.assert_called_once_with(csi_node_info.name) + self.watcher.node_manager.is_node_has_host_definitions.assert_called_once_with(csi_node_info.name) + self.watcher.node_manager.is_node_can_be_defined.assert_not_called() + + def _prepare_add_initial_nodes(self, csi_node_info): + self.watcher.node_manager.get_nodes_info.return_value = [self.fake_node_info] + self.watcher.resource_info_manager.get_csi_node_info.return_value = csi_node_info + + def _prepare_csi_node_pod_deleted_while_host_definer_was_down( + self, is_node_has_manage_node_label, is_node_has_host_definitions): + self.watcher.node_manager.is_node_has_manage_node_label.return_value = is_node_has_manage_node_label + self.watcher.node_manager.is_node_has_host_definitions.return_value = is_node_has_host_definitions + + def _assert_add_initial_nodes(self): + self.watcher.node_manager.get_nodes_info.assert_called_once() + self.watcher.resource_info_manager.get_csi_node_info.assert_called_once_with(self.fake_node_info.name) + self.assertEqual(self.unmanaged_csi_nodes_with_driver, self.expected_unmanaged_csi_nodes_with_driver) + + def _assert_delete_host_definitions_not_called(self): + self.watcher.node_manager.is_node_can_be_undefined.assert_not_called() + 
self.watcher.host_definition_manager.get_all_host_definitions_info_of_the_node.assert_not_called() + self.watcher.definition_manager.delete_definition.assert_not_called() + self.watcher.node_manager.remove_manage_node_label.assert_not_called() + + +class TestWatchNodesResources(NodeWatcherBase): + def setUp(self): + super().setUp() + self.node_modified_watch_manifest = test_utils.get_fake_node_watch_event(common_settings.MODIFIED_EVENT_TYPE) + self.node_modified_watch_munch = test_utils.convert_manifest_to_munch(self.node_modified_watch_manifest) + self.node_added_watch_manifest = test_utils.get_fake_node_watch_event(common_settings.ADDED_EVENT_TYPE) + self.node_added_watch_munch = test_utils.convert_manifest_to_munch(self.node_added_watch_manifest) + + @patch('{}.utils'.format(test_settings.NODES_WATCHER_PATH)) + def test_watch_and_add_unmanaged_node_with_ibm_block_csi_driver_but_do_not_define_it(self, mock_utils): + self.expected_unmanaged_csi_nodes_with_driver.add(test_settings.FAKE_NODE_NAME) + self._prepare_watch_nodes_resources(self.node_modified_watch_manifest, + self.node_modified_watch_munch, mock_utils) + self._prepare_is_unmanaged_csi_node_has_driver(False) + self.watcher.node_manager.is_node_has_new_manage_node_label.return_value = False + self.watcher.watch_nodes_resources() + self._assert_watch_nodes_resources(self.node_modified_watch_manifest, + self.node_modified_watch_munch, mock_utils) + self._assert_define_node_not_called() + self.watcher.node_manager.is_node_has_new_manage_node_label.assert_called_once_with( + self.fake_csi_node_info, self.unmanaged_csi_nodes_with_driver) + + @patch('{}.utils'.format(test_settings.NODES_WATCHER_PATH)) + def test_watch_and_add_unmanaged_node_with_ibm_block_csi_driver_and_define_it(self, mock_utils): + self._prepare_watch_nodes_resources(self.node_modified_watch_manifest, + self.node_modified_watch_munch, mock_utils) + self._prepare_is_unmanaged_csi_node_has_driver(False) + 
self.watcher.node_manager.is_node_has_new_manage_node_label.return_value = True + self.watcher.watch_nodes_resources() + self._assert_watch_nodes_resources(self.node_modified_watch_manifest, + self.node_modified_watch_munch, mock_utils) + self._assert_define_node_called() + self.watcher.node_manager.is_node_has_new_manage_node_label.assert_called_once_with( + self.fake_csi_node_info, self.unmanaged_csi_nodes_with_driver) + + @patch('{}.utils'.format(test_settings.NODES_WATCHER_PATH)) + def test_watch_and_do_not_add_unmanaged_node_with_ibm_block_csi_driver_and_define_it(self, mock_utils): + self.unmanaged_csi_nodes_with_driver.add(test_settings.FAKE_NODE_NAME) + self.expected_unmanaged_csi_nodes_with_driver = set() + self._prepare_watch_nodes_resources(self.node_modified_watch_manifest, + self.node_modified_watch_munch, mock_utils) + self._prepare_is_unmanaged_csi_node_has_driver(True) + self.watcher.node_manager.is_node_has_new_manage_node_label.return_value = True + self.watcher.watch_nodes_resources() + self._assert_watch_nodes_resources(self.node_modified_watch_manifest, + self.node_modified_watch_munch, mock_utils) + self._assert_define_node_called() + self.watcher.node_manager.is_node_has_new_manage_node_label.assert_called_once_with( + self.fake_csi_node_info, self.unmanaged_csi_nodes_with_driver) + + @patch('{}.utils'.format(test_settings.NODES_WATCHER_PATH)) + def test_watch_and_do_not_add_unmanaged_node_and_do_not_define_it_when_the_event_type_is_not_modified( + self, mock_utils): + self.expected_unmanaged_csi_nodes_with_driver = set() + self._prepare_watch_nodes_resources(self.node_added_watch_manifest, self.node_added_watch_munch, mock_utils) + self._prepare_is_unmanaged_csi_node_has_driver(True) + self.watcher.watch_nodes_resources() + self._assert_watch_nodes_resources(self.node_added_watch_manifest, self.node_added_watch_munch, mock_utils) + self._assert_define_node_not_called() + 
self.watcher.node_manager.is_node_has_new_manage_node_label.assert_not_called() + self.watcher.node_manager.is_node_can_be_defined.assert_not_called() + + def _prepare_watch_nodes_resources(self, node_watch_manifest, node_watch_munch, mock_utils): + mock_utils.loop_forever.side_effect = [True, False] + self.watcher.k8s_api.get_node_stream.return_value = iter([node_watch_manifest]) + mock_utils.munch.return_value = node_watch_munch + self.watcher.resource_info_manager.get_csi_node_info.return_value = self.fake_csi_node_info + self.watcher.resource_info_manager.generate_node_info.return_value = self.fake_node_info + + def _assert_watch_nodes_resources(self, node_watch_manifest, node_watch_munch, mock_utils): + self.watcher.k8s_api.get_node_stream.assert_called_once_with() + mock_utils.munch.assert_called_once_with(node_watch_manifest) + self.watcher.resource_info_manager.get_csi_node_info.assert_called_once_with(test_settings.FAKE_NODE_NAME) + self.watcher.resource_info_manager.generate_node_info.assert_called_once_with( + node_watch_munch.object) + self.watcher.node_manager.handle_node_topologies.assert_called_once_with( + self.fake_node_info, node_watch_munch.type) + self.watcher.node_manager.update_node_io_group.assert_called_once_with(self.fake_node_info) + self.assertEqual(self.unmanaged_csi_nodes_with_driver, self.expected_unmanaged_csi_nodes_with_driver) + + def _assert_define_node_not_called(self): + self.watcher.node_manager.add_node_to_nodes.assert_not_called() + self.watcher.definition_manager.define_node_on_all_storages.assert_not_called() + + def _assert_define_node_called(self): + self.watcher.node_manager.add_node_to_nodes.assert_called_once_with(self.fake_csi_node_info) + self.watcher.definition_manager.define_node_on_all_storages.assert_called_once_with( + test_settings.FAKE_NODE_NAME) diff --git a/controllers/tests/controller_server/host_definer/watchers/secret_watcher_test.py 
b/controllers/tests/controller_server/host_definer/watchers/secret_watcher_test.py new file mode 100644 index 000000000..6cdbeba66 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/watchers/secret_watcher_test.py @@ -0,0 +1,136 @@ +from copy import deepcopy +from unittest.mock import patch, MagicMock + +import controllers.common.settings as common_settings +from controllers.servers.host_definer.watcher.secret_watcher import SecretWatcher +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +from controllers.tests.controller_server.host_definer.watchers.watcher_base import WatcherBaseSetUp + + +class TestWatchSecretResources(WatcherBaseSetUp): + def setUp(self): + super().setUp() + self.watcher = SecretWatcher() + self.watcher.k8s_api = MagicMock() + self.watcher.secret_manager = MagicMock() + self.watcher.host_definition_manager = MagicMock() + self.watcher.definition_manager = MagicMock() + self.watcher.node_manager = MagicMock() + self.watcher.resource_info_manager = MagicMock() + self.fake_secret_info = test_utils.get_fake_secret_info() + self.fake_secret_data = test_utils.get_fake_k8s_secret().data + self.fake_node_info = test_utils.get_fake_node_info() + self.fake_host_definition_info = test_utils.get_fake_host_definition_info() + self.global_managed_secrets = test_utils.patch_managed_secrets_global_variable( + test_settings.SECRET_WATCHER_PATH) + self.secret_modified_watch_manifest = test_utils.get_fake_secret_watch_event( + common_settings.MODIFIED_EVENT_TYPE) + self.secret_modified_watch_munch = test_utils.convert_manifest_to_munch(self.secret_modified_watch_manifest) + self.fake_nodes_with_system_id = {self.fake_node_info.name: test_settings.FAKE_SYSTEM_ID} + + @patch('{}.utils'.format(test_settings.SECRET_WATCHER_PATH)) + def test_watch_and_define_changed_topology_secret_topology(self, mock_utils): + 
self._prepare_watch_secret_resource(True, mock_utils) + self._prepare_get_secret_info(True, mock_utils) + self._prepare_handle_storage_class_secret(2) + self._test_watch_secret_resources(2, mock_utils) + self._assert_get_secret_info_called(True, mock_utils) + self._assert_handle_storage_class_secret_called() + self.watcher.host_definition_manager.get_host_definition_info_from_secret.assert_called_once_with( + self.fake_secret_info) + self.watcher.definition_manager.define_nodes.assert_called_once_with(self.fake_host_definition_info) + self.assertEqual(self.global_managed_secrets[0], self.fake_secret_info) + + @patch('{}.utils'.format(test_settings.SECRET_WATCHER_PATH)) + def test_watch_and_define_changed_non_topology_secret_topology(self, mock_utils): + self._prepare_watch_secret_resource(True, mock_utils) + self._prepare_get_secret_info(False, mock_utils) + self._prepare_handle_storage_class_secret(2) + self._test_watch_secret_resources(2, mock_utils) + self._assert_get_secret_info_called(False, mock_utils) + self._assert_handle_storage_class_secret_called() + self.watcher.host_definition_manager.get_host_definition_info_from_secret.assert_called_once_with( + self.fake_secret_info) + self.watcher.definition_manager.define_nodes.assert_called_once_with(self.fake_host_definition_info) + self.assertEqual(self.global_managed_secrets[0], self.fake_secret_info) + + @patch('{}.utils'.format(test_settings.SECRET_WATCHER_PATH)) + def test_watch_and_do_not_define_changed_secret_that_is_not_used_by_storage_class(self, mock_utils): + self._prepare_watch_secret_resource(True, mock_utils) + self._prepare_get_secret_info(True, mock_utils) + self._prepare_handle_storage_class_secret(0) + self._test_watch_secret_resources(2, mock_utils) + self._assert_get_secret_info_called(True, mock_utils) + self._assert_handle_storage_class_secret_called() + self.watcher.host_definition_manager.get_host_definition_info_from_secret.assert_not_called() + 
self.watcher.definition_manager.define_nodes.assert_not_called() + + @patch('{}.utils'.format(test_settings.SECRET_WATCHER_PATH)) + def test_watch_and_do_not_define_unchanged_secret(self, mock_utils): + self._prepare_watch_secret_resource(False, mock_utils) + self._test_watch_secret_resources(1, mock_utils) + self._assert_get_secret_info_not_called(mock_utils) + self._assert_handle_storage_class_secret_not_called() + self.watcher.resource_info_manager.generate_k8s_secret_to_secret_info.assert_called_once_with( + self.secret_modified_watch_munch.object) + + def _prepare_watch_secret_resource(self, is_secret_can_be_changed, mock_utils): + mock_utils.loop_forever.side_effect = [True, False] + self.watcher.k8s_api.get_secret_stream.return_value = iter([self.secret_modified_watch_manifest]) + mock_utils.munch.return_value = self.secret_modified_watch_munch + self.watcher.resource_info_manager.generate_k8s_secret_to_secret_info.return_value = self.fake_secret_info + self.watcher.secret_manager.is_secret_can_be_changed.return_value = is_secret_can_be_changed + + def _prepare_get_secret_info(self, is_topology_secret, mock_utils): + mock_utils.change_decode_base64_secret_config.return_value = self.fake_secret_data + self.watcher.secret_manager.is_topology_secret.return_value = is_topology_secret + if is_topology_secret: + self.watcher.node_manager.generate_nodes_with_system_id.return_value = self.fake_nodes_with_system_id + self.watcher.secret_manager.generate_secret_system_ids_topologies.return_value = \ + test_settings.FAKE_SYSTEM_IDS_TOPOLOGIES + self.watcher.resource_info_manager.generate_k8s_secret_to_secret_info.return_value = self.fake_secret_info + + def _prepare_handle_storage_class_secret(self, managed_storage_classes): + secret_info_with_storage_classes = test_utils.get_fake_secret_info(managed_storage_classes) + copy_secret_info = deepcopy(secret_info_with_storage_classes) + self.global_managed_secrets.append(copy_secret_info) + 
self.watcher.secret_manager.get_matching_managed_secret_info.return_value = ( + secret_info_with_storage_classes, 0) + if managed_storage_classes > 0: + self.watcher.host_definition_manager.get_host_definition_info_from_secret.return_value = \ + self.fake_host_definition_info + + def _test_watch_secret_resources(self, generate_secret_info_call_count, mock_utils): + self.watcher.watch_secret_resources() + mock_utils.loop_forever.assert_called() + self.watcher.k8s_api.get_secret_stream.assert_called_once_with() + mock_utils.munch.assert_called_once_with(self.secret_modified_watch_manifest) + self.assertEqual( + self.watcher.resource_info_manager.generate_k8s_secret_to_secret_info.call_count, + generate_secret_info_call_count) + self.watcher.secret_manager.is_secret_can_be_changed.assert_called_once_with( + self.fake_secret_info, self.secret_modified_watch_munch.type) + + def _assert_get_secret_info_called(self, is_topology_secret, mock_utils): + mock_utils.change_decode_base64_secret_config.assert_called_once_with( + self.secret_modified_watch_munch.object.data) + self.watcher.secret_manager.is_topology_secret.assert_called_once_with(self.fake_secret_data) + if is_topology_secret: + self.watcher.node_manager.generate_nodes_with_system_id.assert_called_once_with(self.fake_secret_data) + self.watcher.secret_manager.generate_secret_system_ids_topologies.assert_called_once_with( + self.fake_secret_data) + + def _assert_get_secret_info_not_called(self, mock_utils): + mock_utils.change_decode_base64_secret_config.assert_not_called() + self.watcher.secret_manager.is_topology_secret.assert_not_called() + self.watcher.node_manager.generate_nodes_with_system_id.assert_not_called() + self.watcher.secret_manager.generate_secret_system_ids_topologies.assert_not_called() + + def _assert_handle_storage_class_secret_called(self): + self.watcher.secret_manager.get_matching_managed_secret_info.assert_called_once_with(self.fake_secret_info) + + def 
_assert_handle_storage_class_secret_not_called(self): + self.watcher.secret_manager.get_matching_managed_secret_info.assert_not_called() + self.watcher.host_definition_manager.get_host_definition_info_from_secret.assert_not_called() + self.watcher.definition_manager.define_nodes.assert_not_called() diff --git a/controllers/tests/controller_server/host_definer/watchers/storage_class_watcher_test.py b/controllers/tests/controller_server/host_definer/watchers/storage_class_watcher_test.py new file mode 100644 index 000000000..25e7cf86a --- /dev/null +++ b/controllers/tests/controller_server/host_definer/watchers/storage_class_watcher_test.py @@ -0,0 +1,245 @@ +from copy import deepcopy +from unittest.mock import MagicMock, patch + +import controllers.common.settings as common_settings +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils +import controllers.tests.controller_server.host_definer.settings as test_settings +from controllers.tests.controller_server.host_definer.watchers.watcher_base import WatcherBaseSetUp +from controllers.servers.host_definer.watcher.storage_class_watcher import StorageClassWatcher + + +class StorageClassWatcherBase(WatcherBaseSetUp): + def setUp(self): + super().setUp() + self.watcher = StorageClassWatcher() + self.watcher.k8s_api = MagicMock() + self.watcher.storage_class_manager = MagicMock() + self.watcher.secret_manager = MagicMock() + self.watcher.node_manager = MagicMock() + self.watcher.resource_info_manager = MagicMock() + self.watcher.definition_manager = MagicMock() + self.fake_storage_class_info = test_utils.get_fake_storage_class_info() + self.fake_storage_class_info.parameters = {test_settings.STORAGE_CLASS_SECRET_FIELD: test_settings.FAKE_SECRET} + self.fake_secret_info = test_utils.get_fake_secret_info() + self.fake_node_info = test_utils.get_fake_node_info() + self.fake_secret_data = test_utils.get_fake_k8s_secret().data + self.managed_secrets_on_storage_class_watcher = 
test_utils.patch_managed_secrets_global_variable( + test_settings.STORAGE_CLASS_WATCHER_PATH) + self.fake_nodes_with_system_id = {self.fake_node_info.name: test_settings.FAKE_SYSTEM_ID} + + def _prepare_get_secrets_info_from_storage_class_with_driver_provisioner( + self, is_sc_has_csi_provisioner, is_secret, is_topology_secret=False): + self.watcher.storage_class_manager.is_storage_class_has_csi_as_a_provisioner.return_value = \ + is_sc_has_csi_provisioner + if is_sc_has_csi_provisioner: + self._prepare_get_secrets_info_from_storage_class(is_secret, is_topology_secret) + + def _prepare_get_secrets_info_from_storage_class(self, is_secret, is_topology_secret): + self.watcher.secret_manager.is_secret.return_value = is_secret + if is_secret: + self.watcher.secret_manager.get_secret_name_and_namespace.return_value = ( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + self.watcher.secret_manager.get_secret_data.return_value = self.fake_secret_data + self._prepare_get_secret_info(is_topology_secret) + self.watcher.secret_manager.add_unique_secret_info_to_list.return_value = [self.fake_secret_info] + + def _prepare_get_secret_info(self, is_topology_secret): + self.watcher.secret_manager.is_topology_secret.return_value = is_topology_secret + self.watcher.resource_info_manager.generate_secret_info.return_value = self.fake_secret_info + if is_topology_secret: + self.watcher.node_manager.generate_nodes_with_system_id.return_value = self.fake_nodes_with_system_id + self.watcher.secret_manager.generate_secret_system_ids_topologies.return_value = \ + test_settings.FAKE_SYSTEM_IDS_TOPOLOGIES + + def _assert_called_get_secrets_info_from_storage_class_called(self, is_secret, is_topology_secret=False): + self.watcher.secret_manager.is_secret.assert_called_once_with(test_settings.STORAGE_CLASS_SECRET_FIELD) + if is_secret: + self.watcher.secret_manager.get_secret_name_and_namespace.assert_called_once_with( + self.fake_storage_class_info, 
test_settings.STORAGE_CLASS_SECRET_FIELD) + self.watcher.secret_manager.get_secret_data.assert_called_once_with( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + self.watcher.secret_manager.is_topology_secret.assert_called_once_with(self.fake_secret_data) + self.watcher.secret_manager.add_unique_secret_info_to_list.assert_called_once_with( + self.fake_secret_info, []) + self._assert_get_secret_info(is_topology_secret) + else: + self._assert_get_secret_info_from_parameter_not_called() + + def _assert_get_secret_info(self, is_topology_secret): + if is_topology_secret: + self.watcher.node_manager.generate_nodes_with_system_id.assert_called_once_with(self.fake_secret_data) + self.watcher.secret_manager.generate_secret_system_ids_topologies.assert_called_once_with( + self.fake_secret_data) + self.watcher.resource_info_manager.generate_secret_info.assert_called_once_with( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE, self.fake_nodes_with_system_id, + test_settings.FAKE_SYSTEM_IDS_TOPOLOGIES) + else: + self.watcher.node_manager.generate_nodes_with_system_id.assert_not_called() + self.watcher.secret_manager.generate_secret_system_ids_topologies.assert_not_called() + self.watcher.resource_info_manager.generate_secret_info.assert_called_once_with( + test_settings.FAKE_SECRET, test_settings.FAKE_SECRET_NAMESPACE) + + def _assert_called_get_secrets_info_from_storage_class_not_called(self): + self.watcher.secret_manager.is_secret.assert_not_called() + self.watcher.secret_manager.get_secret_name_and_namespace.assert_not_called() + self.watcher.secret_manager.get_secret_data.assert_not_called() + self.watcher.secret_manager.is_topology_secret.assert_not_called() + self.watcher.secret_manager.add_unique_secret_info_to_list.assert_not_called() + self._assert_get_secret_info_from_parameter_not_called() + + def _assert_get_secret_info_from_parameter_not_called(self): + self.watcher.secret_manager.get_secret_name_and_namespace.assert_not_called() 
+ self.watcher.secret_manager.get_secret_data.assert_not_called() + self.watcher.secret_manager.is_topology_secret.assert_not_called() + self.watcher.secret_manager.add_unique_secret_info_to_list.assert_not_called() + self.watcher.node_manager.generate_nodes_with_system_id.assert_not_called() + self.watcher.secret_manager.generate_secret_system_ids_topologies.assert_not_called() + self.watcher.resource_info_manager.generate_secret_info.assert_not_called() + + +class TestAddInitialStorageClasses(StorageClassWatcherBase): + + def test_define_initial_storage_class_with_secret_parameter(self): + self._prepare_add_initial_storage_classes([self.fake_storage_class_info], True, True, False) + self.watcher.add_initial_storage_classes() + self._assert_add_initial_storage_classes(True, True, False) + self.watcher.storage_class_manager.is_storage_class_has_csi_as_a_provisioner.assert_called_once_with( + self.fake_storage_class_info) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_called_once_with(self.fake_secret_info) + + def test_do_not_define_initial_storage_class_with_non_secret_parameter(self): + self._prepare_add_initial_storage_classes([self.fake_storage_class_info], True, False) + self.watcher.add_initial_storage_classes() + self._assert_add_initial_storage_classes(True, False) + self.watcher.storage_class_manager.is_storage_class_has_csi_as_a_provisioner.assert_called_once_with( + self.fake_storage_class_info) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_not_called() + + def test_do_not_define_initial_storage_class_with_no_ibm_block_csi_provisioner(self): + self._prepare_add_initial_storage_classes([self.fake_storage_class_info], False) + self.watcher.add_initial_storage_classes() + self._assert_add_initial_storage_classes(False) + self.watcher.storage_class_manager.is_storage_class_has_csi_as_a_provisioner.assert_called_once_with( + self.fake_storage_class_info) + 
self.watcher.definition_manager.define_nodes_when_new_secret.assert_not_called() + + def test_define_initial_storage_class_with_secret_topology_parameter(self): + self._prepare_add_initial_storage_classes([self.fake_storage_class_info], True, True, True) + self.watcher.add_initial_storage_classes() + self._assert_add_initial_storage_classes(True, True, True) + self.watcher.storage_class_manager.is_storage_class_has_csi_as_a_provisioner.assert_called_once_with( + self.fake_storage_class_info) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_called_once_with(self.fake_secret_info) + + def test_do_not_define_initial_empty_storage_classes(self): + self._prepare_add_initial_storage_classes([]) + self.watcher.add_initial_storage_classes() + self._assert_add_initial_storage_classes(False) + self.watcher.storage_class_manager.is_storage_class_has_csi_as_a_provisioner.assert_not_called() + self.watcher.definition_manager.define_nodes_when_new_secret.assert_not_called() + + def _prepare_add_initial_storage_classes(self, storage_classes_info, is_sc_has_csi_provisioner=False, + is_secret=False, is_topology_secret=False): + self.watcher.resource_info_manager.get_storage_classes_info.return_value = storage_classes_info + self._prepare_get_secrets_info_from_storage_class_with_driver_provisioner( + is_sc_has_csi_provisioner, is_secret, is_topology_secret) + + def _assert_add_initial_storage_classes(self, is_sc_has_csi_provisioner=False, + is_secret=False, is_topology_secret=False): + self.watcher.resource_info_manager.get_storage_classes_info.assert_called_once_with() + if is_sc_has_csi_provisioner: + self._assert_called_get_secrets_info_from_storage_class_called(is_secret, is_topology_secret) + else: + self._assert_called_get_secrets_info_from_storage_class_not_called() + + +class TestWatchStorageClassResources(StorageClassWatcherBase): + def setUp(self): + super().setUp() + self.secret_info_with_storage_classes = test_utils.get_fake_secret_info(2) + 
self.copy_secret_info_with_storage_classes = deepcopy(self.secret_info_with_storage_classes) + self.storage_class_added_watch_manifest = test_utils.get_fake_storage_class_watch_event( + common_settings.ADDED_EVENT_TYPE) + self.storage_class_added_watch_munch = test_utils.convert_manifest_to_munch( + self.storage_class_added_watch_manifest) + self.storage_class_deleted_watch_manifest = test_utils.get_fake_storage_class_watch_event( + common_settings.DELETED_EVENT_TYPE) + self.storage_class_deleted_watch_munch = test_utils.convert_manifest_to_munch( + self.storage_class_deleted_watch_manifest) + self.global_managed_secrets = patch('{}.MANAGED_SECRETS'.format(test_settings.STORAGE_CLASS_WATCHER_PATH), + [self.copy_secret_info_with_storage_classes]).start() + + @patch('{}.utils'.format(test_settings.STORAGE_CLASS_WATCHER_PATH)) + def test_define_new_storage_class_with_topology_secret_parameter(self, mock_utils): + mock_utils.munch.return_value = self.storage_class_added_watch_munch + self._prepare_get_secrets_info_from_storage_class_with_driver_provisioner(True, True, True) + self._test_watch_storage_class_resources(self.storage_class_added_watch_manifest, mock_utils) + self._assert_called_get_secrets_info_from_storage_class_called(True, True) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_called_once_with(self.fake_secret_info) + self.watcher.secret_manager.get_matching_managed_secret_info.assert_not_called() + + @patch('{}.utils'.format(test_settings.STORAGE_CLASS_WATCHER_PATH)) + def test_define_new_storage_class_with_non_topology_secret_parameter(self, mock_utils): + mock_utils.munch.return_value = self.storage_class_added_watch_munch + self._prepare_get_secrets_info_from_storage_class_with_driver_provisioner(True, True, False) + self._test_watch_storage_class_resources(self.storage_class_added_watch_manifest, mock_utils) + self._assert_called_get_secrets_info_from_storage_class_called(True, False) + 
self.watcher.definition_manager.define_nodes_when_new_secret.assert_called_once_with(self.fake_secret_info) + self.watcher.secret_manager.get_matching_managed_secret_info.assert_not_called() + + @patch('{}.utils'.format(test_settings.STORAGE_CLASS_WATCHER_PATH)) + def test_do_not_define_new_storage_class_with_non_secret_parameter(self, mock_utils): + mock_utils.munch.return_value = self.storage_class_added_watch_munch + self._prepare_get_secrets_info_from_storage_class_with_driver_provisioner(True, False) + self._test_watch_storage_class_resources(self.storage_class_added_watch_manifest, mock_utils) + self._assert_called_get_secrets_info_from_storage_class_called(False) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_not_called() + self.watcher.secret_manager.get_matching_managed_secret_info.assert_not_called() + + @patch('{}.utils'.format(test_settings.STORAGE_CLASS_WATCHER_PATH)) + def test_undefine_new_storage_class_with_topology_secret_parameter(self, mock_utils): + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + mock_utils.munch.return_value = self.storage_class_deleted_watch_munch + self._prepare_get_secrets_info_from_storage_class_with_driver_provisioner(True, True, True) + self.watcher.secret_manager.get_matching_managed_secret_info.return_value = (None, 0) + self._test_watch_storage_class_resources(self.storage_class_deleted_watch_manifest, mock_utils) + self._assert_called_get_secrets_info_from_storage_class_called(True, True) + self.watcher.secret_manager.get_matching_managed_secret_info.assert_called_once_with(self.fake_secret_info) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_not_called() + self.assertEqual(self.global_managed_secrets[0].managed_storage_classes, + self.secret_info_with_storage_classes.managed_storage_classes - 1) + + @patch('{}.utils'.format(test_settings.STORAGE_CLASS_WATCHER_PATH)) + def test_undefine_new_storage_class_with_non_topology_secret_parameter(self, 
mock_utils): + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + mock_utils.munch.return_value = self.storage_class_deleted_watch_munch + self._prepare_get_secrets_info_from_storage_class_with_driver_provisioner(True, True, False) + self.watcher.secret_manager.get_matching_managed_secret_info.return_value = (None, 0) + self._test_watch_storage_class_resources(self.storage_class_deleted_watch_manifest, mock_utils) + self._assert_called_get_secrets_info_from_storage_class_called(True, False) + self.watcher.secret_manager.get_matching_managed_secret_info.assert_called_once_with(self.fake_secret_info) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_not_called() + self.assertEqual(self.global_managed_secrets[0].managed_storage_classes, + self.secret_info_with_storage_classes.managed_storage_classes - 1) + + @patch('{}.utils'.format(test_settings.STORAGE_CLASS_WATCHER_PATH)) + def test_do_not_undefine_new_storage_class_with_non_secret_parameter(self, mock_utils): + self.global_managed_secrets.append(self.secret_info_with_storage_classes) + mock_utils.munch.return_value = self.storage_class_deleted_watch_munch + self._prepare_get_secrets_info_from_storage_class_with_driver_provisioner(True, False) + self.watcher.secret_manager.get_matching_managed_secret_info.return_value = (None, 0) + self._test_watch_storage_class_resources(self.storage_class_deleted_watch_manifest, mock_utils) + self._assert_called_get_secrets_info_from_storage_class_called(False) + self.watcher.definition_manager.define_nodes_when_new_secret.assert_not_called() + self.watcher.secret_manager.get_matching_managed_secret_info.assert_not_called() + self.assertEqual(self.global_managed_secrets[0].managed_storage_classes, + self.secret_info_with_storage_classes.managed_storage_classes) + + def _test_watch_storage_class_resources(self, watch_manifest, mock_utils): + mock_utils.loop_forever.side_effect = [True, False] + 
self.watcher.k8s_api.get_storage_class_stream.return_value = iter([watch_manifest]) + self.watcher.resource_info_manager.generate_storage_class_info.return_value = self.fake_storage_class_info + self.watcher.watch_storage_class_resources() + mock_utils.munch.assert_called_once_with(watch_manifest) + self.watcher.storage_class_manager.is_storage_class_has_csi_as_a_provisioner.assert_called_once_with( + self.fake_storage_class_info) diff --git a/controllers/tests/controller_server/host_definer/watchers/watcher_base.py b/controllers/tests/controller_server/host_definer/watchers/watcher_base.py new file mode 100644 index 000000000..5c63dcc91 --- /dev/null +++ b/controllers/tests/controller_server/host_definer/watchers/watcher_base.py @@ -0,0 +1,8 @@ +import unittest + +import controllers.tests.controller_server.host_definer.utils.test_utils as test_utils + + +class WatcherBaseSetUp(unittest.TestCase): + def setUp(self): + test_utils.patch_k8s_api_init() diff --git a/controllers/tests/controller_server/utils_test.py b/controllers/tests/controller_server/utils_test.py index 04382d48e..4e6dc4ad5 100644 --- a/controllers/tests/controller_server/utils_test.py +++ b/controllers/tests/controller_server/utils_test.py @@ -534,3 +534,16 @@ def test_validate_parameters_match_volume_prefix_success(self): self._test_validate_parameters_match_volume(volume_field="name", volume_value="prefix_vol", parameter_field=controller_config.PARAMETERS_VOLUME_NAME_PREFIX, parameter_value="prefix") + + def test_is_call_home_enabled_true(self): + self._test_is_call_home_enabled('true', True) + + def test_is_call_home_enabled_false(self): + self._test_is_call_home_enabled('false', False) + + def _test_is_call_home_enabled(self, get_env_return_value, expected_result): + mock_getenv = patch('{}.getenv'.format('controllers.servers.utils')).start() + mock_getenv.return_value = get_env_return_value + result = utils.is_call_home_enabled() + self.assertEqual(result, expected_result) + 
mock_getenv.assert_called_once_with(controller_config.ENABLE_CALL_HOME_ENV_VAR, 'true') diff --git a/controllers/tests/controller_server/volume_group_server_test.py b/controllers/tests/controller_server/volume_group_server_test.py new file mode 100644 index 000000000..86901b761 --- /dev/null +++ b/controllers/tests/controller_server/volume_group_server_test.py @@ -0,0 +1,263 @@ +import unittest + +import grpc +from csi_general import volumegroup_pb2 +from mock import MagicMock, Mock +from munch import Munch + +from controllers.array_action import errors as array_errors +from controllers.servers import settings as servers_settings +from controllers.servers.csi.volume_group_server import VolumeGroupControllerServicer +from controllers.tests import utils +from controllers.tests.common.test_settings import SECRET, VOLUME_GROUP_NAME, NAME_PREFIX, REQUEST_VOLUME_GROUP_ID, \ + VOLUME_GROUP_UID, REQUEST_VOLUME_ID, VOLUME_UID, REQUEST_REAL_VOLUME_ID, REAL_NGUID +from controllers.tests.controller_server.common import mock_array_type, mock_mediator, mock_get_agent +from controllers.tests.controller_server.csi_controller_server_test import CommonControllerTest +from controllers.tests.utils import ProtoBufMock + +VG_CONTROLLER_SERVER_PATH = "controllers.servers.csi.volume_group_server" + + +class BaseVgControllerSetUp(unittest.TestCase): + + def setUp(self): + self.servicer = VolumeGroupControllerServicer() + + mock_array_type(self, VG_CONTROLLER_SERVER_PATH) + + self.mediator = mock_mediator() + + self.storage_agent = MagicMock() + mock_get_agent(self, VG_CONTROLLER_SERVER_PATH) + + self.request = ProtoBufMock() + self.request.secrets = SECRET + + self.request.parameters = {} + self.request.volume_context = {} + self.volume_capability = utils.get_mock_volume_capability() + self.capacity_bytes = 10 + self.request.capacity_range = Mock() + self.request.capacity_range.required_bytes = self.capacity_bytes + self.context = utils.FakeContext() + + +class 
TestCreateVolumeGroup(BaseVgControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.CreateVolumeGroup + + @property + def tested_method_response_class(self): + return volumegroup_pb2.CreateVolumeGroupResponse + + def setUp(self): + super().setUp() + self.request.name = VOLUME_GROUP_NAME + + def test_create_volume_group_with_empty_name(self): + self._test_create_object_with_empty_name() + + def test_create_volume_group_with_wrong_secrets(self, ): + self._test_request_with_wrong_secrets() + + def test_create_volume_group_already_processing(self): + self._test_request_already_processing("name", self.request.name) + + def _prepare_create_volume_without_get(self): + self.mediator.get_volume_group = Mock(side_effect=array_errors.ObjectNotFoundError("")) + self.mediator.create_volume_group = Mock() + self.mediator.create_volume_group.return_value = utils.get_mock_mediator_response_volume_group() + + def test_create_volume_group_success(self): + self._prepare_create_volume_without_get() + + response = self.servicer.CreateVolumeGroup(self.request, self.context) + + self.mediator.create_volume_group.assert_called_once_with(VOLUME_GROUP_NAME) + self.assertEqual(type(response), volumegroup_pb2.CreateVolumeGroupResponse) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_create_volume_group_with_prefix_success(self): + self._prepare_create_volume_without_get() + self.request.parameters = {servers_settings.PARAMETERS_VOLUME_GROUP_NAME_PREFIX: NAME_PREFIX} + + self.servicer.CreateVolumeGroup(self.request, self.context) + + self.mediator.create_volume_group.assert_called_once_with('prefix_volume_group_name') + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_create_volume_group_already_exist_fail(self): + self.mediator.get_volume_group = Mock(side_effect=array_errors.ObjectNotFoundError("")) + self.mediator.create_volume_group = Mock(side_effect=array_errors.VolumeGroupAlreadyExists("", "")) + 
+ response = self.servicer.CreateVolumeGroup(self.request, self.context) + + self.mediator.create_volume_group.assert_called_once_with(VOLUME_GROUP_NAME) + self.assertEqual(type(response), volumegroup_pb2.CreateVolumeGroupResponse) + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + + def test_get_volume_success(self): + self.mediator.get_volume_group = Mock(return_value=utils.get_mock_mediator_response_volume_group()) + + response = self.servicer.CreateVolumeGroup(self.request, self.context) + + self.mediator.get_volume_group.assert_called_once_with(VOLUME_GROUP_NAME) + self.mediator.create_volume_group.assert_not_called() + self.assertEqual(type(response), volumegroup_pb2.CreateVolumeGroupResponse) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def test_group_get_volume_not_empty_fail(self): + volumes = [utils.get_mock_mediator_response_volume()] + response_volume_group = utils.get_mock_mediator_response_volume_group(volumes=volumes) + self.mediator.get_volume_group = Mock(return_value=response_volume_group) + + response = self.servicer.CreateVolumeGroup(self.request, self.context) + + self.mediator.get_volume_group.assert_called_once_with(VOLUME_GROUP_NAME) + self.assertEqual(type(response), volumegroup_pb2.CreateVolumeGroupResponse) + self.assertEqual(self.context.code, grpc.StatusCode.ALREADY_EXISTS) + + +class TestDeleteVolumeGroup(BaseVgControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.DeleteVolumeGroup + + @property + def tested_method_response_class(self): + return volumegroup_pb2.DeleteVolumeGroupResponse + + def setUp(self): + super().setUp() + self.request.volume_group_id = REQUEST_VOLUME_GROUP_ID + + def test_delete_volume_group_success(self): + self.servicer.DeleteVolumeGroup(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def _delete_volume_group_returns_error(self, error, return_code): + 
self.mediator.delete_volume_group.side_effect = [error] + + self.servicer.DeleteVolumeGroup(self.request, self.context) + + self.assertEqual(self.context.code, return_code) + if return_code != grpc.StatusCode.OK: + msg = str(error) + self.assertIn(msg, self.context.details, "msg : {0} is not in : {1}".format(msg, self.context.details)) + + def test_delete_volume_group_with_volume_not_found_error(self): + self._delete_volume_group_returns_error(error=array_errors.ObjectNotFoundError("volume"), + return_code=grpc.StatusCode.OK) + + def test_delete_volume_group_with_delete_volume_other_exception(self): + self._delete_volume_group_returns_error(error=Exception("error"), return_code=grpc.StatusCode.INTERNAL) + + def test_delete_volume_group_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_delete_volume_group_with_array_connection_exception(self): + self._test_request_with_array_connection_exception() + + def test_delete_volume_group_bad_id(self): + self.request.volume_group_id = VOLUME_GROUP_UID + self.servicer.DeleteVolumeGroup(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + +class TestModifyVolumeGroupMembership(BaseVgControllerSetUp, CommonControllerTest): + + @property + def tested_method(self): + return self.servicer.ModifyVolumeGroupMembership + + @property + def tested_method_response_class(self): + return volumegroup_pb2.ModifyVolumeGroupMembershipResponse + + def setUp(self): + super().setUp() + self.request.volume_group_id = REQUEST_VOLUME_GROUP_ID + + def test_modify_volume_group_success(self): + self.mediator.get_volume_group.return_value = utils.get_mock_mediator_response_volume_group() + self.servicer.ModifyVolumeGroupMembership(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + + def _prepare_modify_volume_group_volumes(self, volume_ids_in_request=None, volumes_in_volume_group=None, + volumes_in_volume_group_after=None, nvme=False): + if 
volume_ids_in_request is None: + volume_ids_in_request = [] + else: + volume_id = VOLUME_UID + if nvme: + volume_id = REAL_NGUID + self.mediator.get_object_by_id.return_value = Munch({"id": volume_id}) + self.request.volume_ids = volume_ids_in_request + self.mediator.get_volume_group.side_effect = [ + utils.get_mock_mediator_response_volume_group(volumes=volumes_in_volume_group), + utils.get_mock_mediator_response_volume_group(volumes=volumes_in_volume_group_after)] + + def _verify_add_test(self, volume_group_response): + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.get_volume_group.assert_called_with(VOLUME_GROUP_NAME) + self.mediator.add_volume_to_volume_group.assert_called_once_with(VOLUME_GROUP_NAME, VOLUME_UID) + self.mediator.remove_volume_from_volume_group.assert_not_called() + self.assertEqual(volume_group_response.volume_group.volume_group_id, REQUEST_VOLUME_GROUP_ID) + self.assertEqual(len(volume_group_response.volume_group.volumes), 1) + + def test_modify_volume_group_add_success(self): + volume_in_volume_group = utils.get_mock_mediator_response_volume() + self._prepare_modify_volume_group_volumes(volume_ids_in_request=[REQUEST_VOLUME_ID], + volumes_in_volume_group_after=[volume_in_volume_group]) + + volume_group_response = self.servicer.ModifyVolumeGroupMembership(self.request, self.context) + self._verify_add_test(volume_group_response) + + def test_modify_volume_group_add_nvme_already_in_success(self): + volume_in_volume_group = utils.get_mock_mediator_response_volume() + self._prepare_modify_volume_group_volumes(volume_ids_in_request=[REQUEST_REAL_VOLUME_ID, REQUEST_VOLUME_ID], + volumes_in_volume_group=[Munch({"id": REAL_NGUID, "name": "nguid"})], + volumes_in_volume_group_after=[volume_in_volume_group], nvme=True) + + volume_group_response = self.servicer.ModifyVolumeGroupMembership(self.request, self.context) + self._verify_add_test(volume_group_response) + + def test_modify_volume_group_remove_success(self): + 
volume_in_volume_group = utils.get_mock_mediator_response_volume() + self._prepare_modify_volume_group_volumes(volumes_in_volume_group=[volume_in_volume_group]) + + volume_group_response = self.servicer.ModifyVolumeGroupMembership(self.request, self.context) + self.assertEqual(self.context.code, grpc.StatusCode.OK) + self.mediator.remove_volume_from_volume_group.assert_called_once_with(VOLUME_UID) + self.mediator.add_volume_to_volume_group.assert_not_called() + self.assertEqual(volume_group_response.volume_group.volume_group_id, REQUEST_VOLUME_GROUP_ID) + self.assertEqual(len(volume_group_response.volume_group.volumes), 0) + + def test_modify_volume_group_with_wrong_secrets(self): + self._test_request_with_wrong_secrets() + + def test_modify_volume_group_with_array_connection_exception(self): + self._test_request_with_array_connection_exception() + + def test_modify_volume_group_with_bad_id(self): + self.request.volume_group_id = "bad_id" + + response = self.servicer.ModifyVolumeGroupMembership(self.request, self.context) + + self.assertEqual(self.context.code, grpc.StatusCode.INVALID_ARGUMENT) + self.mediator.remove_volume_from_volume_group.assert_not_called() + self.mediator.add_volume_to_volume_group.assert_not_called() + self.assertEqual(type(response), volumegroup_pb2.ModifyVolumeGroupMembershipResponse) + + def test_modify_volume_group_already_exist_fail(self): + self.mediator.get_volume_group = Mock(side_effect=array_errors.ObjectNotFoundError("")) + + response = self.servicer.ModifyVolumeGroupMembership(self.request, self.context) + + self.mediator.remove_volume_from_volume_group.assert_not_called() + self.mediator.add_volume_to_volume_group.assert_not_called() + self.assertEqual(type(response), volumegroup_pb2.ModifyVolumeGroupMembershipResponse) + self.assertEqual(self.context.code, grpc.StatusCode.NOT_FOUND) diff --git a/controllers/tests/utils.py b/controllers/tests/utils.py index d3dcb4335..28e448c0a 100644 --- a/controllers/tests/utils.py +++ 
b/controllers/tests/utils.py @@ -3,11 +3,12 @@ import grpc from mock import Mock, MagicMock -from controllers.servers.csi.controller_types import ArrayConnectionInfo from controllers.array_action.array_action_types import Replication +from controllers.servers.csi.controller_types import ArrayConnectionInfo from controllers.tests.common.test_settings import SECRET_USERNAME_VALUE as test_user, \ SECRET_PASSWORD_VALUE as test_password, ARRAY as test_array, VOLUME_NAME, VOLUME_UID, DUMMY_POOL1, \ - INTERNAL_VOLUME_ID, COPY_TYPE, \ + VOLUME_GROUP_NAME, VOLUME_GROUP_UID, \ + INTERNAL_VOLUME_GROUP_ID, INTERNAL_VOLUME_ID, COPY_TYPE, \ SNAPSHOT_NAME, SNAPSHOT_VOLUME_NAME, SNAPSHOT_VOLUME_UID @@ -32,6 +33,25 @@ def get_mock_mediator_response_volume(size=10, name=VOLUME_NAME, volume_id=VOLUM return volume +def _get_mock_mediator_response_volumes(volumes): + response_volumes = [] + for volume in volumes: + response_volumes.append(get_mock_mediator_response_volume(name=volume.name, volume_id=volume.id)) + return response_volumes + + +def get_mock_mediator_response_volume_group(name=VOLUME_GROUP_NAME, volume_id=VOLUME_GROUP_UID, volumes=None): + if not volumes: + volumes = [] + volume_group = Mock() + volume_group.id = volume_id + volume_group.internal_id = INTERNAL_VOLUME_GROUP_ID + volume_group.name = name + volume_group.array_type = "a9k" + volume_group.volumes = _get_mock_mediator_response_volumes(volumes) + return volume_group + + def get_mock_mediator_response_snapshot(capacity=10, name=SNAPSHOT_NAME, snapshot_id=SNAPSHOT_VOLUME_UID, volume_name=SNAPSHOT_VOLUME_NAME, array_type="xiv"): snapshot = Mock() diff --git a/deploy/kubernetes/examples/demo-policy-based-volumereplication.yaml b/deploy/kubernetes/examples/demo-policy-based-volumereplication.yaml new file mode 100644 index 000000000..004380614 --- /dev/null +++ b/deploy/kubernetes/examples/demo-policy-based-volumereplication.yaml @@ -0,0 +1,11 @@ +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: 
VolumeReplication +metadata: + name: demo-volumereplication + namespace: default +spec: + volumeReplicationClass: demo-volumereplicationclass + replicationState: primary # Required. Values primary/secondary. + dataSource: + kind: VolumeGroup + name: demo-volumegroup # Ensure that this is in the same namespace as VolumeReplication. diff --git a/deploy/kubernetes/examples/demo-policy-based-volumereplicationclass.yaml b/deploy/kubernetes/examples/demo-policy-based-volumereplicationclass.yaml new file mode 100644 index 000000000..aa106b162 --- /dev/null +++ b/deploy/kubernetes/examples/demo-policy-based-volumereplicationclass.yaml @@ -0,0 +1,11 @@ +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplicationClass +metadata: + name: demo-volumereplicationclass +spec: + provisioner: block.csi.ibm.com + parameters: + replication_policy: demo-replication-policy-name # Ensure that this is the name of an existing replication policy + + replication.storage.openshift.io/replication-secret-name: demo-secret + replication.storage.openshift.io/replication-secret-namespace: default diff --git a/deploy/kubernetes/examples/demo-pvc-in-volume-group.yaml b/deploy/kubernetes/examples/demo-pvc-in-volume-group.yaml new file mode 100644 index 000000000..9828f84da --- /dev/null +++ b/deploy/kubernetes/examples/demo-pvc-in-volume-group.yaml @@ -0,0 +1,14 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: demo-pvc-in-volume-group + labels: + demo-volumegroup-key: demo-volumegroup-value +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: demo-storageclass diff --git a/deploy/kubernetes/examples/demo-secret-config.json b/deploy/kubernetes/examples/demo-secret-config.json index b4539a55c..52d478eb9 100644 --- a/deploy/kubernetes/examples/demo-secret-config.json +++ b/deploy/kubernetes/examples/demo-secret-config.json @@ -1,24 +1,24 @@ -{ - "demo-management-id-1": { - "username": 
"demo-username-1", - "password": "demo-password-1", - "management_address": "demo-management-address-1", - "supported_topologies": [ - { - "topology.block.csi.ibm.com/demo-region": "demo-region-1", - "topology.block.csi.ibm.com/demo-zone": "demo-zone-1" - } - ] - }, - "demo-management-id-2": { - "username": "demo-username-2", - "password": "demo-password-2", - "management_address": "demo-management-address-2", - "supported_topologies": [ - { - "topology.block.csi.ibm.com/demo-region": "demo-region-2", - "topology.block.csi.ibm.com/demo-zone": "demo-zone-2" - } - ] - } -} +{ + "demo-management-id-1": { + "username": "demo-username-1", + "password": "demo-password-1", + "management_address": "demo-management-address-1", + "supported_topologies": [ + { + "topology.block.csi.ibm.com/demo-region": "demo-region-1", + "topology.block.csi.ibm.com/demo-zone": "demo-zone-1" + } + ] + }, + "demo-management-id-2": { + "username": "demo-username-2", + "password": "demo-password-2", + "management_address": "demo-management-address-2", + "supported_topologies": [ + { + "topology.block.csi.ibm.com/demo-region": "demo-region-2", + "topology.block.csi.ibm.com/demo-zone": "demo-zone-2" + } + ] + } +} diff --git a/deploy/kubernetes/examples/demo-storageclass-config-secret.yaml b/deploy/kubernetes/examples/demo-storageclass-config-secret.yaml index 85bcfea30..9f92a53af 100644 --- a/deploy/kubernetes/examples/demo-storageclass-config-secret.yaml +++ b/deploy/kubernetes/examples/demo-storageclass-config-secret.yaml @@ -1,19 +1,19 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: demo-storageclass-config-secret -provisioner: block.csi.ibm.com -volumeBindingMode: WaitForFirstConsumer -parameters: - # non-csi.storage.k8s.io parameters may be specified in by_management_id per system and/or outside by_management_id as the cross-system default. 
- - by_management_id: '{"demo-management-id-1":{"pool":"demo-pool-1","SpaceEfficiency":"dedup_compressed","volume_name_prefix":"demo-prefix-1"}, - "demo-management-id-2":{"pool":"demo-pool-2","volume_name_prefix":"demo-prefix-2", "io_group": "demo-iogrp"}}' # Optional. - pool: demo-pool - SpaceEfficiency: thin # Optional. - volume_name_prefix: demo-prefix # Optional. - - csi.storage.k8s.io/fstype: xfs # Optional. Values ext4/xfs. The default is ext4. - csi.storage.k8s.io/secret-name: demo-config-secret - csi.storage.k8s.io/secret-namespace: default -allowVolumeExpansion: true +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: demo-storageclass-config-secret +provisioner: block.csi.ibm.com +volumeBindingMode: WaitForFirstConsumer +parameters: + # non-csi.storage.k8s.io parameters may be specified in by_management_id per system and/or outside by_management_id as the cross-system default. + + by_management_id: '{"demo-management-id-1":{"pool":"demo-pool-1","SpaceEfficiency":"dedup_compressed","volume_name_prefix":"demo-prefix-1"}, + "demo-management-id-2":{"pool":"demo-pool-2","volume_name_prefix":"demo-prefix-2", "io_group": "demo-iogrp"}}' # Optional. + pool: demo-pool + SpaceEfficiency: thin # Optional. + volume_name_prefix: demo-prefix # Optional. + + csi.storage.k8s.io/fstype: xfs # Optional. Values ext4/xfs. The default is ext4. 
+ csi.storage.k8s.io/secret-name: demo-config-secret + csi.storage.k8s.io/secret-namespace: default +allowVolumeExpansion: true diff --git a/deploy/kubernetes/examples/demo-volumegroup.yaml b/deploy/kubernetes/examples/demo-volumegroup.yaml new file mode 100644 index 000000000..8bd2c8d64 --- /dev/null +++ b/deploy/kubernetes/examples/demo-volumegroup.yaml @@ -0,0 +1,10 @@ +apiVersion: csi.ibm.com/v1 +kind: VolumeGroup +metadata: + name: demo-volumegroup +spec: + volumeGroupClassName: demo-volumegroupclass + source: + selector: + matchLabels: + demo-volumegroup-key: demo-volumegroup-value diff --git a/deploy/kubernetes/examples/demo-volumegroupclass.yaml b/deploy/kubernetes/examples/demo-volumegroupclass.yaml new file mode 100644 index 000000000..05335d5a1 --- /dev/null +++ b/deploy/kubernetes/examples/demo-volumegroupclass.yaml @@ -0,0 +1,10 @@ +apiVersion: csi.ibm.com/v1 +kind: VolumeGroupClass +metadata: + name: demo-volumegroupclass +driver: block.csi.ibm.com +parameters: + volume_group_name_prefix: demo-prefix + + volumegroup.storage.ibm.io/secret-name: demo-secret + volumegroup.storage.ibm.io/secret-namespace: default diff --git a/deploy/kubernetes/examples/demo-volumereplication.yaml b/deploy/kubernetes/examples/demo-volumereplication.yaml index c3b5f5f9b..ceb65ca5a 100644 --- a/deploy/kubernetes/examples/demo-volumereplication.yaml +++ b/deploy/kubernetes/examples/demo-volumereplication.yaml @@ -1,12 +1,12 @@ -apiVersion: replication.storage.openshift.io/v1alpha1 -kind: VolumeReplication -metadata: - name: demo-volumereplication - namespace: default -spec: - volumeReplicationClass: demo-volumereplicationclass - replicationState: primary - replicationHandle: demo-volumehandle - dataSource: - kind: PersistentVolumeClaim - name: demo-pvc-file-system # Ensure that this is in the same namespace as VolumeReplication. 
+apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplication +metadata: + name: demo-volumereplication + namespace: default +spec: + volumeReplicationClass: demo-volumereplicationclass + replicationState: primary + replicationHandle: demo-volumehandle + dataSource: + kind: PersistentVolumeClaim + name: demo-pvc-file-system # Ensure that this is in the same namespace as VolumeReplication. diff --git a/deploy/kubernetes/examples/demo-volumereplicationclass.yaml b/deploy/kubernetes/examples/demo-volumereplicationclass.yaml index c592ca323..8c9caa516 100644 --- a/deploy/kubernetes/examples/demo-volumereplicationclass.yaml +++ b/deploy/kubernetes/examples/demo-volumereplicationclass.yaml @@ -1,12 +1,12 @@ -apiVersion: replication.storage.openshift.io/v1alpha1 -kind: VolumeReplicationClass -metadata: - name: demo-volumereplicationclass -spec: - provisioner: block.csi.ibm.com - parameters: - system_id: demo-system-id - copy_type: async # Optional. Values sync/async. The default is sync. - - replication.storage.openshift.io/replication-secret-name: demo-secret - replication.storage.openshift.io/replication-secret-namespace: default +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplicationClass +metadata: + name: demo-volumereplicationclass +spec: + provisioner: block.csi.ibm.com + parameters: + system_id: demo-system-id + copy_type: async # Optional. Values sync/async. The default is sync. 
+ + replication.storage.openshift.io/replication-secret-name: demo-secret + replication.storage.openshift.io/replication-secret-namespace: default diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 626b3cc5e..b77dd315b 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -3,70 +3,88 @@ * [Welcome](book_files/csi_block_storage_kc_welcome.md) * [What's new](book_files/csi_block_storage_kc_whatsnew.md) * [Release notes](book_files/csi_block_storage_kc_rn.md) - * [What's new in 1.10.0](content/release_notes/whats_new.md) - * [Compatibility and requirements](content/release_notes/compatibility_requirements.md) - * [Supported storage systems](content/release_notes/supported_storage.md) - * [Supported operating systems](content/release_notes/supported_os.md) - * [Supported orchestration platforms](content/release_notes/supported_orchestration.md) - * [Change log](content/release_notes/change_log.md) - * [1.10.0 (July 2022)](content/release_notes/changelog_1.10.0.md) - * [1.9.0 (March 2022)](content/release_notes/changelog_1.9.0.md) - * [1.8.0 (December 2021)](content/release_notes/changelog_1.8.0.md) - * [1.7.0 (September 2021)](content/release_notes/changelog_1.7.0.md) - * [1.6.0 (June 2021)](content/release_notes/changelog_1.6.0.md) - * [1.5.1 (July 2021)](content/release_notes/changelog_1.5.1.md) - * [1.5.0 (March 2021)](content/release_notes/changelog_1.5.0.md) - * [1.4.0 (December 2020)](content/release_notes/changelog_1.4.0.md) - * [1.3.0 (September 2020)](content/release_notes/changelog_1.3.0.md) - * [1.2.0 (June 2020)](content/release_notes/changelog_1.2.0.md) - * [1.1.0 (March 2020)](content/release_notes/changelog_1.1.0.md) - * [1.0.0 (November 2019)](content/release_notes/changelog_1.0.0.md) - * [Limitations](content/release_notes/limitations.md) - * [Known issues](content/release_notes/known_issues.md) + * [What's new in 1.11.2](content/release_notes/whats_new.md) + * [Compatibility and requirements](content/release_notes/compatibility_requirements.md) + * 
[Supported storage systems](content/release_notes/supported_storage.md) + * [Supported operating systems](content/release_notes/supported_os.md) + * [Supported orchestration platforms](content/release_notes/supported_orchestration.md) + * [Change log](content/release_notes/change_log.md) + * [1.11.2 (May 2024)](content/release_notes/changelog_1.11.2.md) + * [1.11.1 (May 2023)](content/release_notes/changelog_1.11.1.md) + * [1.11.0 (January 2023)](content/release_notes/changelog_1.11.0.md) + * [1.10.0 (July 2022)](content/release_notes/changelog_1.10.0.md) + * [1.9.0 (March 2022)](content/release_notes/changelog_1.9.0.md) + * [1.8.0 (December 2021)](content/release_notes/changelog_1.8.0.md) + * [1.7.0 (September 2021)](content/release_notes/changelog_1.7.0.md) + * [1.6.0 (June 2021)](content/release_notes/changelog_1.6.0.md) + * [1.5.1 (July 2021)](content/release_notes/changelog_1.5.1.md) + * [1.5.0 (March 2021)](content/release_notes/changelog_1.5.0.md) + * [1.4.0 (December 2020)](content/release_notes/changelog_1.4.0.md) + * [1.3.0 (September 2020)](content/release_notes/changelog_1.3.0.md) + * [1.2.0 (June 2020)](content/release_notes/changelog_1.2.0.md) + * [1.1.0 (March 2020)](content/release_notes/changelog_1.1.0.md) + * [1.0.0 (November 2019)](content/release_notes/changelog_1.0.0.md) + * [Limitations](content/release_notes/limitations.md) + * [Known issues](content/release_notes/known_issues.md) * [Overview](content/overview.md) * [Installing](content/installation/installation.md) - * [Compatibility and requirements](content/installation/install_compatibility_requirements.md) - * [Installing the operator and driver](content/installation/install_operator_driver.md) - * [Installing the driver with the OpenShift web console](content/installation/install_driver_openshift_web.md) - * [Installing the driver with GitHub](content/installation/install_driver_github.md) - * [Installing the driver with 
OperatorHub.io](content/installation/install_driver_operatorhub.md) - * [Installing the host definer](content/installation/install_hostdefiner.md) - * [Installing the host definer with the OpenShift web console](content/installation/install_hostdefiner_openshift_web.md) - * [Installing the host definer with GitHub](content/installation/install_hostdefiner_github.md) - * [Installing the host definer with OperatorHub.io](content/installation/install_hostdefiner_operatorhub.md) - * [Uninstalling the operator and driver](content/installation/uninstalling.md) - * [Uninstalling the driver with the OpenShift web console](content/installation/uninstall_driver_openshift_web.md) - * [Uninstalling the driver with GitHub](content/installation/uninstall_driver_github.md) - * [Uninstalling the driver with OperatorHub.io](content/installation/uninstall_driver_operatorhub.md) - * [Uninstalling the host definer](content/installation/uninstalling_hostdefiner.md) - * [Upgrading](content/installation/upgrade.md) - * [Manual upgrade with OpenShift](content/installation/upgrade_manual_openshift.md) + * [Compatibility and requirements](content/installation/install_compatibility_requirements.md) + * [Installing the operator and driver](content/installation/install_operator_driver.md) + * [Installing the driver with the OpenShift web console](content/installation/install_driver_openshift_web.md) + * [Installing the driver with GitHub](content/installation/install_driver_github.md) + * [Installing the driver with OperatorHub.io](content/installation/install_driver_operatorhub.md) + * [Installing the host definer](content/installation/install_hostdefiner.md) + * [Installing the host definer with the OpenShift web console](content/installation/install_hostdefiner_openshift_web.md) + * [Installing the host definer with GitHub](content/installation/install_hostdefiner_github.md) + * [Installing the host definer with OperatorHub.io](content/installation/install_hostdefiner_operatorhub.md) + * 
[Uninstalling the operator and driver](content/installation/uninstalling.md) + * [Uninstalling the driver with the OpenShift web console](content/installation/uninstall_driver_openshift_web.md) + * [Uninstalling the driver with GitHub](content/installation/uninstall_driver_github.md) + * [Uninstalling the driver with OperatorHub.io](content/installation/uninstall_driver_operatorhub.md) + * [Uninstalling the host definer](content/installation/uninstalling_hostdefiner.md) + * [Upgrading](content/installation/upgrade.md) + * [Manual upgrade with OpenShift](content/installation/upgrade_manual_openshift.md) * [Configuring](content/configuration/configuring.md) - * [Creating a Secret](content/configuration/creating_secret.md) - * [Creating a StorageClass](content/configuration/creating_volumestorageclass.md) - * [Creating a PersistentVolumeClaim (PVC)](content/configuration/creating_pvc.md) - * [Creating a StatefulSet](content/configuration/creating_statefulset.md) - * [Creating a VolumeSnapshotClass](content/configuration/creating_volumesnapshotclass.md) - * [Creating a VolumeSnapshot](content/configuration/creating_volumesnapshot.md) - * [Creating a VolumeReplicationClass](content/configuration/creating_volumereplicationclass.md) - * [Finding a `system_id`](content/configuration/finding_systemid.md) - * [Creating a VolumeReplication](content/configuration/creating_volumereplication.md) - * [Expanding a PersistentVolumeClaim (PVC)](content/configuration/expanding_pvc.md) - * [Configuring for CSI Topology](content/configuration/configuring_topology.md) - * [Creating a Secret with topology awareness](content/configuration/creating_secret_topology_aware.md) - * [Creating a StorageClass with topology awareness](content/configuration/creating_storageclass_topology_aware.md) - * [Creating a VolumeSnapshotClass with topology awareness](content/configuration/creating_volumesnapshotclass_topology_aware.md) - * [Configuring the host 
definer](content/configuration/configuring_hostdefiner.md) - * [Advanced configuration](content/configuration/advanced_configuration.md) - * [Importing an existing volume](content/configuration/importing_existing_volume.md) + * [Creating a Secret](content/configuration/creating_secret.md) + * [Creating a StorageClass](content/configuration/creating_volumestorageclass.md) + * [Creating a PersistentVolumeClaim (PVC)](content/configuration/creating_pvc.md) + * [Creating a StatefulSet](content/configuration/creating_statefulset.md) + * [Creating a VolumeSnapshotClass](content/configuration/creating_volumesnapshotclass.md) + * [Creating a VolumeSnapshot](content/configuration/creating_volumesnapshot.md) + * [Creating a VolumeReplicationClass](content/configuration/creating_volumereplicationclass.md) + * [Finding the `system_id`](content/configuration/finding_systemid.md) + * [Finding the `replication_policy_name`](content/configuration/finding_replication_policy_name.md) + * [Creating a VolumeReplication](content/configuration/creating_volumereplication.md) + * [Creating a VolumeGroupClass](content/configuration/creating_volumegroupclass.md) + * [Creating a VolumeGroup](content/configuration/creating_volumegroup.md) + * [Expanding a PersistentVolumeClaim (PVC)](content/configuration/expanding_pvc.md) + * [Configuring for CSI Topology](content/configuration/configuring_topology.md) + * [Creating a Secret with topology awareness](content/configuration/creating_secret_topology_aware.md) + * [Creating a StorageClass with topology awareness](content/configuration/creating_storageclass_topology_aware.md) + * [Creating a VolumeSnapshotClass with topology awareness](content/configuration/creating_volumesnapshotclass_topology_aware.md) + * [Configuring the host definer](content/configuration/configuring_hostdefiner.md) + * [Configuring for policy-based replication](content/configuration/configuring_policy_based_replication.md) + - [Creating a StorageClass with volume 
groups](content/configuration/creating_storageclass_vg.md) + - [Creating a PersistentVolumeClaim (PVC) with volume groups](content/configuration/creating_pvc_vg.md) + - [Creating a VolumeReplication with policy-based replication](content/configuration/creating_volumereplication_pbr.md) + - [Creating a VolumeGroupClass](content/configuration/creating_volumegroupclass.md) + - [Creating a VolumeGroup](content/configuration/creating_volumegroup.md) + * [Advanced configuration](content/configuration/advanced_configuration.md) + * [Importing an existing volume](content/configuration/importing_existing_volume.md) + * [Importing an existing volume group](content/configuration/importing_existing_volume_group.md) * [Using](content/using/using.md) - * [Using dynamic host connectivity](content/using/using_hostdefinition.md) - * [Changing node connectivity](content/using/changing_node_connectivity.md) - * [Sample configurations for running a stateful container](content/using/sample_stateful_container.md) + * [Using dynamic host connectivity](content/using/using_hostdefinition.md) + * [Changing node connectivity](content/using/changing_node_connectivity.md) + * [Adding optional labels for dynamic host definition](content/using/using_hostdefinition_labels.md) + * [Using the CSI driver with policy-based replication](content/using/using_policy_based_replication.md) + * [Promoting a volume group](content/using/promoting_vg.md) + * [Deleting a VolumeGroup with a replication policy](content/using/delete_vg.md) + * [Removing a PVC from a volume group with a replication policy](content/using/removing_pvc_vg.md) + * [Sample configurations for running a stateful container](content/using/sample_stateful_container.md) * [Troubleshooting](content/troubleshooting/troubleshooting.md) - * [Log and status collection](content/troubleshooting/log_status_collect.md) - * [Recovering a pod volume attachment from a crashed Kubernetes node](content/troubleshooting/recovering_vol_attach_crashed_k8s.md) 
- * [Miscellaneous troubleshooting](content/troubleshooting/troubleshooting_misc.md) + * [Log and status collection](content/troubleshooting/log_status_collect.md) + * [Recovering a pod volume attachment from a crashed Kubernetes node](content/troubleshooting/recovering_vol_attach_crashed_k8s.md) + * [Miscellaneous troubleshooting](content/troubleshooting/troubleshooting_misc.md) * [Notices](book_files/storage_csi_notices.md) - * [Trademarks](book_files/csi_trademarks.md) -* [Publications and related information](book_files/csi_block_storage_kc_pdfs.md) \ No newline at end of file + * [Trademarks](book_files/csi_trademarks.md) +* [Publications and related information](book_files/csi_block_storage_kc_pdfs.md) diff --git a/docs/book_files/csi_block_storage_kc_pdfs.md b/docs/book_files/csi_block_storage_kc_pdfs.md index 73e05cca6..2edb1209b 100644 --- a/docs/book_files/csi_block_storage_kc_pdfs.md +++ b/docs/book_files/csi_block_storage_kc_pdfs.md @@ -10,27 +10,26 @@ To view a PDF file, you need Adobe™ Reader. You can download it at no charge f For more information about storage systems and orchestration platforms that are supported by IBM® block storage CSI driver, see the following resources. 
-IBM resources - -- [IBM SAN Volume Controller documentation](https://www.ibm.com/docs/en/sanvolumecontroller) -- [IBM Spectrum Scale documentation](https://www.ibm.com/docs/en/spectrum-scale) -- [IBM FlashSystem® 5200, 5000, 5100, Storwize® V5100 and V5000E documentation](https://www.ibm.com/docs/en/flashsystem-5x00) -- [IBM FlashSystem™ 7300, 7200, and Storwize V7000 documentation](https://www.ibm.com/docs/en/flashsystem-7x00) -- [IBM Spectrum Virtualize as Software Only documentation](https://www.ibm.com/docs/en/spectrumvirtualsoftw) -- [IBM FlashSystem 9500, 9200, and 9100 documentation](https://www.ibm.com/docs/en/flashsystem-9x00) -- [IBM FlashSystem A9000 documentation](https://www.ibm.com/docs/en/flashsystem-a9000) -- [IBM FlashSystem A9000R documentation](https://www.ibm.com/docs/en/flashsystem-a9000r) -- [IBM DS8880 documentation](https://www.ibm.com/docs/en/ds8880) -- [IBM DS8900 documentation](https://www.ibm.com/docs/en/ds8900) -- [IBM Spectrum® Access for IBM Cloud® Private Blueprint](https://www-01.ibm.com/common/ssi/cgi-bin/ssialias?htmlfid=TSW03569USEN&) - - Used as the FlexVolume driver based solution for OpenShift® 3.11, using [IBM Storage Enabler for Containers](https://www.ibm.com/docs/en/stgenablercontainers) - -- [IBM Storage for Red Hat® OpenShift Blueprint](http://www.redbooks.ibm.com/abstracts/redp5565.html?Open) - -External resources -- [Red Hat OpenShift Documentation](https://docs.openshift.com/) -- [Persistent volumes on Kubernetes](https://kubernetes.io/docs/concepts/storage/volumes/) -- [Kubernetes Documentation](https://kubernetes.io/docs/home/) -- [Kubernetes Blog](https://kubernetes.io/blog/) +**IBM resources** + +- [IBM SAN Volume Controller documentation](https://www.ibm.com/docs/sanvolumecontroller) +- [IBM Spectrum Scale documentation](https://www.ibm.com/docs/spectrum-scale) +- [IBM FlashSystem® 5200, 5000, 5100, Storwize® V5100 and V5000E documentation](https://www.ibm.com/docs/flashsystem-5x00) +- [IBM FlashSystem™ 7300, 
7200, and Storwize V7000 documentation](https://www.ibm.com/docs/flashsystem-7x00) +- [IBM Spectrum Virtualize as Software Only documentation](https://www.ibm.com/docs/spectrumvirtualsoftw) +- [IBM FlashSystem 9500, 9200, and 9100 documentation](https://www.ibm.com/docs/flashsystem-9x00) +- [IBM DS8880 documentation](https://www.ibm.com/docs/ds8880) +- [IBM DS8900 documentation](https://www.ibm.com/docs/ds8900) +- [IBM Spectrum® Access for IBM Cloud® Private Blueprint](https://www-01.ibm.com/common/ssi/cgi-bin/ssialias?htmlfid=TSW03569USEN&) + + Used as the FlexVolume driver based solution for OpenShift® 3.11, using [IBM Storage Enabler for Containers](https://www.ibm.com/docs/stgenablercontainers) + +- [IBM Storage for Red Hat® OpenShift Blueprint](http://www.redbooks.ibm.com/abstracts/redp5565.html?Open) +- [IBM Spectrum Virtualize with IBM block storage CSI driver for VMware Tanzu Kubernetes Grid multicloud (TKGm)](https://www.ibm.com/support/pages/node/6616257) + +**External resources** +- [Red Hat OpenShift Documentation](https://docs.openshift.com/) +- [Persistent volumes on Kubernetes](https://kubernetes.io/docs/concepts/storage/volumes/) +- [Kubernetes Documentation](https://kubernetes.io/docs/home/) +- [Kubernetes Blog](https://kubernetes.io/blog/) diff --git a/docs/book_files/csi_block_storage_kc_rn.md b/docs/book_files/csi_block_storage_kc_rn.md index 560f900eb..90f81cfcd 100644 --- a/docs/book_files/csi_block_storage_kc_rn.md +++ b/docs/book_files/csi_block_storage_kc_rn.md @@ -1,4 +1,4 @@ # Release notes -The following release information is available for version 1.10.0 of the IBM® block storage CSI driver. +The following release information is available for version 1.11.2 of the IBM® block storage CSI driver. 
diff --git a/docs/book_files/csi_block_storage_kc_welcome.md b/docs/book_files/csi_block_storage_kc_welcome.md index ccb57ce33..adff97c49 100644 --- a/docs/book_files/csi_block_storage_kc_welcome.md +++ b/docs/book_files/csi_block_storage_kc_welcome.md @@ -1,4 +1,4 @@ -# IBM block storage CSI driver 1.10.0 welcome page +# IBM block storage CSI driver 1.11.2 welcome page IBM block storage CSI driver is based on an open-source IBM project, included as a part of IBM Storage orchestration for containers. IBM Storage orchestration for containers enables enterprises to implement a modern container-driven hybrid multicloud environment that can diff --git a/docs/book_files/csi_block_storage_kc_whatsnew.md b/docs/book_files/csi_block_storage_kc_whatsnew.md index ce23dec4a..aaa56c237 100644 --- a/docs/book_files/csi_block_storage_kc_whatsnew.md +++ b/docs/book_files/csi_block_storage_kc_whatsnew.md @@ -1,11 +1,8 @@ # What's new -This topic lists the dates and nature of updates to the published information of IBM® block storage CSI driver 1.10.0. +This topic lists the dates and nature of updates to the published information of IBM® block storage CSI driver 1.11.2. -|Date|Nature of updates to the published information| -|----|----------------------------------------------| -|12 September 2022|
  • Updated the following information for dynamic host definition feature:
    • [Installing the host definer](../content/installation/install_hostdefiner.md)
    • [Configuring the host definer](../content/configuration/configuring_hostdefiner.md)
    • [Log collection when using host definer](../content/troubleshooting/log_status_collect.md#log-collection-when-using-host-definer)
  • Updated [Supported operating systems](../content/release_notes/supported_os.md)
  • Added new limitations related to DS8000 family storage systems and snapshots (see [Limitations](../content/release_notes/limitations.md)).
| -|3 August 2022|Updated [Importing an existing volume](../content/configuration/importing_existing_volume.md).| -|1 August 2022|General typo and formatting updates.| -|26 July 2022|The version information was added to IBM Documentation.
For more information about this version, see the [What's new in 1.10.0](../content/release_notes/whats_new.md) section in the release notes.| +| Date | Nature of updates to the published information | +|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 01 May 2024 | The version information was added to IBM Documentation.
For more information about this version, see the [What's new in 1.11.2](../content/release_notes/whats_new.md) section in the release notes. | diff --git a/docs/book_files/k8s_driver_arch_diagram_1.11.svg b/docs/book_files/k8s_driver_arch_diagram_1.11.svg new file mode 100644 index 000000000..40bc45498 --- /dev/null +++ b/docs/book_files/k8s_driver_arch_diagram_1.11.svg @@ -0,0 +1,918 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + Kubernetes Cluster + + + + + + + + + + + + + + ibm-block-csi + node + + + + + + ibm-block-csi + controller + + + + + + + Kubelet CSI + + + + + + Worker node + + + + + + + + + ibm-block-csi + node + + + + + + Kubelet CSI + + + + + + Worker node + + + + + + + + + ibm-block-csi + node + + + + + + Kubelet CSI + + + + + + Worker node + + + + + + + + + Master nodes + + + + + + iSCSI or FC + + + + + + + + Management + + Data Path + + + IBM Spectrum Virtualize Family + IBM SAN Volume Controller + + + + + + + + + IBM DS8000 Family + + + diff --git a/docs/content/configuration/advanced_configuration.md b/docs/content/configuration/advanced_configuration.md index 8ef33857d..1340b927a 100644 --- a/docs/content/configuration/advanced_configuration.md +++ b/docs/content/configuration/advanced_configuration.md @@ -2,5 +2,6 @@ Use advanced configuration tasks to further customize the configuration of the IBM® block storage CSI driver. 
-- [Importing an existing volume](importing_existing_volume.md) +- [Importing an existing volume](importing_existing_volume.md) +- [Importing an existing volume group](importing_existing_volume_group.md) diff --git a/docs/content/configuration/configuring_hostdefiner.md b/docs/content/configuration/configuring_hostdefiner.md index 7dd71e5c4..dd6106908 100644 --- a/docs/content/configuration/configuring_hostdefiner.md +++ b/docs/content/configuration/configuring_hostdefiner.md @@ -7,8 +7,8 @@ For more information about using the host definer, see [Using dynamic host defin |Field|Description| |---------|--------| |`prefix`|Adds a prefix to the hosts defined by the host definer.
**Note:** The prefix length is bound by the limitation of the storage system. When defined, the length is a combination of both the prefix and node (server) hostname.| -|`connectivityType`|Selects the connectivity type for the host ports.
Possible input values are:
- `nvmeofc` for use with NVMe over Fibre Channel connectivity
- `fc` for use with Fibre Channel over SCSI connectivity
- `iscsi` for use with iSCSI connectivity
By default, this field is blank and the host definer selects the first of available connectivity types on the node, according to the following hierarchy: NVMe, FC, iSCSI.| +|`connectivityType`|Selects the connectivity type for the host ports.
Possible input values are:
- `nvmeofc` for use with NVMe over Fibre Channel connectivity
- `fc` for use with Fibre Channel over SCSI connectivity
- `iscsi` for use with iSCSI connectivity
By default, this field is blank and the host definer selects the first of available connectivity types on the node, according to the following hierarchy: NVMe, FC, iSCSI.
**Note:** When left blank, the connectivity type will update along with any changes within the host ports, according to the set hierarchy. If the value is set and there are host port changes, connectivity needs to be manually updated. For more information, see [Changing node connectivity](../using/changing_node_connectivity.md).| |`allowDelete`|Defines whether the host definer is allowed to delete host definitions on the storage system.
Input values are `true` or `false`.
The default value is `true`.| |`dynamicNodeLabeling`|Defines whether the nodes that run the CSI node pod are dynamically labeled or if the user must create the `hostdefiner.block.csi.ibm.com/manage-node=true` label on each relevant node. This label tells the host definer which nodes to manage their host definition on the storage side.
Input values are `true` or `false`.
The default value is `false`, where the user must manually create this label on every node to be managed by the host definer for dynamic host definition on the storage.| -For an example HostDefiner yaml file, see [csi_v1_hostdefiner_cr.yaml](https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.10.0/config/samples/csi_v1_hostdefiner_cr.yaml). \ No newline at end of file +For an example HostDefiner yaml file, see [csi_v1_hostdefiner_cr.yaml](https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.11.2/config/samples/csi_v1_hostdefiner_cr.yaml). \ No newline at end of file diff --git a/docs/content/configuration/configuring_policy_based_replication.md b/docs/content/configuration/configuring_policy_based_replication.md new file mode 100644 index 000000000..8915e9076 --- /dev/null +++ b/docs/content/configuration/configuring_policy_based_replication.md @@ -0,0 +1,17 @@ +# Configuring for policy-based replication + +Use this information for specific configuring information when using policy-based replication with the IBM® block storage CSI driver. + +Policy-based replication uses volume groups to automatically deploy and manage replication. Be sure to use dynamic volume groups when configuring the CSI driver for policy-based replication. + +See the following sections for more information: +- [Limitations](../release_notes/limitations.md) +- [Compatibility and requirements](../installation/install_compatibility_requirements.md) +- [Using the CSI driver with policy-based replication](../using/using_policy_based_replication.md). + +

+- [Creating a StorageClass with volume groups](creating_storageclass_vg.md) +- [Creating a PersistentVolumeClaim (PVC) with volume groups](creating_pvc_vg.md) +- [Creating a VolumeReplication with policy-based replication](creating_volumereplication_pbr.md) +- [Creating a VolumeGroupClass](creating_volumegroupclass.md) +- [Creating a VolumeGroup](creating_volumegroup.md) \ No newline at end of file diff --git a/docs/content/configuration/configuring_topology.md b/docs/content/configuration/configuring_topology.md index 52465e15b..09f297bc1 100644 --- a/docs/content/configuration/configuring_topology.md +++ b/docs/content/configuration/configuring_topology.md @@ -2,6 +2,10 @@ Use this information for specific configuring information when using CSI Topology with the IBM® block storage CSI driver. +Using the CSI Topology feature, volume access can be limited to a subset of nodes, based on regions and availability zones. Nodes can be located in various regions within an availability zone, or across the different availability zones. Using the CSI Topology feature can ease volume provisioning for workloads within a multi-zone architecture. + +Using dynamic host definition together with the CSI Topology feature, allows for defining hosts on the proper storage system, according to the topology zone configuration. + **Important:** Be sure that all of the topology requirements are met before starting. For more information, see [Compatibility and requirements](../installation/install_compatibility_requirements.md). - [Creating a Secret with topology awareness](creating_secret_topology_aware.md) diff --git a/docs/content/configuration/creating_pvc.md b/docs/content/configuration/creating_pvc.md index 21e899879..5f189b9c5 100644 --- a/docs/content/configuration/creating_pvc.md +++ b/docs/content/configuration/creating_pvc.md @@ -4,7 +4,9 @@ Create a PersistentVolumeClaim (PVC) YAML file for a persistent volume (PV). 
The IBM® block storage CSI driver supports using both file system and raw block volume modes. -**Important:** If not defined, the default mode is `Filesystem`. Be sure to define the mode as `Block` if this configuration is preferred. +**Important:** + - If not defined, the default mode is `Filesystem`. Be sure to define the mode as `Block` if this configuration is preferred. + - The volume group labels are not pre-defined. Be sure to match the selector in the target volume group (`spec.source.selector`). For an example of creating a PVC using the VolumeGroup configuration, see [Creating a PVC within a volume group with the dynamic volume group feature](#creating-a-pvc-within-a-volume-group-with-the-dynamic-volume-group-feature). **Note:** The examples below create the PVC with a storage size 1 Gb. This can be changed, per customer needs. @@ -20,6 +22,7 @@ Use the following sections, according to your PVC needs: - [Creating a PVC for volume with file system](#creating-a-pvc-for-volume-with-file-system) - [Creating a PVC for raw block volume](#creating-a-pvc-for-raw-block-volume) +- [Creating a PVC within a volume group with the dynamic volume group feature](#creating-a-pvc-within-a-volume-group-with-the-dynamic-volume-group-feature) - [Creating a PVC from volume snapshot](#creating-a-pvc-from-volume-snapshot) - [Creating a volume clone from an existing PVC](#creating-a-volume-clone-from-an-existing-pvc) @@ -59,6 +62,27 @@ Create a PVC YAML file, similar to the following `demo-pvc-raw-block.yaml` file, storage: 1Gi storageClassName: demo-storageclass +## Creating a PVC within a volume group with the dynamic volume group feature + +Create a PVC YAML file similar to the following `demo-pvc-in-volume-group.yaml` file, changing the `volumeMode` as needed. + +**Note:** Be sure to match the selector in the target volume group (`spec.source.selector`). For more information, see [Creating a VolumeGroup](creating_volumegroup.md). 
+ + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: demo-pvc-in-volume-group + labels: + demo-volumegroup-key: demo-volumegroup-value + spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: demo-storageclass + ## Creating a PVC from volume snapshot To create a PVC from an existing volume snapshot, create a PVC YAML file, similar to the following `demo-pvc-from-snapshot.yaml` file, with the size of 1 Gb. diff --git a/docs/content/configuration/creating_pvc_vg.md b/docs/content/configuration/creating_pvc_vg.md new file mode 100644 index 000000000..d1934322d --- /dev/null +++ b/docs/content/configuration/creating_pvc_vg.md @@ -0,0 +1,35 @@ +# Creating a PersistentVolumeClaim (PVC) with volume groups + +Create a PersistentVolumeClaim (PVC) YAML file for a persistent volume (PV). + +**Note:** For information and parameter definitions that are not related to volume groups, be sure to see the information provided in [Creating a PersistentVolumeClaim (PVC)](creating_pvc.md), in addition to the current section. + + +**Note:** The examples below create the PVC with a storage size 1 Gb. This can be changed, per customer needs. + +Create a PVC YAML file similar to the following `demo-pvc-in-volume-group.yaml` file, changing the `volumeMode` as needed. + +**Note:** Be sure to match the selector in the target volume group (`spec.source.selector`). For more information, see [Creating a VolumeGroup](creating_volumegroup.md). + + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: demo-pvc-in-volume-group + labels: + demo-volumegroup-key: demo-volumegroup-value + spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: demo-storageclass + +After each YAML file creation, use the `kubectl apply` command. + +``` +kubectl apply -f .yaml +``` + +The `persistentvolumeclaim/ created` message is emitted. 
diff --git a/docs/content/configuration/creating_storageclass_vg.md b/docs/content/configuration/creating_storageclass_vg.md new file mode 100644 index 000000000..647f704b5 --- /dev/null +++ b/docs/content/configuration/creating_storageclass_vg.md @@ -0,0 +1,37 @@ +# Creating a StorageClass with volume groups + +Use the following procedure to create and apply the storage classes when using policy-based replication and volume groups. + +**Note:** For information and parameter definitions that are not related to volume groups, be sure to see the information provided in [Creating a StorageClass](creating_volumestorageclass.md), in addition to the current section. + +**Note:** This procedure is applicable for both Kubernetes and Red Hat® OpenShift®. For Red Hat OpenShift, replace `kubectl` with `oc` in all relevant commands. + +**Attention:** Volume groups can only be managed by **either** the associated VolumeGroup **or** the associated StorageClass (with the `volume_group` parameter). If a volume group is already associated with a VolumeGroup, then each volume of this StorageClass can be automatically deleted. + + + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: demo-storageclass + provisioner: block.csi.ibm.com + parameters: + pool: demo-pool + io_group: demo-iogrp # Optional. + volume_group: demo-volumegroup # Optional. + SpaceEfficiency: thin # Optional. + volume_name_prefix: demo-prefix # Optional. + virt_snap_func: "false" # Optional. Values "true"/"false". The default is "false". + + csi.storage.k8s.io/fstype: xfs # Optional. Values ext4/xfs. The default is ext4. + csi.storage.k8s.io/secret-name: demo-secret + csi.storage.k8s.io/secret-namespace: default + allowVolumeExpansion: true + + +Apply the storage class. + + ``` + kubectl apply -f .yaml + ``` + +The `storageclass.storage.k8s.io/ created` message is emitted. 
\ No newline at end of file diff --git a/docs/content/configuration/creating_volumegroup.md b/docs/content/configuration/creating_volumegroup.md new file mode 100644 index 000000000..d06d07045 --- /dev/null +++ b/docs/content/configuration/creating_volumegroup.md @@ -0,0 +1,44 @@ +# Creating a VolumeGroup + +Create a VolumeGroup YAML file to specify a volume group key, for creating PersistentVolumeClaim (PVC) groups. + +VolumeGroupClass needs to be present before a VolumeGroup can be created. For more information, see [Creating a VolumeGroupClass](creating_volumegroupclass.md). + +Before creating a volume group, be sure to follow all of the volume group configurations, found in [Compatibility and requirements](../installation/install_compatibility_requirements.md). + +1. Create a Volume Group by using the `demo-volumegroup.yaml`. + + **Note:** Be sure to match the selector in the target volume group (`spec.source.selector`) with the PVC. For more information, see [Creating a PersistentVolumeClaim (PVC)](creating_pvc.md). + + ``` + apiVersion: csi.ibm.com/v1 + kind: VolumeGroup + metadata: + name: demo-volumegroup + spec: + volumeGroupClassName: demo-volumegroupclass + source: + selector: + matchLabels: + demo-volumegroup-key: demo-volumegroup-value + ``` + +2. After the YAML file is created, apply it by using the `kubectl apply -f` command. + + ``` + kubectl apply -f .yaml + ``` + + The `volumegroup.csi.ibm.com/ created` message is emitted. + +3. Verify that the volume group was created. + + Run the `kubectl describe volumegroup ` command. + + This command lists which PVCs are in the volume group, within the `status.pvcList` section. + +## Assigning a PVC to a volume group + +To assign a PVC to a specific volume group add a label within the PVC, matching the selector of the volume group. + +**Note:** PVCs that belong to a volume group must first be removed from the assigned volume group in order to be deleted. 
\ No newline at end of file diff --git a/docs/content/configuration/creating_volumegroupclass.md b/docs/content/configuration/creating_volumegroupclass.md new file mode 100644 index 000000000..728b961ab --- /dev/null +++ b/docs/content/configuration/creating_volumegroupclass.md @@ -0,0 +1,32 @@ +# Creating a VolumeGroupClass + +Create a VolumeGroupClass YAML file to enable volume groups. + +Volume groups allow users to create PersistentVolumeClaim (PVC) groups. PVCs can be dynamically managed through the volume groups created. This allows actions across all PVCs within the same volume group at once. + +Volume groups are used with policy-based replication, allowing all PVCs within a single volume group to be replicated at once. For more information about volume groups and policy-based replication, see the following sections within your Spectrum Virtualize product documentation [IBM Documentation](https://www.ibm.com/docs). + +- **Product overview** > **Technical overview** > **Volume groups** +- **What's new** > **Getting started with policy-based replication** + +In order to enable volume groups for your storage system, create a VolumeGroupClass YAML file, similar to the following `demo-volumegroupclass.yaml`. + +``` +apiVersion: csi.ibm.com/v1 +kind: VolumeGroupClass +metadata: + name: demo-volumegroupclass +driver: block.csi.ibm.com +parameters: + volume_group_name_prefix: demo-prefix + + volumegroup.storage.ibm.io/secret-name: demo-secret + volumegroup.storage.ibm.io/secret-namespace: default +``` + +After the YAML file is created, apply it by using the `kubectl apply -f` command. + +``` +kubectl apply -f .yaml +``` +The `volumegroupclass.csi.ibm.com/ created` message is emitted. 
\ No newline at end of file diff --git a/docs/content/configuration/creating_volumereplication.md b/docs/content/configuration/creating_volumereplication.md index 337479faa..fc643e4c2 100644 --- a/docs/content/configuration/creating_volumereplication.md +++ b/docs/content/configuration/creating_volumereplication.md @@ -8,12 +8,15 @@ VolumeReplicationClass needs to be present before a VolumeReplication can be cre When replicating a volume, be sure to follow all of the replication configurations, found in [Compatibility and requirements](../installation/install_compatibility_requirements.md) before volume replication. +- When policy-based replication **is not** being used, use the `spec.csi.volumeHandle` of the relevant target PersistentVolume (PV) for the `replicationHandle` value. +- When policy-based replication **is** being used, **do not** use the `replicationHandle` value. Using the `replicationHandle` while policy-based replication is defined in the VolumeReplicationClass results in the following error message: `got an invalid parameter: replicationHandle`. +- `spec.dataSource.kind` values are `PersistentVolumeClaim` or `VolumeGroup`. Only one value can be used at a time. Only use the `VolumeGroup` value when policy-based replication is being used. +- `spec.dataSource.name` value must be the VolumeGroup name, when policy-based replication is being used. + 1. Replicate a specific PersistentVolumeClaim (PVC) using the `demo-volumereplication.yaml`. For more information about PVC configuration, see [Creating a PersistentVolumeClaim (PVC)](creating_pvc.md). - **Note:** Use the `spec.csi.volumeHandle` of the relevant target PersistentVolume (PV) for the `replicationHandle` value. - ``` apiVersion: replication.storage.openshift.io/v1alpha1 kind: VolumeReplication @@ -47,4 +50,4 @@ When replicating a volume, be sure to follow all of the replication configuratio - **Secondary** Indicates that the source volume is the secondary volume. 
- **Unknown** Indicates that the driver does not recognize the replication state. - **Note:** For information about changing the replication state, see the [Usage](https://github.com/csi-addons/volume-replication-operator/tree/v0.2.0#usage) section of the Volume Replication Operator for csi-addons. \ No newline at end of file + **Note:** For information about changing the replication state, see the [Usage](https://github.com/csiblock/volume-replication-operator/tree/v0.1.0#usage) section of the Volume Replication Operator for csi-addons. \ No newline at end of file diff --git a/docs/content/configuration/creating_volumereplication_pbr.md b/docs/content/configuration/creating_volumereplication_pbr.md new file mode 100644 index 000000000..9b79d51d3 --- /dev/null +++ b/docs/content/configuration/creating_volumereplication_pbr.md @@ -0,0 +1,47 @@ +# Creating a VolumeReplication with policy-based replication + +Create a VolumeReplication YAML file to replicate a specific PersistentVolumeClaim (PVC). + +VolumeReplicationClass needs to be present before a VolumeReplication can be created. For more information, see [Creating a VolumeReplicationClass](creating_volumereplicationclass.md). + +**Note:** For information and parameter definitions that are not related to policy-based replication, be sure to see the information provided in [Creating a VolumeReplication](creating_volumereplication.md), in addition to the current section. + +**Note:** Use the `VolumeGroup` value for `spec.dataSource.kind`. + +1. Replicate a specific PersistentVolumeClaim (PVC) using the `demo-volumereplication.yaml`. + + For more information about PVC configuration, see [Creating a PersistentVolumeClaim (PVC)](creating_pvc.md). 
+ + ``` + apiVersion: replication.storage.openshift.io/v1alpha1 + kind: VolumeReplication + metadata: + name: demo-volumereplication + namespace: default + spec: + volumeReplicationClass: demo-volumereplicationclass + replicationState: primary + dataSource: + kind: VolumeGroup + name: demo-volumegroup # Ensure that this is in the same namespace as VolumeGroup. + ``` + +2. After the YAML file is created, apply it by using the `kubectl apply -f` command. + + ``` + kubectl apply -f .yaml + ``` + + The `volumereplication.replication.storage.openshift.io/ created` message is emitted. + +3. Verify that the volume was replicated. + + Run the `kubectl describe volumereplication` command. + + See the `status.state` section to see which of the following states the replication is in: + + - **Primary** Indicates that the source volume is the primary volume. + - **Secondary** Indicates that the source volume is the secondary volume. + - **Unknown** Indicates that the driver does not recognize the replication state. + + **Note:** For information about changing the replication state, see the [Usage](https://github.com/csiblock/volume-replication-operator/tree/v0.1.0#usage) section of the Volume Replication Operator for csi-addons. \ No newline at end of file diff --git a/docs/content/configuration/creating_volumereplicationclass.md b/docs/content/configuration/creating_volumereplicationclass.md index 648a78496..e62353fa6 100644 --- a/docs/content/configuration/creating_volumereplicationclass.md +++ b/docs/content/configuration/creating_volumereplicationclass.md @@ -8,7 +8,15 @@ In order to enable volume replication for your storage system, create a VolumeRe When configuring the file, be sure to use the same array secret and array secret namespace as defined in [Creating a Secret](creating_secret.md). -Use the `system_id` of the storage system containing the `replicationHandle` volumes. 
For information on obtaining your storage system `system_id`, see [Finding a `system_id`](finding_systemid.md). +If using policy-based replication, use the `replication_policy` parameter, with the `replication_policy_name` value, instead of `system_id`. For information on obtaining your volume `replication_policy_name`, see [Finding the `replication_policy_name`](finding_replication_policy_name.md). + +If policy-based replication is not in use, use the `system_id` of the storage system containing the `replicationHandle` volumes. For information on obtaining your storage system `system_id`, see [Finding a `system_id`](finding_systemid.md). + +**Important:** Be sure to only use one of the following parameters: `replication_policy` **or** `system_id`. Using both parameters within the VolumeReplicationClass, results in the following error message: `got an invalid parameter: system_id`. + +Use one of the following examples, depending on the replication type that is being used. + +**Example 1: VolumeReplicationClass _not_ using Spectrum Virtualize policy-based replication** ``` apiVersion: replication.storage.openshift.io/v1alpha1 @@ -24,6 +32,20 @@ spec: replication.storage.openshift.io/replication-secret-name: demo-secret replication.storage.openshift.io/replication-secret-namespace: default ``` +**Example 2: VolumeReplicationClass using Spectrum Virtualize policy-based replication** + +``` +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeReplicationClass +metadata: + name: demo-volumereplicationclass +spec: + provisioner: block.csi.ibm.com + parameters: + replication_policy: demo_replication-policy-name + replication.storage.openshift.io/replication-secret-name: demo-secret + replication.storage.openshift.io/replication-secret-namespace: default +``` After the YAML file is created, apply it by using the `kubectl apply -f` command. 
diff --git a/docs/content/configuration/creating_volumesnapshotclass.md b/docs/content/configuration/creating_volumesnapshotclass.md index 0ba8d2bed..8e6bf8ee2 100644 --- a/docs/content/configuration/creating_volumesnapshotclass.md +++ b/docs/content/configuration/creating_volumesnapshotclass.md @@ -22,7 +22,6 @@ When configuring the file, be sure to use the same array secret and array secret For more information about stretched snapshot limitations and requirements, see [Limitations](../release_notes/limitations.md) and [Compatibility and requirements](../installation/install_compatibility_requirements.md). -- The `pool` parameter is not available on IBM FlashSystem A9000 and A9000R storage systems. For these storage systems, the snapshot must be created on the same pool as the source. ``` apiVersion: snapshot.storage.k8s.io/v1 diff --git a/docs/content/configuration/creating_volumestorageclass.md b/docs/content/configuration/creating_volumestorageclass.md index 10922ae1e..9dfbcf682 100644 --- a/docs/content/configuration/creating_volumestorageclass.md +++ b/docs/content/configuration/creating_volumestorageclass.md @@ -14,11 +14,12 @@ When configuring the file, be sure to use the same array secret and array secret Use the `SpaceEfficiency` parameters for each storage system, as defined in the following table. These values are not case-sensitive. +**Important:** When using external provisioning policies for linked pools, do not use the `SpaceEfficiency` parameter, the capacity savings within the linked pools are defined by the provisioning policy. If the `SpaceEfficiency` parameter is used together with provisioning policies, the volume cannot be created. For more information see **What's new** > **Getting started with policy-based replication** > **Configuring policy-based replication** > **Creating provisioning policy and assigning to pools**. 
+ #### `SpaceEfficiency` parameter definitions per storage system type |Storage system type|SpaceEfficiency parameter options| |-------------------|---------------------------------| -|IBM FlashSystem® A9000 and A9000R|Always includes deduplication and compression. No need to specify during configuration.| |IBM Spectrum® Virtualize family|- `thick` (default value)
- `thin`
- `compressed`
- `dedup_thin` (creates volumes that are deduplicated with thin-provisioning)
- `dedup_compressed` (creates deduplicated and compressed volumes)

**Note:**
- The `deduplicated` value is deprecated. Use `dedup_compressed`, if possible. When used, `deduplicated` provides the same results as `dedup_compressed`.
- If not specified, the default value is `thick`.| |IBM® DS8000® family| - `none` (default value)
- `thin`

**Note:** If not specified, the default value is `none`.| @@ -41,6 +42,8 @@ Use the `SpaceEfficiency` parameters for each storage system, as defined in the - The `csi.storage.k8s.io/fstype` parameter is optional. The values that are allowed are _ext4_ or _xfs_. The default value is _ext4_. - The `volume_name_prefix` parameter is optional. - The `io_group` and `volume_group` parameters are only available on Spectrum Virtualize storage systems. +

**Attention:** Volume groups can only be managed by **either** the associated VolumeGroup **or** the associated StorageClass (with the `volume_group` parameter). If a volume group is already associated with a VolumeGroup, then each volume of this StorageClass can be automatically deleted. +

**Note:** If no `io_group` is defined, the volume is created within the storage system's default I/O group(s). - The `virt_snap_func` parameter is optional but necessary in Spectrum Virtualize storage systems if using the Snapshot function. To enable the Snapshot function, set the value to _"true"_. The default value is _"false"_. If the value is `"false"` the snapshot will use the FlashCopy function. **Note:** diff --git a/docs/content/configuration/finding_replication_policy_name.md b/docs/content/configuration/finding_replication_policy_name.md new file mode 100644 index 000000000..6575fc098 --- /dev/null +++ b/docs/content/configuration/finding_replication_policy_name.md @@ -0,0 +1,8 @@ +# Finding the `replication_policy_name` + +Find the remote storage system `replication_policy_name` parameter on your storage system in order to create a VolumeReplicationClass YAML file, enabling policy-based replication. + +For finding the `replication_policy_name` parameter on your Spectrum Virtualize storage system, use the `lsreplicationpolicy` command, or through the Spectrum Virtualize user interface. + +For more information, see **Command-line interface** > **Copy Service commands** > **lsreplicationpolicy** within your specific product documentation on [IBM Docs](https://www.ibm.com/docs/). + diff --git a/docs/content/configuration/finding_systemid.md b/docs/content/configuration/finding_systemid.md index 247ec7441..a53fc85d1 100644 --- a/docs/content/configuration/finding_systemid.md +++ b/docs/content/configuration/finding_systemid.md @@ -1,8 +1,8 @@ -# Finding a `system_id` +# Finding the `system_id` Find the remote storage system `system_id` parameter on your storage system in order to create a VolumeReplicationClass YAML file, enabling replication. -For finding the `system_id` parameter on your Spectrum Virtualize storage system, use the `lspartnership` command. 
+For finding the `system_id` parameter on your Spectrum Virtualize storage system, use the `lspartnership` command, or through the Spectrum Virtualize user interface.. For more information, see **Command-line interface** > **Copy Service commands** > **lspartnership** within your specific product documentation on [IBM Docs](https://www.ibm.com/docs/). diff --git a/docs/content/configuration/importing_existing_volume.md b/docs/content/configuration/importing_existing_volume.md index 367cd318d..40dd0bfc2 100644 --- a/docs/content/configuration/importing_existing_volume.md +++ b/docs/content/configuration/importing_existing_volume.md @@ -25,29 +25,7 @@ Before starting to import an existing volume, find the `volumeHandle` in the exi For more information about Spectrum Virtualize products, find your product information in [IBM Documentation](https://www.ibm.com/docs/). -- **For FlashSystem A9000 and A9000R:** - The `volumeHandle` is formatted as `A9000:id;WWN`. - - - Through command line: - - Find the `id` and `WWN` for the volume, by using the `vol_list -f` command. - - For more information, see **Reference** > **Command-line reference (12.3.2.x)** > **Volume management commands** > **Listing volumes** within your specific product documentation on [IBM Documentation](https://www.ibm.com/docs/). - - - Through the Hyper-Scale Management user interface: - - 1. Select **Pools and Volumes Views** > **Volumes** from the side bar. - - The **Volumes** table is displayed. - - 2. Select the `Volume`. - - The **Volume Properties** form is displayed. - - 3. Use the **ID** and **WWN** values. - - For more information, see [IBM Hyper-Scale Manager documentation](https://www.ibm.com/docs/en/hyper-scale-manager/). - **For DS8000 family:** @@ -89,8 +67,8 @@ Use this procedure to help build a PV YAML file for your volumes. 
apiVersion: v1 kind: PersistentVolume metadata: - # annotations: - # pv.kubernetes.io/provisioned-by: block.csi.ibm.com + annotations: + pv.kubernetes.io/provisioned-by: block.csi.ibm.com name: demo-pv spec: accessModes: @@ -98,6 +76,7 @@ Use this procedure to help build a PV YAML file for your volumes. capacity: storage: 1Gi csi: + fsType: ext4 controllerExpandSecretRef: name: demo-secret namespace: default @@ -110,17 +89,10 @@ Use this procedure to help build a PV YAML file for your volumes. nodeStageSecretRef: name: demo-secret namespace: default - # fsType: ext4 driver: block.csi.ibm.com - # volumeAttributes: - # pool_name: demo-pool - # storage_type: SVC - # volume_name: demo-prefix_demo-pvc-file-system - # array_address: demo-management-address - volumeHandle: SVC:0;600507640082000B08000000000004FF - # persistentVolumeReclaimPolicy: Retain + volumeHandle: SVC:id;uid storageClassName: demo-storageclass - # volumeMode: Filesystem + persistentVolumeReclaimPolicy: Retain ``` 3. Create a PersistentVolumeClaim (PVC) YAML file. diff --git a/docs/content/configuration/importing_existing_volume_group.md b/docs/content/configuration/importing_existing_volume_group.md new file mode 100644 index 000000000..282a065d5 --- /dev/null +++ b/docs/content/configuration/importing_existing_volume_group.md @@ -0,0 +1,58 @@ +# Importing an existing volume group + +Use this information to import volume groups that were created externally from the IBM® block storage CSI driver, by using a VolumeGroupContent YAML file. + +**Attention:** Read these important notices before importing a volume group. If these conditions are not met existing volumes within the volume group are deleted. + 1. Before you begin importing existing volume groups from the storage system, be sure to import any existing volumes that belong to the volume groups that will be imported (see [Importing an existing volume](importing_existing_volume.md)). 
When importing volumes, add the relevant labels to ensure that the volume is pointing to the volume group (see [Creating a PVC within a volume group with the dynamic volume group feature](creating_pvc.md#creating-a-pvc-within-a-volume-group-with-the-dynamic-volume-group-feature)). + + 2. Volume groups can only be managed by **either** the associated VolumeGroup **or** the associated StorageClass (with the `volume_group` parameter). If a volume group is imported and a StorageClass is already associated with it, then each volume of this StorageClass can be automatically deleted after the import. + +Before starting to import an existing volume group, find the `volumeGroupHandle` in the existing volume group in order to include the information in the VolumeGroupContent YAML file. + +The `volumeGroupHandle` is formatted as `SVC:id;name`. + +Through the Spectrum Virtualize command-line, find both the `id` and `name` attributes, by using the `lsvolumegroup` command. + +For more information, see **Command-line interface** > **Volume commands** > **lsvolumegroup** within your specific product documentation on [IBM Documentation](https://www.ibm.com/docs/). + +**Note:** The volume group name can also be found through the management GUI. Go to **Volumes** > **Volume Groups** from the side bar. + +Use this procedure to help build a VolumeGroupContent YAML file for your volume groups. + +1. Create a VolumeGroupContent YAML file. + + **Important:** Be sure to include the `volumeGroupHandle` parameter or errors may occur. + + Update the volumeGroupHandle according to the volume group information found previously. + + ``` + apiVersion: csi.ibm.com/v1 + kind: VolumeGroupContent + metadata: + name: demo-volumegroupcontent + spec: + source: + driver: block.csi.ibm.com + volumeGroupHandle: SVC:id;name + ``` + +2. Create a VolumeGroup YAML file. + + **Note:** + + - Be sure to include the `volumeGroupClassName`. 
+ - For more information about creating a VolumeGroup YAML file, see [Creating a VolumeGroup](creating_volumegroup.md). + + ``` + apiVersion: csi.ibm.com/v1 + kind: VolumeGroup + metadata: + name: demo-volumegroup-from-content + spec: + volumeGroupClassName: demo-volumegroupclass + source: + volumeGroupContentName: demo-volumegroupcontent + selector: + matchLabels: + demo-volumegroup-key: demo-volumegroup-value + ``` diff --git a/docs/content/installation/install_compatibility_requirements.md b/docs/content/installation/install_compatibility_requirements.md index cf0feb2ca..e802fc73b 100644 --- a/docs/content/installation/install_compatibility_requirements.md +++ b/docs/content/installation/install_compatibility_requirements.md @@ -13,11 +13,8 @@ The CSI driver requires the following ports to be opened on the worker nodes OS Port 3260 - - **FlashSystem A9000 and A9000R** - - Port 7778 - - - **IBM Spectrum® Virtualize family includes IBM® SAN Volume Controller and IBM FlashSystem® family members that are built with IBM Spectrum® Virtualize (including FlashSystem 5xxx, 7xxx, 9xxx)** + + - **IBM Spectrum® Virtualize family** Port 22 @@ -36,13 +33,13 @@ Complete these steps to prepare your environment for installing the CSI (Contain Download and save the following YAML file: ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/master/deploy/99-ibm-attach.yaml > 99-ibm-attach.yaml + curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.11.2/deploy/99-ibm-attach.yaml > 99-ibm-attach.yaml ``` This file can be used for both Fibre Channel and iSCSI configurations. To support iSCSI, uncomment the last two lines in the file. **Important:** - - The `99-ibm-attach.yaml` configuration file overrides any files that exist on your system. Only use this file if the files mentioned are not already created.
If one or more were created, edit this YAML file, as necessary. + - The `99-ibm-attach.yaml` configuration file overrides any files that exist on your system. Only use this file if the files mentioned are not already created.
If one or more have been created, edit this YAML file, as necessary. - The `99-ibm-attach.yaml` configuration file contains the default configuration for the CSI driver. It is best practice to update the file according to your storage system and application networking needs. Apply the YAML file. @@ -53,7 +50,7 @@ Complete these steps to prepare your environment for installing the CSI (Contain 2. Configure your storage system host attachment, per worker node. - **Note:** IBM® block storage CSI driver 1.10 introduces dynamic host definition. For more information and installation instructions, see [Installing the host definer](install_hostdefiner.md). If this feature is not installed, the nodes are not dynamically defined on the storage system and they must be defined manually. + **Note:** IBM® block storage CSI driver 1.11.0 introduced dynamic host definition. For more information and installation instructions, see [Installing the host definer](install_hostdefiner.md). If this feature is not installed, the nodes are not dynamically defined on the storage system and they must be defined manually. Be sure to configure your storage system host attachment according to your storage system instructions. @@ -83,15 +80,32 @@ Complete these steps to prepare your environment for installing the CSI (Contain The instructions and relevant YAML files to enable volume snapshots can be found at: [https://github.com/kubernetes-csi/external-snapshotter#usage](https://github.com/kubernetes-csi/external-snapshotter#usage) -5. (Optional) If planning on using volume replication (remote copy function), enable support on your orchestration platform cluster and storage system. +5. (Optional) If planning on using policy-based replication with volume groups, enable support on your orchestration platform cluster and storage system. 1. To enable support on your Kubernetes cluster, install the following replication CRDs once per cluster. 
``` - curl -O https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.3.0/config/crd/bases/replication.storage.openshift.io_volumereplicationclasses.yaml + curl -O https://raw.githubusercontent.com/IBM/csi-volume-group-operator/v0.9.1/config/crd/bases/csi.ibm.com_volumegroupclasses.yaml + kubectl apply -f csi.ibm.com_volumegroupclasses.yaml + + curl -O https://raw.githubusercontent.com/IBM/csi-volume-group-operator/v0.9.1/config/crd/bases/csi.ibm.com_volumegroupcontents.yaml + kubectl apply -f csi.ibm.com_volumegroupcontents.yaml + + curl -O https://raw.githubusercontent.com/IBM/csi-volume-group-operator/v0.9.1/config/crd/bases/csi.ibm.com_volumegroups.yaml + kubectl apply -f csi.ibm.com_volumegroups.yaml + ``` + + 2. Enable policy-based replication on volume groups, see the following section within your Spectrum Virtualize product documentation on [IBM Documentation](https://www.ibm.com/docs/): **Administering** > **Managing policy-based replication** > **Assigning replication policies to volume groups**. + +5. (Optional) If planning on using volume replication (remote copy function), enable support on your orchestration platform cluster and storage system. + + 1. To enable support on your Kubernetes cluster, install the following volume group CRDs once per cluster. 
+ + ``` + curl -O https://raw.githubusercontent.com/csiblock/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplicationclasses.yaml kubectl apply -f ./replication.storage.openshift.io_volumereplicationclasses.yaml - curl -O https://raw.githubusercontent.com/csi-addons/volume-replication-operator/v0.3.0/config/crd/bases/replication.storage.openshift.io_volumereplications.yaml + curl -O https://raw.githubusercontent.com/csiblock/volume-replication-operator/v0.1.0/config/crd/bases/replication.storage.openshift.io_volumereplications.yaml kubectl apply -f ./replication.storage.openshift.io_volumereplications.yaml ``` @@ -109,4 +123,6 @@ Complete these steps to prepare your environment for installing the CSI (Contain - **Configuring** > **Configuration details** > **HyperSwap system configuration details** - Stretched topology planning and configuration ([SAN Volume Controller](https://www.ibm.com/docs/en/sanvolumecontroller) only): - **Planning** > **Planning for high availability** > **Planning for a stretched topology system** - - **Configuring** > **Configuration details** > **Stretched system configuration details** \ No newline at end of file + - **Configuring** > **Configuration details** > **Stretched system configuration details** + +8. (Optional) If planning on using policy-based replication with your Spectrum Virtualize storage system, verify that the correct replication policy is in place. This can be done either through the Spectrum Virtualize user interface (go to **Policies** > **Replication policies**) or through the CLI (`lsreplicationpolicy`). If a replication policy is not in place create one before replicating a volume through the CSI driver. 
\ No newline at end of file diff --git a/docs/content/installation/install_driver_github.md b/docs/content/installation/install_driver_github.md index f2366b0c5..6e1ed0a63 100644 --- a/docs/content/installation/install_driver_github.md +++ b/docs/content/installation/install_driver_github.md @@ -14,7 +14,7 @@ Use the following steps to install the operator and driver, with [GitHub](https: 1. Download the manifest from GitHub. ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.10.0/deploy/installer/generated/ibm-block-csi-operator.yaml > ibm-block-csi-operator.yaml + curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.11.2/deploy/installer/generated/ibm-block-csi-operator.yaml > ibm-block-csi-operator.yaml ``` 2. (Optional) Update the image fields in the `ibm-block-csi-operator.yaml`. @@ -40,7 +40,7 @@ Use the following steps to install the operator and driver, with [GitHub](https: 1. Download the manifest from GitHub. ``` - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.10.0/config/samples/csi.ibm.com_v1_ibmblockcsi_cr.yaml > csi.ibm.com_v1_ibmblockcsi_cr.yaml + curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.11.2/config/samples/csi.ibm.com_v1_ibmblockcsi_cr.yaml > csi.ibm.com_v1_ibmblockcsi_cr.yaml ``` 2. (Optional) Update the image repository field, tag field, or both in the `csi.ibm.com_v1_ibmblockcsi_cr.yaml`. diff --git a/docs/content/installation/install_hostdefiner.md b/docs/content/installation/install_hostdefiner.md index e3124773b..c3494f6eb 100644 --- a/docs/content/installation/install_hostdefiner.md +++ b/docs/content/installation/install_hostdefiner.md @@ -13,3 +13,5 @@ The host definer can be installed at any time in the following ways: - With OperatorHub.io (see [Installing the host definer with OperatorHub.io](install_hostdefiner_operatorhub.md)). The host definer can also be downloaded and installed as part of the operator and driver installation process. 
For more information, see [Installing the operator and driver](install_operator_driver.md). + +After the HostDefiner custom resource, created by the operator, is installed, the HostDefiner pod automatically creates HostDefinition custom resources. The HostDefinition information can be viewed using the `kubectl get hostdefinition` command. diff --git a/docs/content/installation/install_hostdefiner_github.md b/docs/content/installation/install_hostdefiner_github.md index e2283e103..3cf5e353e 100644 --- a/docs/content/installation/install_hostdefiner_github.md +++ b/docs/content/installation/install_hostdefiner_github.md @@ -6,7 +6,7 @@ Use the following steps to install the HostDefiner custom resource, with [GitHub 1. Download the custom resource manifest from [GitHub](https://github.com/IBM/ibm-block-csi-operator). - curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.10.0/config/samples/csi_v1_hostdefiner_cr.yaml > csi_v1_hostdefiner_cr.yaml + curl https://raw.githubusercontent.com/IBM/ibm-block-csi-operator/v1.11.2/config/samples/csi_v1_hostdefiner_cr.yaml > csi_v1_hostdefiner_cr.yaml 2. Install the `csi_v1_hostdefiner_cr.yaml`. diff --git a/docs/content/overview.md b/docs/content/overview.md index 4d5d9a8b6..292370384 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -9,11 +9,11 @@ By leveraging CSI (Container Storage Interface) drivers for IBM storage systems, IBM storage orchestration for containers includes the following driver types for storage provisioning: - The IBM block storage CSI driver, for block storage (documented here). -- The IBM Spectrum® Scale CSI driver, for file storage. For specific Spectrum Scale and Spectrum Scale CSI driver product information, see [IBM Spectrum Scale documentation](https://www.ibm.com/docs/en/spectrum-scale/). +- The IBM Storage® Scale CSI driver, for file storage. 
For specific Storage Scale and Storage Scale CSI driver product information, see [IBM Storage Scale documentation](https://www.ibm.com/docs/en/storage-scale/). For details about volume provisioning with Kubernetes, refer to [Persistent volumes on Kubernetes](https://kubernetes.io/docs/concepts/storage/volumes/). **Note:** For the user convenience, this guide might refer to IBM block storage CSI driver as CSI driver. -![This image shows CSI driver integration with IBM block storage.](../book_files/k8s_driver_arch_diagram.png "Integration of IBM block storage systems and CSI driver in a Kubernetes environment") +![This image shows CSI driver integration with IBM block storage.](../book_files/k8s_driver_arch_diagram_1.11.svg "Integration of IBM block storage systems and CSI driver in a Kubernetes environment") diff --git a/docs/content/release_notes/changelog_1.10.0.md b/docs/content/release_notes/changelog_1.10.0.md index b820455b2..3432cd5c0 100644 --- a/docs/content/release_notes/changelog_1.10.0.md +++ b/docs/content/release_notes/changelog_1.10.0.md @@ -1,6 +1,6 @@ # 1.10.0 (July 2022) -IBM® block storage CSI driver 1.10.0 adds new support and enhancements. +IBM® block storage CSI driver 1.10.0 added new support and enhancements. - New Spectrum Virtualize family system support for the following: - New Spectrum Virtualize Snapshot function that was introduced in IBM Spectrum Virtualize 8.5.1 release (Alpha support) - Dynamic host definition diff --git a/docs/content/release_notes/changelog_1.11.0.md b/docs/content/release_notes/changelog_1.11.0.md new file mode 100644 index 000000000..36110f8f1 --- /dev/null +++ b/docs/content/release_notes/changelog_1.11.0.md @@ -0,0 +1,12 @@ +# 1.11.0 (January 2023) + +IBM® block storage CSI driver 1.11.0 added new support and enhancements. 
+- Added dynamic host definition enhancements +- New Spectrum Virtualize family system support for policy-based replication and dynamic volume groups +- Additional orchestration platform support for Red Hat OpenShift 4.12 and Kubernetes 1.25 and 1.26 + +Version 1.11.0 also resolved the following issue: + +|Ticket ID|Severity|Description| +|---------|--------|-----------| +|**CSI-4554**|Low|During HostDefinition creation no success event is emitted when the status is in a _Ready_ state.| \ No newline at end of file diff --git a/docs/content/release_notes/changelog_1.11.1.md b/docs/content/release_notes/changelog_1.11.1.md new file mode 100644 index 000000000..5f527f899 --- /dev/null +++ b/docs/content/release_notes/changelog_1.11.1.md @@ -0,0 +1,5 @@ +# 1.11.1 (May 2023) + +IBM® block storage CSI driver 1.11.1 added new support and enhancements. +- Additional orchestration platform support for Red Hat OpenShift 4.13 and Kubernetes 1.27 + diff --git a/docs/content/release_notes/changelog_1.11.2.md b/docs/content/release_notes/changelog_1.11.2.md new file mode 100644 index 000000000..9f6211500 --- /dev/null +++ b/docs/content/release_notes/changelog_1.11.2.md @@ -0,0 +1,6 @@ +# 1.11.2 (May 2024) + +IBM® block storage CSI driver 1.11.2 added new support and enhancements. +- Additional orchestration platform support for Red Hat OpenShift 4.14 and 4.15, as well as Kubernetes 1.28 and 1.29 +- New RHEL 9.x support for x86 architecture +- Remove support for RHEL 7.x (reached end of life) \ No newline at end of file diff --git a/docs/content/release_notes/compatibility_requirements.md b/docs/content/release_notes/compatibility_requirements.md index 696c8c335..bd7c1710b 100644 --- a/docs/content/release_notes/compatibility_requirements.md +++ b/docs/content/release_notes/compatibility_requirements.md @@ -1,3 +1,3 @@ # Compatibility and requirements -This section specifies the compatibility and requirements of version 1.10.0 of IBM® block storage CSI driver. 
+This section specifies the compatibility and requirements of version 1.11.2 of IBM® block storage CSI driver. diff --git a/docs/content/release_notes/known_issues.md b/docs/content/release_notes/known_issues.md index 0109e6d9f..8d56e958a 100644 --- a/docs/content/release_notes/known_issues.md +++ b/docs/content/release_notes/known_issues.md @@ -1,6 +1,6 @@ # Known issues -This section details the known issues in IBM® block storage CSI driver 1.10.0, along with possible solutions or workarounds (if available). +This section details the known issues in IBM® block storage CSI driver 1.11.2, along with possible solutions or workarounds (if available). The following severity levels apply to known issues: @@ -12,14 +12,15 @@ The following severity levels apply to known issues: **Important:** -- **The issues listed below apply to IBM block storage CSI driver 1.10.0**. As long as a newer version has not yet been released, a newer release notes edition for IBM block storage CSI driver 1.10.0 might be issued to provide a more updated list of known issues and workarounds. -- When a newer version is released for general availability, the release notes of this version will no longer be updated. Accordingly, check the release notes of the newer version to learn whether any newly discovered issues affect IBM block storage CSI driver 1.10.0 or whether the newer version resolves any of the issues listed below. +- **The issues listed below apply to IBM block storage CSI driver 1.11.2**. As long as a newer version has not yet been released, a newer release notes edition for IBM block storage CSI driver 1.11.2 might be issued to provide a more updated list of known issues and workarounds. +- When a newer version is released for general availability, the release notes of this version will no longer be updated. 
Accordingly, check the release notes of the newer version to learn whether any newly discovered issues affect IBM block storage CSI driver 1.11.2 or whether the newer version resolves any of the issues listed below. |Ticket ID|Severity|Description| |---------|--------|-----------| +|**CSI-5231**|Service|In some cases, when the volume group selector information is updated, triggering an add or remove PVC operation the following can occur:
- No PVC update events are emitted.
- Finalizers are not added or removed to all of the PVCs. In these cases the PVCs can potentially be deleted even when part of a volume group.
**Workaround:** To prevent this issue from occurring, trigger add or remove PVC operations by editing the PVC volume group label (by either adding or removing the label).
If the volume group selector has been updated, manually update the finalizer within the PVC.<br />
- To add a finalizer to an added PVC, use the following command:
`kubectl patch pvc <pvc_name> -p '{"metadata":{"finalizers":["volumegroup.storage.ibm.io/pvc-protection"]}}'`<br />
- To remove a specific finalizer, use the `kubectl edit pvc <pvc_name>` command and then remove the `volumegroup.storage.ibm.io/pvc-protection` finalizer from the finalizer list.<br />
- To remove all finalizers from a PVC, use the following command: `kubectl patch pvc <pvc_name> -p '{"metadata":{"finalizers":null}}'`|
**Workaround:** Ensure that all host ports are properly configured on the storage system. If the issue continues and the CSI driver can still not attach a pod, contact IBM Support.| |**CSI-4446**|Service|In extremely rare cases, the HostDefiner `hostdefiner.block.csi.ibm.com/manage-node=true` labels are not deleted during csinode deletion from the nodes. This occurs even when the `allowDelete` and `dynamicNodeLabeling` parameters are set to `true`.
**Workaround:** Manually delete the hosts from the storage system.| |**CSI-3382**|Service|After CSI Topology label deletion, volume provisioning does not work, even when not using any topology-aware YAML files.
**Workaround:** To allow volume provisioning through the CSI driver, delete the operator pod.
After the deletion, a new operator pod is created and the controller pod is automatically restarted, allowing for volume provisioning.| |**CSI-2157**|Service|In extremely rare cases, too many Fibre Channel worker node connections may result in a failure when the CSI driver attempts to attach a pod. As a result, the `Host for node: {0} was not found, ensure all host ports are configured on storage` error message may be found in the IBM block storage CSI driver controller logs.
**Workaround:** Ensure that all host ports are properly configured on the storage system. If the issue continues and the CSI driver can still not attach a pod, contact IBM Support.| -|**CSI-4554**|Low|During HostDefinition creation no success event is emitted when the status is in a _Ready_ state.
**Workaround:** No workaround available.| + diff --git a/docs/content/release_notes/limitations.md b/docs/content/release_notes/limitations.md index 2ed2e7acb..b0145486a 100644 --- a/docs/content/release_notes/limitations.md +++ b/docs/content/release_notes/limitations.md @@ -2,20 +2,25 @@ As opposed to known issues, limitations are functionality restrictions that are part of the predefined system design and capabilities in a particular version. +- [IBM DS8000 usage limitations](#ibm®-ds8000®-usage-limitations) +- [High availability (HA) limitations](#high-availability-ha-limitations) +- [I/O group limitations](#io-group-limitations) +- [NVMe/FC usage limitations](#nvme®fc-usage-limitations) +- [Policy-based replication limitations](#policy-based-replication-limitations) +- [Snapshot function limitations](#snapshot-function-limitations) +- [Volume attach limitations](#volume-attach-limitations) +- [Volume clone limitations](#volume-clone-limitations) +- [Volume expansion limitations](#volume-clone-limitations) +- [Volume group limitations](#volume-group-limitations) +- [Volume replication limitations](#volume-replication-limitations) +- [Volume snapshot limitations](#volume-snapshot-limitations) + ## IBM® DS8000® usage limitations When using the CSI driver with DS8000 family storage systems: - Connectivity limits on the storage side might be reached with DS8000 family products due to too many open connections. This occurs due to connection closing lag times from the storage side. - There is a limit of 11 FlashCopy relationships per volume (including all snapshots and clones). -## Dynamic host definition limitations - -Dynamic host definition is only supported for use with IBM Spectrum Virtualize family storage systems. 
- -In addition, the following are not supported when using dynamic host definitions with the IBM block storage CSI driver: -- I/O groups -- CSI Topology (see [Configuring for CSI Topology](../configuration/configuring_topology.md)) - ## High availability (HA) limitations **Note:** - HyperSwap topology is only supported for use with IBM Spectrum Virtualize family storage systems. @@ -42,6 +47,11 @@ I/O group configuration is only supported for use with IBM Spectrum Virtualize f For other limitations with your storage system, see the following section within your Spectrum Virtualize product documentation on [IBM Documentation](https://www.ibm.com/docs/en/): **Configuring** > **Host attachment** > **NVMe over Fibre Channel host attachments** > **FC-NVMe limitations and SAN configuration guidelines**. +## Policy-based replication limitations +Policy-based replication is only supported for use with IBM Spectrum Virtualize family storage system versions 8.5.2 or higher. To see if your specific product is supported and for more information, see **What's new** > **Getting started with policy-based replication** within your Spectrum Virtualize product documentation on [IBM Documentation](https://www.ibm.com/docs). + +For other policy-based replication limitations with your storage system, see the Configuration Limits and Restrictions for your product software version. From the [IBM Support](https://www.ibm.com/mysupport) website, search for `Configuration Limits and Restrictions` and your product name. For example, `Configuration Limits and Restrictions FlashSystem 9500`. + ## Snapshot function limitations **Important:** Snapshot function support is only Alpha support. @@ -94,6 +104,14 @@ The following limitations apply when expanding volumes with the IBM block storag Volume group configuration is only supported for use with IBM Spectrum Virtualize family storage systems. 
+The following limitations apply when using volume groups: + +- PersistentVolumeClaims (PVCs) can only be defined inside volume groups in the following ways: + - Defined within a StorageClass + - Defined within a PVC, using the volume group key +- PVCs that already belong to a StorageClass with a defined volume group cannot be added to a VolumeGroup object. +- Volume groups must be empty in order to be deleted. + ## Volume replication limitations When a role switch is conducted, this is not reflected within the other orchestration platform replication objects. diff --git a/docs/content/release_notes/supported_orchestration.md b/docs/content/release_notes/supported_orchestration.md index fc26ed73c..89541dcc9 100644 --- a/docs/content/release_notes/supported_orchestration.md +++ b/docs/content/release_notes/supported_orchestration.md @@ -2,17 +2,19 @@ The following table details orchestration platforms suitable for deployment of the IBM® block storage CSI driver. -|Orchestration platform|Version|Architecture| -|----------------------|-------|------------| -|Kubernetes|1.23|x86| -|Kubernetes|1.24|x86| -|Red Hat OpenShift|4.9|x86, IBM Z, IBM Power Systems1| -|Red Hat OpenShift|4.10|x86, IBM Z, IBM Power Systems1| -|Red Hat OpenShift|4.11|x86, IBM Z, IBM Power Systems1| +|Orchestration platform| Version |Architecture| +|----------------------|---------|------------| +|Kubernetes| 1.27 |x86| +|Kubernetes| 1.28 |x86| +|Kubernetes| 1.29 |x86| +|Red Hat OpenShift| 4.12 |x86, IBM Z, IBM Power Systems1| +|Red Hat OpenShift| 4.13 |x86, IBM Z, IBM Power Systems1| +|Red Hat OpenShift| 4.14 |x86, IBM Z, IBM Power Systems1| +|Red Hat OpenShift| 4.15 |x86, IBM Z, IBM Power Systems1| 1IBM Power Systems architecture is only supported on Spectrum Virtualize and DS8000 family storage systems. **Note:** -- As of this document's publication date, IBM Cloud® Satellite only supports RHEL 7 on x86 architecture for Red Hat OpenShift. 
For the latest support information, see [cloud.ibm.com/docs/satellite](https://cloud.ibm.com/docs/satellite). +- As of this document's publication date, IBM Cloud® Satellite only supports RHEL 8 on x86 architecture for Red Hat OpenShift. For the latest support information, see [Satellite host system requirements](https://cloud.ibm.com/docs/satellite?topic=satellite-host-reqs). - For the latest orchestration platform support information, see the [Lifecycle and support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html). diff --git a/docs/content/release_notes/supported_os.md b/docs/content/release_notes/supported_os.md index ab7fc4917..401d1dc07 100644 --- a/docs/content/release_notes/supported_os.md +++ b/docs/content/release_notes/supported_os.md @@ -2,16 +2,18 @@ The following table lists operating systems required for deployment of the IBM® block storage CSI driver. -|Operating system|Architecture| -|----------------|------------| -|Red Hat® Enterprise Linux® (RHEL) 7.x|x86, IBM Z®| -|Red Hat® Enterprise Linux® (RHEL) 8.x|x86| -|Red Hat Enterprise Linux CoreOS (RHCOS) 4.9-4.11|x86, IBM Z, IBM Power Systems™1| -|Ubuntu 20.04.4 LTS2| x86_64| +| Operating system |Architecture| +|---------------------------------------------------|------------| +| Red Hat® Enterprise Linux® (RHEL) 8.x |x86| +| Red Hat® Enterprise Linux® (RHEL) 9.x |x86| +| Red Hat Enterprise Linux CoreOS (RHCOS) 4.12-4.15 |x86, IBM Z, IBM Power Systems™1| +| Ubuntu 20.04.4 LTS2 | x86_64| 1IBM Power Systems architecture is only supported on Spectrum Virtualize and DS8000 family storage systems.
2Ubuntu is supported with Kubernetes orchestration platforms only. -**Note:** For the latest operating system support information, see the [Lifecycle and support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html). +**Note:** +- Virtualized worker nodes (for example, VMware vSphere) are supported with iSCSI and Fibre Channel (FC) adapters, when the FC adapter is used in passthrough mode. +- For the latest operating system support information, see the [Lifecycle and support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html). diff --git a/docs/content/release_notes/supported_storage.md b/docs/content/release_notes/supported_storage.md index f5f3dd741..054ecdf64 100644 --- a/docs/content/release_notes/supported_storage.md +++ b/docs/content/release_notes/supported_storage.md @@ -1,18 +1,16 @@ # Supported storage systems -IBM® block storage CSI driver 1.10.0 supports different IBM storage systems as listed in the following table. +IBM® block storage CSI driver 1.11.2 supports different IBM storage systems as listed in the following table. 
|Storage system|Microcode version| |--------------|-----------------| -|FlashSystem™ A9000|12.3.2.c or later| -|FlashSystem A9000R|12.3.2.c or later| -|Spectrum Virtualize™ family including Spectrum Virtualize as software only, Spectrum Virtualize for Public Cloud, SAN Volume Controller (SVC), and FlashSystem® family members built with Spectrum® Virtualize (including FlashSystem 5xxx, 7xxx, 9xxx)|7.8.x, 8.2.x, 8.3.x, 8.4.x, 8.5.x| +|Spectrum Virtualize™ family including Spectrum Virtualize as software only, Spectrum Virtualize for Public Cloud, SAN Volume Controller (SVC), and FlashSystem® family members built with Spectrum® Virtualize (including FlashSystem 5xxx, 7xxx, 9xxx)|8.4.x, 8.5.x, 8.6.x| |DS8000® family|8.x and higher with same API interface| **Note:** - For the latest microcode storage support information, see the [Lifecycle and support matrix](https://www.ibm.com/docs/en/stg-block-csi-driver?topic=SSRQ8T/landing/csi_lifecycle_support_matrix.html). - The Spectrum Virtualize family and SAN Volume Controller storage systems run the Spectrum Virtualize software. In addition, the Spectrum Virtualize package is available as a deployable solution that can be run on any compatible hardware. -- Spectrum Virtualize family (including Storwize, FlashSystem, and SAN Volume Controller) microcode versions 8.4.x and 8.5.x include both LTS and Non-LTS releases. For more information, see [IBM Spectrum Virtualize FAQ for Continuous Development (CD) Release Model for software releases](https://www.ibm.com/support/pages/node/6409554). +- Spectrum Virtualize family (including Storwize, FlashSystem, and SAN Volume Controller) microcode versions 8.4.x, 8.5.x and 8.6.x include both LTS and Non-LTS releases. For more information, see [IBM Spectrum Virtualize FAQ for Continuous Development (CD) Release Model for software releases](https://www.ibm.com/support/pages/node/6409554). 
diff --git a/docs/content/release_notes/whats_new.md b/docs/content/release_notes/whats_new.md index 7a73a73c7..eb5db6101 100644 --- a/docs/content/release_notes/whats_new.md +++ b/docs/content/release_notes/whats_new.md @@ -1,43 +1,13 @@ -# What's new in 1.10.0 +# What's new in 1.11.2 -IBM® block storage CSI driver 1.10.0 introduces the enhancements that are detailed in the following section. +IBM® block storage CSI driver 1.11.2 introduces the enhancements that are detailed in the following section. -**General availability date:** 26 July 2022 +**General availability date:** 1 May 2024 -## Alpha support for the new Snapshot function that was introduced in IBM Spectrum Virtualize 8.5.1 release +## Supported orchestration platforms for deployment -This version adds Alpha support for the new Snapshot function that was introduced in IBM Spectrum Virtualize 8.5.1 release. The main use case of snapshot is corruption protection. It protects the user data from deliberate or accidental data corruption from the host's systems. For more information about the Snapshot function, see **Product overview** > **Technical overview** > **Volume groups** > **Snapshot function** within your Spectrum Virtualize product documentation on [IBM Documentation](https://www.ibm.com/docs). +This version adds support for orchestration platforms Kubernetes 1.28 and Red Hat® OpenShift® 4.14, suitable for deployment of the CSI (Container Storage Interface) driver. -**Important:** Be sure to read all of the limitations before using Snapshot function with the CSI driver. +## Miscellaneous resolved issues -**Note:** The IBM® FlashCopy and Snapshot function are both referred to as the more generic volume snapshots and cloning within this documentation set. Not all supported products use the FlashCopy and Snapshot function terminology. Spectrum Virtualize storage systems introduced the new Snapshot function as of Spectrum Virtualize 8.5.1 release. 
Notes clarifying which function is being referred to within this document are made, as necessary. - -## New dynamic host definition - -IBM® block storage CSI driver 1.10.0 enables users to not need to statically define hosts on the storage in advance, eliminating the need for manual static host definitions. The host definer handles changes in the orchestrator cluster that relate to the host definition and applies them to the relevant storage systems. - -## Now enables volume group configuration - -The CSI driver now enables volume group configuration when creating a new volume for Spectrum Virtualize family systems. - -For more information about volume groups, see **Product overview** > **Technical overview** > **Volume groups** within your product documentation on [IBM Documentation](https://www.ibm.com/docs). - -## New metrics support - -IBM® block storage CSI driver 1.10.0 introduces new kubelet mounted volume metrics support for volumes created with the CSI driver. - -The following metrics are currently supported: -- kubelet_volume_stats_available_bytes -- kubelet_volume_stats_capacity_bytes -- kubelet_volume_stats_inodes -- kubelet_volume_stats_inodes_free -- kubelet_volume_stats_inodes_used -- kubelet_volume_stats_used_bytes - -For more information about the supported metrics, see `VolumeUsage` within the [Container Storage Interface (CSI) spec documentation for `NodeGetVolumeStats`](https://github.com/container-storage-interface/spec/blob/v1.5.0/spec.md#nodegetvolumestats). - -For more information about using metrics in Kubernetes, see [Metrics in Kubernetes](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/#metrics-in-kubernetes) in the Kubernetes documentation. - -## Additional supported orchestration platforms for deployment - -This version adds support for orchestration platforms Kubernetes 1.24 and Red Hat® OpenShift® 4.11, suitable for deployment of the CSI (Container Storage Interface) driver. 
\ No newline at end of file +For information about the resolved issues in version 1.11.2, see [1.11.2 (May 2024)](changelog_1.11.2.md). diff --git a/docs/content/using/changing_node_connectivity.md b/docs/content/using/changing_node_connectivity.md index 1bff2d799..6fc6d277e 100644 --- a/docs/content/using/changing_node_connectivity.md +++ b/docs/content/using/changing_node_connectivity.md @@ -1,6 +1,16 @@ # Changing node connectivity -Node connectivity for dynamic host definition needs to be updated when node connectivity changes take place. +Node connectivity for dynamic host definition is done dynamically. In some situations connectivity may need to be updated manually. + +The following are examples of when dynamic host definition can occur: +- If ports connectivity is changed on the host and restart to the relevant CSI node port. +- If connectivity has changed on either the custom resource or label. + +For more information, see [Configuring the host definer](../configuration/configuring_hostdefiner.md). + +## Manually changing node connectivity + +In some situations, node connectivity may need to be manually configured. Before you begin, if the `allowDelete` parameter is set to `false`, ensure that the old host definition is deleted. diff --git a/docs/content/using/delete_vg.md b/docs/content/using/delete_vg.md new file mode 100644 index 000000000..2c447e583 --- /dev/null +++ b/docs/content/using/delete_vg.md @@ -0,0 +1,9 @@ +# Deleting a VolumeGroup with a replication policy + +When both Primary and Secondary volume groups are represented on a cluster, delete them in this specific order. + +For each VolumeGroup Primary and Secondary pair to be deleted: + 1. Delete the Primary VolumeGroup. + + 2. Delete the Secondary VolumeGroup.

+ **Note:** After the Primary VolumeGroup has been deleted, the Secondary volume group is automatically deleted from the storage system. diff --git a/docs/content/using/promoting_vg.md b/docs/content/using/promoting_vg.md new file mode 100644 index 000000000..0dbdf8e9b --- /dev/null +++ b/docs/content/using/promoting_vg.md @@ -0,0 +1,12 @@ +# Promoting a volume group +To promote a replicated volume group within the CSI driver, the VolumeReplication state must be promoted. + +Promote the VolumeReplication state, by changing the `spec.replicationState` from `Secondary` to `Primary`. For more information, see [Creating a VolumeReplication](../configuration/creating_volumereplication.md). + +## Promoting a replicated volume group +Use the following procedure to promote a replicated volume group: + +1. Import the existing volume group. See [Importing an existing volume group](../configuration/importing_existing_volume_group.md). +

**Attention:** Be sure to import any existing volumes before importing the volume group. + +2. Create and apply a new VolumeReplication YAML file for the volume group, with the `spec.replicationState` parameter being `Primary`. See [Creating a VolumeReplication](../configuration/creating_volumereplication.md). diff --git a/docs/content/using/removing_pvc_vg.md b/docs/content/using/removing_pvc_vg.md new file mode 100644 index 000000000..4e9e96a08 --- /dev/null +++ b/docs/content/using/removing_pvc_vg.md @@ -0,0 +1,10 @@ +# Removing a PVC from a volume group with a replication policy + +When both Primary and Secondary volume groups are represented on a cluster, their associated PVCs must be removed in this specific order. + +**Important:** Be sure to follow these steps in the correct order to prevent a PVC from locking. + +For each PVC Primary and Secondary pair to be removed from its volume group: + 1. Remove the Primary PVC volume group labels. + 2. Remove the Secondary PVC volume group labels.
+ **Note:** After the Primary PVC volume group labels have been removed, the Secondary PVC associated volume is automatically deleted from the storage system. diff --git a/docs/content/using/using.md b/docs/content/using/using.md index 6f9e8f3a5..e547afbce 100644 --- a/docs/content/using/using.md +++ b/docs/content/using/using.md @@ -4,6 +4,11 @@ Use this information for further usage information for the CSI (Container Storag * [Using dynamic host connectivity](using_hostdefinition.md) * [Changing node connectivity](changing_node_connectivity.md) + * [Adding optional labels for dynamic host definition](using_hostdefinition_labels.md) +* [Using the CSI driver with policy-based replication](using_policy_based_replication.md) + * [Promoting a volume group](promoting_vg.md) + * [Deleting a VolumeGroup with a replication policy](delete_vg.md) + * [Removing a PVC from a volume group with a replication policy](removing_pvc_vg.md) * [Sample configurations for running a stateful container](sample_stateful_container.md) diff --git a/docs/content/using/using_hostdefinition.md b/docs/content/using/using_hostdefinition.md index 2a6d75f6a..afe3b2531 100644 --- a/docs/content/using/using_hostdefinition.md +++ b/docs/content/using/using_hostdefinition.md @@ -4,6 +4,11 @@ Dynamic host connectivity eliminates the necessity for manual host definitions. A use case example of using dynamic host definition is when creating a new storage class with a new storage. With the dynamic host definition feature, new host definitions are created on the storage for the relevant nodes. For each host definition on the storage, a new host definition resource is created. With these resources, the status of the host definition on the storage system can easily be retrieved. +Dynamic host definitions supports the following: + +- **CSI Topology**
For more information, see [Configuring for CSI Topology](../configuration/configuring_topology.md). +- **I/O Groups**
By default, the host definer creates all definitions on all possible I/O groups (0, 1, 2, 3), and there is no need to define the I/O groups.
If you want a node to use a specific I/O group, use the I/O group label to specify the usage. For more information, see [Adding optional labels for dynamic host definition](using_hostdefinition_labels.md). + The host definer identifies the nodes available for host definition on each storage system and controls each of the host definitions. To see the phase status of all managed HostDefinitions by the host definer, use: kubectl get hostdefinitions @@ -15,6 +20,10 @@ The host definer identifies the nodes available for host definition on each stor |PendingDeletion|Host deletion did not complete during the last attempt. The host definer will try again.| |Error|Host definition or deletion did not complete and will not try again.| +Adding labels to nodes allows for greater control over the system nodes, when using dynamic host definition. + +Node labels can be used to help customize node usage with host definition. For more information, see [Adding optional labels for dynamic host definition](using_hostdefinition_labels.md). + ## Recovering from an Error state If any of the host definitions have an Error status, follow this procedure to have the host definer reattempt to define the hosts. @@ -34,13 +43,7 @@ If any of the host definitions have an Error status, follow this procedure to ha ``` $> kubectl get hostdefinition - NAME AGE PHASE NODE - 102m Ready - 102m Ready + NAME AGE PHASE NODE MANAGEMENT_ADDRESS + 102m Ready + 102m Ready ``` - -## Blocking a specific node definition from being deleted - -To block a specific host definition from being deleted by the host definer, you can add the following label to the node: `hostdefiner.block.csi.ibm.com/avoid-deletion=true`. - -This label works on a per node basis, where the `allowDelete` parameter definition in the `csi_v1_hostdefiner_cr.yaml` is for all cluster nodes. 
diff --git a/docs/content/using/using_hostdefinition_labels.md b/docs/content/using/using_hostdefinition_labels.md new file mode 100644 index 000000000..4670e6bdd --- /dev/null +++ b/docs/content/using/using_hostdefinition_labels.md @@ -0,0 +1,36 @@ +# Adding optional labels for dynamic host definition + +Adding labels to nodes allows for greater control over the system nodes, when using dynamic host definition. + +## Blocking a specific node definition from being deleted + +To block a specific host definition from being deleted by the host definer, you can add the following label to the node: `hostdefiner.block.csi.ibm.com/avoid-deletion=true`. + +This label works on a per node basis, where the `allowDelete` parameter definition in the `csi_v1_hostdefiner_cr.yaml` is for all cluster nodes. + +## Defining a specific host node + +In addition to defining `connectivityType` in the HostDefiner, the node's connectivity type can be defined by overriding the `connectivityType` definition within the HostDefiner by using the `connectivity-type` label. + +This tag defines the connectivity type of the node regardless of connectivity hierarchy. + +For example, if `connectivityType` is defined as using `fc` but you want to use NVMe on a specific node, you can define `nvmeofc` for this specific node, using this label. + +`block.csi.ibm.com/connectivity-type=` + +**Note:**
+- The values for the connectivityType are the same as those for defining the HostDefiner: `nvmeofc`, `fc`, `iscsi`. If an invalid label is used, this label is ignored. +- For more information about defining the connectivity type within the HostDefiner, see [Configuring the host definer](../configuration/configuring_hostdefiner.md). + +## Specifying I/O group usage + +To specify which I/O group(s) a node should use, add any of the following labels to the node: + +- `hostdefiner.block.csi.ibm.com/io-group-0=true` +- `hostdefiner.block.csi.ibm.com/io-group-1=true` +- `hostdefiner.block.csi.ibm.com/io-group-2=true` +- `hostdefiner.block.csi.ibm.com/io-group-3=true` + +**Note:** If no `io_group` is defined, the volume is created within the storage system's default I/O group(s). + +For more about the I/O group function, see **Product overview** > **Technical overview** > **I/O group** within your Spectrum Virtualize product documentation on [IBM Documentation](https://www.ibm.com/docs). \ No newline at end of file diff --git a/docs/content/using/using_policy_based_replication.md b/docs/content/using/using_policy_based_replication.md new file mode 100644 index 000000000..1761fc017 --- /dev/null +++ b/docs/content/using/using_policy_based_replication.md @@ -0,0 +1,19 @@ +# Using the CSI driver with policy-based replication + +Policy-based replication was introduced in IBM Spectrum Virtualize 8.5.2 release. Policy-based replication provides simplified configuration and management of asynchronous replication between two systems. + +Policy-based replication uses volume groups to automatically deploy and manage replication. This feature significantly simplifies configuring, managing, and monitoring replication between two systems. In order to support this feature, the CSI driver creates fictitious volume groups for the volume being replicated, as replication is handled on a per volume basis within CSI. It is then the volume group that gets replicated. 
Once the volume groups are replicated all volume groups can be seen within the Spectrum Virtualize user interface. All replicated volumes are identified by the original volume group name with the `_vg` suffix. + +When deleting volumes that are replicated, both the replicated volume and volume group are automatically deleted, as well as the original fictitious volume group that was created in order to use the policy-based replication. Deleting replicated volumes / volume groups does not delete the original volume itself. + +The CSI driver identifies that policy-based replication is being used based on the use of the `replication_policy` parameter within the VolumeReplicationClass YAML file. + +**Important:** `replication_policy` cannot be used together with the `system_id` parameter. + +Before replicating a volume with policy-based replication, verify that the proper replication policies are in place on your storage system. + +- For more information, see [Compatibility and requirements](../installation/install_compatibility_requirements.md) +- For more configuration information, see [Creating a VolumeReplicationClass](../configuration/creating_volumereplicationclass.md) and [Creating a VolumeReplication](../configuration/creating_volumereplication.md) +- For information on importing existing volume groups, see [Importing an existing volume group](../configuration/importing_existing_volume_group.md) + +**Note:** For full information about this Spectrum Virtualize feature, see **What's new** > **Getting started with policy-based replication** within your Spectrum Virtualize product documentation on [IBM Documentation](https://www.ibm.com/docs). 
diff --git a/go.mod b/go.mod index dc40cd2e6..5db0d73f3 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,98 @@ module github.com/ibm/ibm-block-csi-driver -go 1.13 +go 1.19 require ( - github.com/container-storage-interface/spec v1.5.0 - github.com/golang/mock v1.3.1 + github.com/container-storage-interface/spec v1.8.0 + github.com/golang/mock v1.6.0 github.com/gophercloud/gophercloud v0.1.0 // indirect - github.com/kubernetes-csi/csi-lib-utils v0.9.1 - github.com/sirupsen/logrus v1.6.0 - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 - google.golang.org/grpc v1.29.0 - gopkg.in/yaml.v2 v2.2.8 - k8s.io/apimachinery v0.19.0 - k8s.io/client-go v0.19.0 + github.com/kubernetes-csi/csi-lib-utils v0.15.0 + github.com/sirupsen/logrus v1.9.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.13.0 + google.golang.org/grpc v1.54.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/apimachinery v0.28.0 + k8s.io/client-go v0.28.0 k8s.io/klog v1.0.0 // indirect - k8s.io/mount-utils v0.20.13 - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 + k8s.io/mount-utils v0.27.1 + k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect ) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.10.2 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/sys/mountinfo 
v0.6.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.27.1 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.10.2 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 // indirect + golang.org/x/net v0.13.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/term v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.28.0 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum index fc71d6b1a..387bb503a 100644 --- a/go.sum +++ b/go.sum @@ -1,68 +1,39 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= 
-github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= -github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= +github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -72,29 +43,39 @@ github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr 
v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -103,25 +84,41 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= @@ -131,38 +128,58 @@ github.com/googleapis/gnostic v0.4.1 
h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyyc github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes-csi/csi-lib-utils v0.9.1 h1:sGq6ifVujfMSkfTsMZip44Ttv8SDXvsBlFk9GdYl/b8= github.com/kubernetes-csi/csi-lib-utils v0.9.1/go.mod h1:8E2jVUX9j3QgspwHXa6LwyN7IHQDjW9jX3kwoWnSC+M= +github.com/kubernetes-csi/csi-lib-utils v0.13.0 h1:QrTdZVZbHlaSUBN9ReayBPnnF1N0edFIpUKBwVIBW3w= +github.com/kubernetes-csi/csi-lib-utils v0.13.0/go.mod h1:JS9eDIZmSjx4F9o0bLTVK/qfhIIOifdjEfVXzxWapfE= +github.com/kubernetes-csi/csi-lib-utils v0.15.0 h1:YTMO6WilRUmjGh5/73kF4KjNcXev+V37O4bx8Uoxy5A= +github.com/kubernetes-csi/csi-lib-utils v0.15.0/go.mod h1:fsoR7g1fOfl1z0WDpA1WvWPtt4oVvgzChgSUgR3JWDw= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -170,25 +187,28 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -196,55 +216,60 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -254,91 +279,113 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= +golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -347,12 +394,18 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -365,30 +418,29 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine 
v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod 
h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.0 h1:2pJjwYOdkZ9HlN4sWRYBg9ttH5bCOlsueaM+b/oYjwo= google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -399,45 +451,59 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ= k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= k8s.io/api v0.19.0 h1:XyrFIJqTYZJ2DU7FBE/bSPz7b1HvbVBuBf07oeo6eTc= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= +k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= +k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= +k8s.io/api v0.28.0 h1:3j3VPWmN9tTDI68NETBWlDiA9qOiGJ7sdKeufehBYsM= +k8s.io/api v0.28.0/go.mod h1:0l8NZJzB0i/etuWnIXcwfIv+xnDOhL3lLW919AWYDuY= k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE= k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc= +k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4= k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= +k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= +k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= +k8s.io/client-go v0.28.0 h1:ebcPRDZsCjpj62+cMk1eGNX1QkMdRmQ6lmz5BLoFWeM= +k8s.io/client-go v0.28.0/go.mod h1:0Asy9Xt3U98RypWJmU1ZrRAGKhP6NqDPmptlAzK2kMc= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -450,22 +516,40 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c h1:EFfsozyzZ/pggw5qNx7ftTVZdp7WZl+3ih89GEjYEK8= +k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/mount-utils v0.20.13 h1:IaaCihes2VCR5B6ZYZ/tynCHkJSiidQG0SNQoh8vrg4= k8s.io/mount-utils v0.20.13/go.mod h1:Jv9NRZ5L2LF87A17GaGlArD+r3JAJdZFvo4XD1cG4Kc= +k8s.io/mount-utils v0.27.1 h1:RSd0wslbIuwLRaGGNAGMZ3m9FLcvukxJ3FWlOm76W2A= +k8s.io/mount-utils v0.27.1/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= 
k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/node/pkg/driver/version_test.go b/node/pkg/driver/version_test.go index f05e8d668..9bf5abd02 100644 --- 
a/node/pkg/driver/version_test.go +++ b/node/pkg/driver/version_test.go @@ -46,7 +46,7 @@ func TestGetVersion(t *testing.T) { version, err := GetVersion(dir) expected := VersionInfo{ - DriverVersion: "1.11.0", + DriverVersion: "1.12.0", GitCommit: "", BuildDate: "", GoVersion: runtime.Version(), @@ -76,7 +76,7 @@ func TestGetVersionJSON(t *testing.T) { } expected := fmt.Sprintf(`{ - "driverVersion": "1.11.0", + "driverVersion": "1.12.0", "gitCommit": "", "buildDate": "", "goVersion": "%s", diff --git a/scripts/ci/Jenkinsfile b/scripts/ci/Jenkinsfile index bcdd245ec..19b7afeef 100644 --- a/scripts/ci/Jenkinsfile +++ b/scripts/ci/Jenkinsfile @@ -1,6 +1,6 @@ pipeline { parameters { - string(name: 'IMAGE_VERSION', defaultValue: "1.11.0") + string(name: 'IMAGE_VERSION', defaultValue: "1.12.0") string(name: 'DOCKER_REGISTRY', defaultValue: DEFAULT_DOCKER_REGISTRY) string(name: 'EMAIL_TO', defaultValue: "") }