From d22d22260946b255d04fa647e35f46e22a144f2d Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 9 Jun 2023 11:15:26 +0300 Subject: [PATCH 01/18] Makefile: Add infrastructure to generate GRPC APIs Signed-off-by: Yury Kulazhenkov --- .github/workflows/build-test-lint.yml | 12 +++ .gitignore | 1 + Makefile | 104 ++++++++++++++++++++++++-- api/grpc/buf.yaml | 5 ++ 4 files changed, 117 insertions(+), 5 deletions(-) create mode 100644 api/grpc/buf.yaml diff --git a/.github/workflows/build-test-lint.yml b/.github/workflows/build-test-lint.yml index 4a17da3..9f9e626 100644 --- a/.github/workflows/build-test-lint.yml +++ b/.github/workflows/build-test-lint.yml @@ -32,6 +32,18 @@ jobs: uses: actions/checkout@v3 - name: Lint run: make lint + grpc-check: + runs-on: ubuntu-latest + needs: build + steps: + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: 1.20.x + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Check GRPC + run: make grpc-check test: runs-on: ubuntu-latest needs: build diff --git a/.gitignore b/.gitignore index aefaec5..19b59d1 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ *.dylib bin build/ +_tmp testbin/* .gocache diff --git a/Makefile b/Makefile index 41147f8..e231a25 100644 --- a/Makefile +++ b/Makefile @@ -59,6 +59,12 @@ all: build help: ## Display this help. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) +.PHONY: clean +clean: ## Remove downloaded tools and compiled binaries + @rm -rf $(LOCALBIN) + @rm -rf $(BUILD_DIR) + @rm -rf $(GRPC_TMP_DIR) + ##@ Development .PHONY: lint @@ -80,7 +86,6 @@ test: lint unit-test cov-report: gcov2lcov unit-test ## Build test coverage report in lcov format $(GCOV2LCOV) -infile $(COVER_PROFILE) -outfile $(LCOV_PATH) - ##@ Build .PHONY: build-controller @@ -122,16 +127,32 @@ LOCALBIN ?= $(PROJECT_DIR)/bin $(LOCALBIN): mkdir -p $(LOCALBIN) +## Temporary location for GRPC files +GRPC_TMP_DIR ?= $(CURDIR)/_tmp +$(GRPC_TMP_DIR): + @mkdir -p $@ + +##@ Tools + ## Tool Binaries ENVTEST ?= $(LOCALBIN)/setup-envtest GOLANGCILINT ?= $(LOCALBIN)/golangci-lint GCOV2LCOV ?= $(LOCALBIN)/gcov2lcov MOCKERY ?= $(LOCALBIN)/mockery +PROTOC ?= $(LOCALBIN)/protoc/bin/protoc +PROTOC_GEN_GO ?= $(LOCALBIN)/protoc-gen-go +PROTOC_GEN_GO_GRPC ?= $(LOCALBIN)/protoc-gen-go-grpc +BUF ?= $(LOCALBIN)/buf ## Tool Versions GOLANGCILINT_VERSION ?= v1.52.2 GCOV2LCOV_VERSION ?= v1.0.5 MOCKERY_VERSION ?= v2.27.1 +PROTOC_VER ?= 23.4 +PROTOC_GEN_GO_VER ?= 1.31.0 +PROTOC_GEN_GO_GRPC_VER ?= 1.3.0 +BUF_VERSION ?= 1.23.1 + .PHONY: envtest envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. @@ -153,7 +174,80 @@ mockery: $(MOCKERY) ## Download mockery locally if necessary. $(MOCKERY): | $(LOCALBIN) GOBIN=$(LOCALBIN) go install github.com/vektra/mockery/v2@$(MOCKERY_VERSION) -.PHONY: clean -clean: ## Remove downloaded tools and compiled binaries - @rm -rf $(LOCALBIN) - @rm -rf $(BUILD_DIR) +.PHONY: protoc +PROTOC_REL ?= https://github.com/protocolbuffers/protobuf/releases +protoc: $(PROTOC) ## Download protoc locally if necessary. +$(PROTOC): | $(LOCALBIN) + cd $(LOCALBIN) && \ + curl -L --output tmp.zip $(PROTOC_REL)/download/v$(PROTOC_VER)/protoc-$(PROTOC_VER)-linux-x86_64.zip && \ + unzip tmp.zip -d protoc && rm tmp.zip + +.PHONY: protoc-gen-go +protoc-gen-go: $(PROTOC_GEN_GO) ## Download protoc-gen-go locally if necessary. 
+$(PROTOC_GEN_GO): | $(LOCALBIN) + GOBIN=$(LOCALBIN) go install google.golang.org/protobuf/cmd/protoc-gen-go@v$(PROTOC_GEN_GO_VER) + +.PHONY: protoc-gen-go-grpc +protoc-gen-go-grpc: $(PROTOC_GEN_GO_GRPC) ## Download protoc-gen-go locally if necessary. +$(PROTOC_GEN_GO_GRPC): | $(LOCALBIN) + GOBIN=$(LOCALBIN) go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v$(PROTOC_GEN_GO_GRPC_VER) + +.PHONY: buf +buf: $(BUF) ## Download buf locally if necessary +$(BUF): | $(LOCALBIN) + cd $(LOCALBIN) && \ + curl -sSL "https://github.com/bufbuild/buf/releases/download/v$(BUF_VERSION)/buf-Linux-x86_64" -o "$(LOCALBIN)/buf" && \ + chmod +x "$(LOCALBIN)/buf" + +##@ GRPC +# go package for generated code +API_PKG_GO_MOD ?= github.com/Mellanox/nvidia-k8s-ipam/api/grpc + +# GRPC DIRs +GRPC_DIR ?= $(PROJECT_DIR)/api/grpc +PROTO_DIR ?= $(GRPC_DIR)/proto +GENERATED_CODE_DIR ?= $(GRPC_DIR) + +grpc-generate: protoc protoc-gen-go protoc-gen-go-grpc ## Generate GO client and server GRPC code + @echo "generate GRPC API"; \ + echo " go module: $(API_PKG_GO_MOD)"; \ + echo " output dir: $(GENERATED_CODE_DIR) "; \ + echo " proto dir: $(PROTO_DIR) "; \ + cd $(PROTO_DIR) && \ + TARGET_FILES=""; \ + PROTOC_OPTIONS="--plugin=protoc-gen-go=$(PROTOC_GEN_GO) \ + --plugin=protoc-gen-go-grpc=$(PROTOC_GEN_GO_GRPC) \ + --go_out=$(GENERATED_CODE_DIR) \ + --go_opt=module=$(API_PKG_GO_MOD) \ + --proto_path=$(PROTO_DIR) \ + --go-grpc_out=$(GENERATED_CODE_DIR) \ + --go-grpc_opt=module=$(API_PKG_GO_MOD)"; \ + echo "discovered proto files:"; \ + for proto_file in $$(find . -name "*.proto"); do \ + proto_file=$$(echo $$proto_file | cut -d'/' -f2-); \ + proto_dir=$$(dirname $$proto_file); \ + pkg_name=M$$proto_file=$(API_PKG_GO_MOD)/$$proto_dir; \ + echo " $$proto_file"; \ + TARGET_FILES="$$TARGET_FILES $$proto_file"; \ + PROTOC_OPTIONS="$$PROTOC_OPTIONS \ + --go_opt=$$pkg_name \ + --go-grpc_opt=$$pkg_name" ; \ + done; \ + $(PROTOC) $$PROTOC_OPTIONS $$TARGET_FILES + +grpc-check: grpc-format grpc-lint protoc protoc-gen-go protoc-gen-go-grpc $(GRPC_TMP_DIR) ## Check that generated GO client code match proto files + @rm -rf $(GRPC_TMP_DIR)/nvidia/ + @$(MAKE) GENERATED_CODE_DIR=$(GRPC_TMP_DIR) grpc-generate + @diff -Naur $(GRPC_TMP_DIR)/nvidia/ $(GENERATED_CODE_DIR)/nvidia/ || \ + (printf "\n\nOutdated files detected!\nPlease, run 'make generate' to regenerate GO code\n\n" && exit 1) + @echo "generated files are up to date" + +grpc-lint: buf ## Lint GRPC files + @echo "lint protobuf files"; + cd $(PROTO_DIR) && \ + $(BUF) lint --config ../buf.yaml + +grpc-format: buf ## Format GRPC files + @echo "format protobuf files"; + cd $(PROTO_DIR) && \ + $(BUF) format -w --exit-code diff --git a/api/grpc/buf.yaml b/api/grpc/buf.yaml new file mode 100644 index 0000000..ec75633 --- /dev/null +++ b/api/grpc/buf.yaml @@ -0,0 +1,5 @@ +version: v1 +lint: + use: + - DEFAULT + - PACKAGE_NO_IMPORT_CYCLE From 3f645afce01551faf8d7d4156636cb104d70550e Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Wed, 28 Jun 2023 12:45:33 +0300 Subject: [PATCH 02/18] Makefile: Fix generate-mocks Makefile target Signed-off-by: Yury Kulazhenkov --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index e231a25..e2b9e1d 100644 --- a/Makefile +++ b/Makefile @@ -119,8 +119,8 @@ kind-load-image: ## Load ipam image to kind cluster kind load docker-image --name $(KIND_CLUSTER) $(IMG) .PHONY: generate-mocks -generate-mocks: ## generate mock objects - PATH=$(PATH):$(LOCALBIN) go generate ./... 
+generate-mocks: mockery ## generate mock objects + PATH=$(LOCALBIN):$(PATH) go generate ./... ## Location to install dependencies to LOCALBIN ?= $(PROJECT_DIR)/bin From a4e17a45ff9d80074c4f81aea457aa9658b5b900 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 9 Jun 2023 11:15:44 +0300 Subject: [PATCH 03/18] API: Add GRPC API for IPAM node daemon Signed-off-by: Yury Kulazhenkov --- api/grpc/nvidia/ipam/node/v1/node.pb.go | 783 ++++++++++++++++++ api/grpc/nvidia/ipam/node/v1/node_grpc.pb.go | 254 ++++++ api/grpc/proto/nvidia/ipam/node/v1/node.proto | 113 +++ go.mod | 16 +- go.sum | 30 +- 5 files changed, 1177 insertions(+), 19 deletions(-) create mode 100644 api/grpc/nvidia/ipam/node/v1/node.pb.go create mode 100644 api/grpc/nvidia/ipam/node/v1/node_grpc.pb.go create mode 100644 api/grpc/proto/nvidia/ipam/node/v1/node.proto diff --git a/api/grpc/nvidia/ipam/node/v1/node.pb.go b/api/grpc/nvidia/ipam/node/v1/node.pb.go new file mode 100644 index 0000000..e0ab65b --- /dev/null +++ b/api/grpc/nvidia/ipam/node/v1/node.pb.go @@ -0,0 +1,783 @@ +// Copyright 2023, NVIDIA CORPORATION & AFFILIATES +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: nvidia/ipam/node/v1/node.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AllocateRequest contains parameters for Allocate rpc call +type AllocateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // required, IPAMParameters contains parameters IPAM parameters related to the request + Parameters *IPAMParameters `protobuf:"bytes,1,opt,name=parameters,proto3" json:"parameters,omitempty"` +} + +func (x *AllocateRequest) Reset() { + *x = AllocateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AllocateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AllocateRequest) ProtoMessage() {} + +func (x *AllocateRequest) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AllocateRequest.ProtoReflect.Descriptor instead. 
+func (*AllocateRequest) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{0} +} + +func (x *AllocateRequest) GetParameters() *IPAMParameters { + if x != nil { + return x.Parameters + } + return nil +} + +// IPAMParameters common message which contains information used in all rpc calls +type IPAMParameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // required, list of pools in which IP addresses should be allocated, + // must contain one or two elements (dual-stack, IPv4 + IPv6 use-case) + Pools []string `protobuf:"bytes,1,rep,name=pools,proto3" json:"pools,omitempty"` + // required, a unique plaintext identifier for a container, allocated by the runtime + CniContainerid string `protobuf:"bytes,2,opt,name=cni_containerid,json=cniContainerid,proto3" json:"cni_containerid,omitempty"` + // required, name of the interface inside the container + CniIfname string `protobuf:"bytes,3,opt,name=cni_ifname,json=cniIfname,proto3" json:"cni_ifname,omitempty"` + // required, additional metadata to identify IP allocation + Metadata *IPAMMetadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *IPAMParameters) Reset() { + *x = IPAMParameters{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IPAMParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IPAMParameters) ProtoMessage() {} + +func (x *IPAMParameters) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPAMParameters.ProtoReflect.Descriptor instead. 
+func (*IPAMParameters) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{1} +} + +func (x *IPAMParameters) GetPools() []string { + if x != nil { + return x.Pools + } + return nil +} + +func (x *IPAMParameters) GetCniContainerid() string { + if x != nil { + return x.CniContainerid + } + return "" +} + +func (x *IPAMParameters) GetCniIfname() string { + if x != nil { + return x.CniIfname + } + return "" +} + +func (x *IPAMParameters) GetMetadata() *IPAMMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +// IPAMMetadata contains metadata for IPAM calls +type IPAMMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // required, name of the k8s pod + K8SPodName string `protobuf:"bytes,1,opt,name=k8s_pod_name,json=k8sPodName,proto3" json:"k8s_pod_name,omitempty"` + // required, namespace of the k8s pod + K8SPodNamespace string `protobuf:"bytes,2,opt,name=k8s_pod_namespace,json=k8sPodNamespace,proto3" json:"k8s_pod_namespace,omitempty"` + // optional, UID of the k8s pod, k8s_pod_uid exist in containerd >= 1.6 cr-io >= 0.3 + K8SPodUid string `protobuf:"bytes,3,opt,name=k8s_pod_uid,json=k8sPodUid,proto3" json:"k8s_pod_uid,omitempty"` + // optional, PCI device ID related to the allocation + DeviceId string `protobuf:"bytes,4,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"` +} + +func (x *IPAMMetadata) Reset() { + *x = IPAMMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IPAMMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IPAMMetadata) ProtoMessage() {} + +func (x *IPAMMetadata) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IPAMMetadata.ProtoReflect.Descriptor instead. 
+func (*IPAMMetadata) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{2} +} + +func (x *IPAMMetadata) GetK8SPodName() string { + if x != nil { + return x.K8SPodName + } + return "" +} + +func (x *IPAMMetadata) GetK8SPodNamespace() string { + if x != nil { + return x.K8SPodNamespace + } + return "" +} + +func (x *IPAMMetadata) GetK8SPodUid() string { + if x != nil { + return x.K8SPodUid + } + return "" +} + +func (x *IPAMMetadata) GetDeviceId() string { + if x != nil { + return x.DeviceId + } + return "" +} + +// IsAllocatedRequest contains parameters for IsAllocated rpc call +type IsAllocatedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // required, IPAMParameters contains parameters IPAM parameters related to the request + Parameters *IPAMParameters `protobuf:"bytes,1,opt,name=parameters,proto3" json:"parameters,omitempty"` +} + +func (x *IsAllocatedRequest) Reset() { + *x = IsAllocatedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IsAllocatedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IsAllocatedRequest) ProtoMessage() {} + +func (x *IsAllocatedRequest) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IsAllocatedRequest.ProtoReflect.Descriptor instead. +func (*IsAllocatedRequest) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{3} +} + +func (x *IsAllocatedRequest) GetParameters() *IPAMParameters { + if x != nil { + return x.Parameters + } + return nil +} + +// DeallocateRequest contains parameters for Deallocate rpc call +type DeallocateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // required, IPAMParameters contains parameters IPAM parameters related to the request + Parameters *IPAMParameters `protobuf:"bytes,1,opt,name=parameters,proto3" json:"parameters,omitempty"` +} + +func (x *DeallocateRequest) Reset() { + *x = DeallocateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeallocateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeallocateRequest) ProtoMessage() {} + +func (x *DeallocateRequest) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeallocateRequest.ProtoReflect.Descriptor instead. 
+func (*DeallocateRequest) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{4} +} + +func (x *DeallocateRequest) GetParameters() *IPAMParameters { + if x != nil { + return x.Parameters + } + return nil +} + +// AllocateResponse contains reply for Allocate rpc call +type AllocateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // list of allocated IPs + Allocations []*AllocationInfo `protobuf:"bytes,1,rep,name=allocations,proto3" json:"allocations,omitempty"` +} + +func (x *AllocateResponse) Reset() { + *x = AllocateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AllocateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AllocateResponse) ProtoMessage() {} + +func (x *AllocateResponse) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AllocateResponse.ProtoReflect.Descriptor instead. +func (*AllocateResponse) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{5} +} + +func (x *AllocateResponse) GetAllocations() []*AllocationInfo { + if x != nil { + return x.Allocations + } + return nil +} + +// AllocationInfo contains information about the allocation +type AllocationInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name of the pool in which this IP was allocated + Pool string `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"` + // allocated IP together with prefix length, e.g. 192.168.10.33/24 + Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` + // gateway for allocated IP + Gateway string `protobuf:"bytes,3,opt,name=gateway,proto3" json:"gateway,omitempty"` +} + +func (x *AllocationInfo) Reset() { + *x = AllocationInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AllocationInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AllocationInfo) ProtoMessage() {} + +func (x *AllocationInfo) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AllocationInfo.ProtoReflect.Descriptor instead. 
+func (*AllocationInfo) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{6} +} + +func (x *AllocationInfo) GetPool() string { + if x != nil { + return x.Pool + } + return "" +} + +func (x *AllocationInfo) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +func (x *AllocationInfo) GetGateway() string { + if x != nil { + return x.Gateway + } + return "" +} + +// IsAllocatedReply contains reply for IsAllocated rpc call +type IsAllocatedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IsAllocatedResponse) Reset() { + *x = IsAllocatedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IsAllocatedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IsAllocatedResponse) ProtoMessage() {} + +func (x *IsAllocatedResponse) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IsAllocatedResponse.ProtoReflect.Descriptor instead. +func (*IsAllocatedResponse) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{7} +} + +// DeallocateReply contains reply for Deallocate rpc call +type DeallocateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeallocateResponse) Reset() { + *x = DeallocateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeallocateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeallocateResponse) ProtoMessage() {} + +func (x *DeallocateResponse) ProtoReflect() protoreflect.Message { + mi := &file_nvidia_ipam_node_v1_node_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeallocateResponse.ProtoReflect.Descriptor instead. 
+func (*DeallocateResponse) Descriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{8} +} + +var File_nvidia_ipam_node_v1_node_proto protoreflect.FileDescriptor + +var file_nvidia_ipam_node_v1_node_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2f, 0x69, 0x70, 0x61, 0x6d, 0x2f, 0x6e, 0x6f, + 0x64, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x13, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x56, 0x0a, 0x0f, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, + 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0xad, 0x01, + 0x0a, 0x0e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6e, 0x69, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x63, 0x6e, 0x69, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x69, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6e, 0x69, 0x5f, 0x69, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6e, 0x69, 0x49, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x99, 0x01, + 0x0a, 0x0c, 0x49, 0x50, 0x41, 0x4d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x20, + 0x0a, 0x0c, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6b, 0x38, 0x73, + 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0b, + 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x55, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x22, 0x59, 0x0a, 0x12, 0x49, 0x73, 0x41, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x43, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 
0x2e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x22, 0x58, 0x0a, 0x11, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0x59, + 0x0a, 0x10, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, + 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x61, 0x6c, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4e, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x6f, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x22, 0x15, 0x0a, 0x13, 0x49, 0x73, 0x41, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xad, 0x02, 0x0a, 0x0b, 0x49, 0x50, 0x41, 0x4d, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x59, 0x0a, 0x08, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, + 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, + 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x62, 0x0a, 0x0b, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x27, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x6e, 0x76, 0x69, 0x64, + 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0a, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 
0x6c, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6e, 0x76, + 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_nvidia_ipam_node_v1_node_proto_rawDescOnce sync.Once + file_nvidia_ipam_node_v1_node_proto_rawDescData = file_nvidia_ipam_node_v1_node_proto_rawDesc +) + +func file_nvidia_ipam_node_v1_node_proto_rawDescGZIP() []byte { + file_nvidia_ipam_node_v1_node_proto_rawDescOnce.Do(func() { + file_nvidia_ipam_node_v1_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_nvidia_ipam_node_v1_node_proto_rawDescData) + }) + return file_nvidia_ipam_node_v1_node_proto_rawDescData +} + +var file_nvidia_ipam_node_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_nvidia_ipam_node_v1_node_proto_goTypes = []interface{}{ + (*AllocateRequest)(nil), // 0: nvidia.ipam.node.v1.AllocateRequest + (*IPAMParameters)(nil), // 1: nvidia.ipam.node.v1.IPAMParameters + (*IPAMMetadata)(nil), // 2: nvidia.ipam.node.v1.IPAMMetadata + (*IsAllocatedRequest)(nil), // 3: nvidia.ipam.node.v1.IsAllocatedRequest + (*DeallocateRequest)(nil), // 4: nvidia.ipam.node.v1.DeallocateRequest + (*AllocateResponse)(nil), // 5: nvidia.ipam.node.v1.AllocateResponse + (*AllocationInfo)(nil), // 6: nvidia.ipam.node.v1.AllocationInfo + (*IsAllocatedResponse)(nil), // 7: nvidia.ipam.node.v1.IsAllocatedResponse + (*DeallocateResponse)(nil), // 8: nvidia.ipam.node.v1.DeallocateResponse +} +var file_nvidia_ipam_node_v1_node_proto_depIdxs = []int32{ + 1, // 0: nvidia.ipam.node.v1.AllocateRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters + 2, // 1: nvidia.ipam.node.v1.IPAMParameters.metadata:type_name -> nvidia.ipam.node.v1.IPAMMetadata + 1, // 2: nvidia.ipam.node.v1.IsAllocatedRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters + 1, // 3: nvidia.ipam.node.v1.DeallocateRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters + 6, // 4: nvidia.ipam.node.v1.AllocateResponse.allocations:type_name -> nvidia.ipam.node.v1.AllocationInfo + 0, // 5: nvidia.ipam.node.v1.IPAMService.Allocate:input_type -> nvidia.ipam.node.v1.AllocateRequest + 3, // 6: nvidia.ipam.node.v1.IPAMService.IsAllocated:input_type -> nvidia.ipam.node.v1.IsAllocatedRequest + 4, // 7: nvidia.ipam.node.v1.IPAMService.Deallocate:input_type -> nvidia.ipam.node.v1.DeallocateRequest + 5, // 8: nvidia.ipam.node.v1.IPAMService.Allocate:output_type -> nvidia.ipam.node.v1.AllocateResponse + 7, // 9: nvidia.ipam.node.v1.IPAMService.IsAllocated:output_type -> nvidia.ipam.node.v1.IsAllocatedResponse + 8, // 10: nvidia.ipam.node.v1.IPAMService.Deallocate:output_type -> nvidia.ipam.node.v1.DeallocateResponse + 8, // [8:11] is the sub-list for method output_type + 5, // [5:8] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_nvidia_ipam_node_v1_node_proto_init() } +func file_nvidia_ipam_node_v1_node_proto_init() { + if File_nvidia_ipam_node_v1_node_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_nvidia_ipam_node_v1_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateRequest); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IPAMParameters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IPAMMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IsAllocatedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeallocateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocationInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IsAllocatedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_nvidia_ipam_node_v1_node_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeallocateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_nvidia_ipam_node_v1_node_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_nvidia_ipam_node_v1_node_proto_goTypes, + DependencyIndexes: file_nvidia_ipam_node_v1_node_proto_depIdxs, + MessageInfos: file_nvidia_ipam_node_v1_node_proto_msgTypes, + }.Build() + File_nvidia_ipam_node_v1_node_proto = out.File + file_nvidia_ipam_node_v1_node_proto_rawDesc = nil + file_nvidia_ipam_node_v1_node_proto_goTypes = nil + file_nvidia_ipam_node_v1_node_proto_depIdxs = nil +} diff --git a/api/grpc/nvidia/ipam/node/v1/node_grpc.pb.go b/api/grpc/nvidia/ipam/node/v1/node_grpc.pb.go new file mode 100644 index 0000000..68f55ab --- /dev/null +++ b/api/grpc/nvidia/ipam/node/v1/node_grpc.pb.go @@ -0,0 +1,254 @@ +// Copyright 2023, NVIDIA CORPORATION & AFFILIATES +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.23.4 +// source: nvidia/ipam/node/v1/node.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + IPAMService_Allocate_FullMethodName = "/nvidia.ipam.node.v1.IPAMService/Allocate" + IPAMService_IsAllocated_FullMethodName = "/nvidia.ipam.node.v1.IPAMService/IsAllocated" + IPAMService_Deallocate_FullMethodName = "/nvidia.ipam.node.v1.IPAMService/Deallocate" +) + +// IPAMServiceClient is the client API for IPAMService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type IPAMServiceClient interface { + // Allocate is called as a part of CMD_ADD flow. + // Returns response with allocated IPs if allocation succeeds or an error in case of failure. + // If multiple pools are provided in the request, then allocation succeeds if it succeeds for all pools. + // errors: + // + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + // NotFound - allocation is requested for unknown IP pool + // AlreadyExists - container identified by IPAMParameters already has allocated IP in the pool + // ResourceExhausted - no free IP addresses available in the IP pool + Allocate(ctx context.Context, in *AllocateRequest, opts ...grpc.CallOption) (*AllocateResponse, error) + // IsAllocated is called as a part of CMD_CHECK flow + // Returns empty response if a valid allocation already exists or an error otherwise. + // If multiple pools are provided in the request, then check + // succeed only if it is succeed for all pools. + // errors: + // + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + // NotFound - allocation not found + IsAllocated(ctx context.Context, in *IsAllocatedRequest, opts ...grpc.CallOption) (*IsAllocatedResponse, error) + // Deallocate is called as a part of CMD_DEL flow. + // Returns empty response if allocation for all pools released successfully or error otherwise. + // If multiple pools are provided in the request, then deallocation + // succeeds if it is succeeds for all pools else no deallocation is performed. 
+ // errors: + // + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + Deallocate(ctx context.Context, in *DeallocateRequest, opts ...grpc.CallOption) (*DeallocateResponse, error) +} + +type iPAMServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewIPAMServiceClient(cc grpc.ClientConnInterface) IPAMServiceClient { + return &iPAMServiceClient{cc} +} + +func (c *iPAMServiceClient) Allocate(ctx context.Context, in *AllocateRequest, opts ...grpc.CallOption) (*AllocateResponse, error) { + out := new(AllocateResponse) + err := c.cc.Invoke(ctx, IPAMService_Allocate_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iPAMServiceClient) IsAllocated(ctx context.Context, in *IsAllocatedRequest, opts ...grpc.CallOption) (*IsAllocatedResponse, error) { + out := new(IsAllocatedResponse) + err := c.cc.Invoke(ctx, IPAMService_IsAllocated_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iPAMServiceClient) Deallocate(ctx context.Context, in *DeallocateRequest, opts ...grpc.CallOption) (*DeallocateResponse, error) { + out := new(DeallocateResponse) + err := c.cc.Invoke(ctx, IPAMService_Deallocate_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IPAMServiceServer is the server API for IPAMService service. +// All implementations must embed UnimplementedIPAMServiceServer +// for forward compatibility +type IPAMServiceServer interface { + // Allocate is called as a part of CMD_ADD flow. + // Returns response with allocated IPs if allocation succeeds or an error in case of failure. + // If multiple pools are provided in the request, then allocation succeeds if it succeeds for all pools. + // errors: + // + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + // NotFound - allocation is requested for unknown IP pool + // AlreadyExists - container identified by IPAMParameters already has allocated IP in the pool + // ResourceExhausted - no free IP addresses available in the IP pool + Allocate(context.Context, *AllocateRequest) (*AllocateResponse, error) + // IsAllocated is called as a part of CMD_CHECK flow + // Returns empty response if a valid allocation already exists or an error otherwise. + // If multiple pools are provided in the request, then check + // succeed only if it is succeed for all pools. + // errors: + // + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + // NotFound - allocation not found + IsAllocated(context.Context, *IsAllocatedRequest) (*IsAllocatedResponse, error) + // Deallocate is called as a part of CMD_DEL flow. + // Returns empty response if allocation for all pools released successfully or error otherwise. + // If multiple pools are provided in the request, then deallocation + // succeeds if it is succeeds for all pools else no deallocation is performed. 
+ // errors: + // + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + Deallocate(context.Context, *DeallocateRequest) (*DeallocateResponse, error) + mustEmbedUnimplementedIPAMServiceServer() +} + +// UnimplementedIPAMServiceServer must be embedded to have forward compatible implementations. +type UnimplementedIPAMServiceServer struct { +} + +func (UnimplementedIPAMServiceServer) Allocate(context.Context, *AllocateRequest) (*AllocateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Allocate not implemented") +} +func (UnimplementedIPAMServiceServer) IsAllocated(context.Context, *IsAllocatedRequest) (*IsAllocatedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IsAllocated not implemented") +} +func (UnimplementedIPAMServiceServer) Deallocate(context.Context, *DeallocateRequest) (*DeallocateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Deallocate not implemented") +} +func (UnimplementedIPAMServiceServer) mustEmbedUnimplementedIPAMServiceServer() {} + +// UnsafeIPAMServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to IPAMServiceServer will +// result in compilation errors. +type UnsafeIPAMServiceServer interface { + mustEmbedUnimplementedIPAMServiceServer() +} + +func RegisterIPAMServiceServer(s grpc.ServiceRegistrar, srv IPAMServiceServer) { + s.RegisterService(&IPAMService_ServiceDesc, srv) +} + +func _IPAMService_Allocate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AllocateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IPAMServiceServer).Allocate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: IPAMService_Allocate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IPAMServiceServer).Allocate(ctx, req.(*AllocateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IPAMService_IsAllocated_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IsAllocatedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IPAMServiceServer).IsAllocated(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: IPAMService_IsAllocated_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IPAMServiceServer).IsAllocated(ctx, req.(*IsAllocatedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IPAMService_Deallocate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeallocateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IPAMServiceServer).Deallocate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: IPAMService_Deallocate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(IPAMServiceServer).Deallocate(ctx, req.(*DeallocateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// IPAMService_ServiceDesc is the grpc.ServiceDesc for IPAMService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var IPAMService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "nvidia.ipam.node.v1.IPAMService", + HandlerType: (*IPAMServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Allocate", + Handler: _IPAMService_Allocate_Handler, + }, + { + MethodName: "IsAllocated", + Handler: _IPAMService_IsAllocated_Handler, + }, + { + MethodName: "Deallocate", + Handler: _IPAMService_Deallocate_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "nvidia/ipam/node/v1/node.proto", +} diff --git a/api/grpc/proto/nvidia/ipam/node/v1/node.proto b/api/grpc/proto/nvidia/ipam/node/v1/node.proto new file mode 100644 index 0000000..cc5d610 --- /dev/null +++ b/api/grpc/proto/nvidia/ipam/node/v1/node.proto @@ -0,0 +1,113 @@ +// Copyright 2023, NVIDIA CORPORATION & AFFILIATES +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package nvidia.ipam.node.v1; + +// gRPC service definition for NVIDIA IPAM node daemon +service IPAMService { + // Allocate is called as a part of CMD_ADD flow. + // Returns response with allocated IPs if allocation succeeds or an error in case of failure. + // If multiple pools are provided in the request, then allocation succeeds if it succeeds for all pools. + // errors: + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + // NotFound - allocation is requested for unknown IP pool + // AlreadyExists - container identified by IPAMParameters already has allocated IP in the pool + // ResourceExhausted - no free IP addresses available in the IP pool + rpc Allocate(AllocateRequest) returns (AllocateResponse) {} + // IsAllocated is called as a part of CMD_CHECK flow + // Returns empty response if a valid allocation already exists or an error otherwise. + // If multiple pools are provided in the request, then check + // succeed only if it is succeed for all pools. + // errors: + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + // NotFound - allocation not found + rpc IsAllocated(IsAllocatedRequest) returns (IsAllocatedResponse) {} + // Deallocate is called as a part of CMD_DEL flow. + // Returns empty response if allocation for all pools released successfully or error otherwise. + // If multiple pools are provided in the request, then deallocation + // succeeds if it is succeeds for all pools else no deallocation is performed. 
+ // errors: + // Canceled - request was canceled by the caller + // Internal - internal failure of the service, this error can't be fixed by the caller + // InvalidArgument - missing required argument or argument has wrong format, check message for details + rpc Deallocate(DeallocateRequest) returns (DeallocateResponse) {} +} + +// AllocateRequest contains parameters for Allocate rpc call +message AllocateRequest { + // required, IPAMParameters contains parameters IPAM parameters related to the request + IPAMParameters parameters = 1; +} + +// IPAMParameters common message which contains information used in all rpc calls +message IPAMParameters { + // required, list of pools in which IP addresses should be allocated, + // must contain one or two elements (dual-stack, IPv4 + IPv6 use-case) + repeated string pools = 1; + // required, a unique plaintext identifier for a container, allocated by the runtime + string cni_containerid = 2; + // required, name of the interface inside the container + string cni_ifname = 3; + // required, additional metadata to identify IP allocation + IPAMMetadata metadata = 4; +} + +// IPAMMetadata contains metadata for IPAM calls +message IPAMMetadata { + // required, name of the k8s pod + string k8s_pod_name = 1; + // required, namespace of the k8s pod + string k8s_pod_namespace = 2; + // optional, UID of the k8s pod, k8s_pod_uid exist in containerd >= 1.6 cr-io >= 0.3 + string k8s_pod_uid = 3; + // optional, PCI device ID related to the allocation + string device_id = 4; +} + +// IsAllocatedRequest contains parameters for IsAllocated rpc call +message IsAllocatedRequest { + // required, IPAMParameters contains parameters IPAM parameters related to the request + IPAMParameters parameters = 1; +} + +// DeallocateRequest contains parameters for Deallocate rpc call +message DeallocateRequest { + // required, IPAMParameters contains parameters IPAM parameters related to the request + IPAMParameters parameters = 1; +} + +// AllocateResponse contains reply for Allocate rpc call +message AllocateResponse { + // list of allocated IPs + repeated AllocationInfo allocations = 1; +} + +// AllocationInfo contains information about the allocation +message AllocationInfo { + // name of the pool in which this IP was allocated + string pool = 1; + // allocated IP together with prefix length, e.g. 
192.168.10.33/24 + string ip = 2; + // gateway for allocated IP + string gateway = 3; +} + +// IsAllocatedReply contains reply for IsAllocated rpc call +message IsAllocatedResponse {} + +// DeallocateReply contains reply for Deallocate rpc call +message DeallocateResponse {} diff --git a/go.mod b/go.mod index 99a895b..d85ade6 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,8 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.3 + google.golang.org/grpc v1.56.2 + google.golang.org/protobuf v1.31.0 k8s.io/api v0.26.4 k8s.io/apimachinery v0.26.4 k8s.io/client-go v0.26.4 @@ -24,7 +26,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-iptables v0.6.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect @@ -65,15 +67,15 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/term v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 6c48b7f..012ac98 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,9 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -401,16 +402,17 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -470,12 +472,12 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -483,8 +485,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -594,6 +596,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -606,6 +610,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -618,8 +624,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 
4638893eaa190463f91774269cb7405041a1426b Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Tue, 4 Jul 2023 12:39:38 +0300 Subject: [PATCH 04/18] Rename pool.Manager to pool.ConfigReader and add Manager Signed-off-by: Yury Kulazhenkov --- cmd/ipam-controller/app/app_test.go | 4 +- pkg/ipam-controller/allocator/allocator.go | 4 +- pkg/ipam-controller/controllers/node/node.go | 4 +- pkg/pool/manager.go | 82 ++++++++ pkg/pool/manager_test.go | 47 +++++ pkg/pool/mocks/Manager.go | 199 +++++++++++++++++++ pkg/pool/{pool.go => reader.go} | 29 ++- pkg/pool/{pool_test.go => reader_test.go} | 22 +- 8 files changed, 363 insertions(+), 28 deletions(-) create mode 100644 pkg/pool/manager.go create mode 100644 pkg/pool/manager_test.go create mode 100644 pkg/pool/mocks/Manager.go rename pkg/pool/{pool.go => reader.go} (72%) rename pkg/pool/{pool_test.go => reader_test.go} (84%) diff --git a/cmd/ipam-controller/app/app_test.go b/cmd/ipam-controller/app/app_test.go index 4fda43c..2dc7db4 100644 --- a/cmd/ipam-controller/app/app_test.go +++ b/cmd/ipam-controller/app/app_test.go @@ -101,11 +101,11 @@ func updateNode(node *corev1.Node) *corev1.Node { func getRangeFromNode(nodeName string) map[string]*pool.IPPool { node := getNode(nodeName) - mgr, err := pool.NewManagerImpl(node) + poolCfg, err := pool.NewConfigReader(node) if err != nil { return nil } - return mgr.GetPools() + return poolCfg.GetPools() } // WaitAndCheckForStability wait for condition and then check it is stable for 1 second diff --git a/pkg/ipam-controller/allocator/allocator.go b/pkg/ipam-controller/allocator/allocator.go index bbf17b4..97ea52a 100644 --- a/pkg/ipam-controller/allocator/allocator.go +++ b/pkg/ipam-controller/allocator/allocator.go @@ -240,14 +240,14 @@ func (a *Allocator) ConfigureAndLoadAllocations(ctx context.Context, configs []A for i := range nodes { node := nodes[i] nodeLog := log.WithValues("node", node.Name) - nodePoolMgr, err := pool.NewManagerImpl(&node) + poolCfg, err := pool.NewConfigReader(&node) if err != nil { nodeLog.Info("skip loading data from the node", "reason", err.Error()) continue } // load allocators only for know pools (pools which are defined in the config) for poolName, poolData := range a.allocators { - nodeIPPoolConfig := nodePoolMgr.GetPoolByName(poolName) + nodeIPPoolConfig := poolCfg.GetPoolByName(poolName) allocInfo, err := ipPoolConfigToNodeAllocationInfo(node.Name, nodeIPPoolConfig) logErr := func(err error) { nodeLog.Info("ignore allocation info from node", diff --git a/pkg/ipam-controller/controllers/node/node.go b/pkg/ipam-controller/controllers/node/node.go index 1f7cf4a..26ff442 100644 --- a/pkg/ipam-controller/controllers/node/node.go +++ b/pkg/ipam-controller/controllers/node/node.go @@ -74,9 +74,9 @@ func (r *NodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. } var existingNodeAlloc map[string]*pool.IPPool - nodeAllocManager, err := pool.NewManagerImpl(node) + poolCfg, err := pool.NewConfigReader(node) if err == nil { - existingNodeAlloc = nodeAllocManager.GetPools() + existingNodeAlloc = poolCfg.GetPools() } expectedAlloc, err := r.Allocator.Allocate(ctx, node.Name) diff --git a/pkg/pool/manager.go b/pkg/pool/manager.go new file mode 100644 index 0000000..37ea6fa --- /dev/null +++ b/pkg/pool/manager.go @@ -0,0 +1,82 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
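Note (illustrative, not part of the patch): the call-site changes above all follow the same pattern — build a ConfigReader from a Node object and query it for the pool configuration parsed from the ip-pool annotation. A minimal sketch of that flow, assuming a *corev1.Node that already carries the annotation; the helper name is hypothetical:

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/Mellanox/nvidia-k8s-ipam/pkg/pool"
)

// printPools is a hypothetical helper used only for this example.
func printPools(node *corev1.Node) error {
	reader, err := pool.NewConfigReader(node)
	if err != nil {
		// the node has no ip-pool annotation, or the annotation is not valid JSON
		return fmt.Errorf("failed to read pool config from node: %w", err)
	}
	for name, p := range reader.GetPools() {
		fmt.Printf("pool %s: subnet=%s range=%s-%s gw=%s\n",
			name, p.Subnet, p.StartIP, p.EndIP, p.Gateway)
	}
	return nil
}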
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pool + +import ( + "fmt" + "sync" + + corev1 "k8s.io/api/core/v1" +) + +// Manager provide access to pools configuration +// +//go:generate mockery --name Manager +type Manager interface { + ConfigReader + // Update Pool's configs from node object, + // returns an error if node object doesn't contain valid config + Update(node *corev1.Node) error + // Reset clean Pool config which is cached in memory + Reset() +} + +// NewManager create and initialize new manager instance +func NewManager() Manager { + return &manager{} +} + +type manager struct { + lock sync.Mutex + reader ConfigReader +} + +// GetPoolByName is the Manager interface implementation for the manager +func (m *manager) GetPoolByName(name string) *IPPool { + m.lock.Lock() + defer m.lock.Unlock() + if m.reader == nil { + return nil + } + return m.reader.GetPoolByName(name) +} + +// GetPools is the Manager interface implementation for the manager +func (m *manager) GetPools() map[string]*IPPool { + m.lock.Lock() + defer m.lock.Unlock() + if m.reader == nil { + return nil + } + return m.reader.GetPools() +} + +// Update is the Manager interface implementation for the manager +func (m *manager) Update(node *corev1.Node) error { + m.lock.Lock() + defer m.lock.Unlock() + r, err := NewConfigReader(node) + if err != nil { + return fmt.Errorf("failed to update pools configuration from the node object: %v", err) + } + m.reader = r + return nil +} + +// Reset is the Manager interface implementation for the manager +func (m *manager) Reset() { + m.lock.Lock() + defer m.lock.Unlock() + m.reader = nil +} diff --git a/pkg/pool/manager_test.go b/pkg/pool/manager_test.go new file mode 100644 index 0000000..585c316 --- /dev/null +++ b/pkg/pool/manager_test.go @@ -0,0 +1,47 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pool_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" +) + +var _ = Describe("Manager", func() { + It("Update pool data", func() { + testPools := make(map[string]*pool.IPPool) + testPoolName := "my-pool-1" + testPools[testPoolName] = &pool.IPPool{ + Name: "my-pool-1", + Subnet: "192.168.0.0/16", + StartIP: "192.168.0.2", + EndIP: "192.168.0.254", + Gateway: "192.168.0.1", + } + node := &corev1.Node{} + Expect(pool.SetIPBlockAnnotation(node, testPools)).NotTo(HaveOccurred()) + + mgr := pool.NewManager() + Expect(mgr.GetPoolByName(testPoolName)).To(BeNil()) + Expect(mgr.Update(node)).NotTo(HaveOccurred()) + Expect(mgr.GetPoolByName(testPoolName)).NotTo(BeNil()) + Expect(mgr.GetPools()).To(HaveLen(1)) + mgr.Reset() + Expect(mgr.GetPoolByName(testPoolName)).To(BeNil()) + }) +}) diff --git a/pkg/pool/mocks/Manager.go b/pkg/pool/mocks/Manager.go new file mode 100644 index 0000000..ba7ca01 --- /dev/null +++ b/pkg/pool/mocks/Manager.go @@ -0,0 +1,199 @@ +// Code generated by mockery v2.27.1. DO NOT EDIT. + +package mocks + +import ( + pool "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" + mock "github.com/stretchr/testify/mock" + + v1 "k8s.io/api/core/v1" +) + +// Manager is an autogenerated mock type for the Manager type +type Manager struct { + mock.Mock +} + +type Manager_Expecter struct { + mock *mock.Mock +} + +func (_m *Manager) EXPECT() *Manager_Expecter { + return &Manager_Expecter{mock: &_m.Mock} +} + +// GetPoolByName provides a mock function with given fields: name +func (_m *Manager) GetPoolByName(name string) *pool.IPPool { + ret := _m.Called(name) + + var r0 *pool.IPPool + if rf, ok := ret.Get(0).(func(string) *pool.IPPool); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pool.IPPool) + } + } + + return r0 +} + +// Manager_GetPoolByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPoolByName' +type Manager_GetPoolByName_Call struct { + *mock.Call +} + +// GetPoolByName is a helper method to define mock.On call +// - name string +func (_e *Manager_Expecter) GetPoolByName(name interface{}) *Manager_GetPoolByName_Call { + return &Manager_GetPoolByName_Call{Call: _e.mock.On("GetPoolByName", name)} +} + +func (_c *Manager_GetPoolByName_Call) Run(run func(name string)) *Manager_GetPoolByName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Manager_GetPoolByName_Call) Return(_a0 *pool.IPPool) *Manager_GetPoolByName_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Manager_GetPoolByName_Call) RunAndReturn(run func(string) *pool.IPPool) *Manager_GetPoolByName_Call { + _c.Call.Return(run) + return _c +} + +// GetPools provides a mock function with given fields: +func (_m *Manager) GetPools() map[string]*pool.IPPool { + ret := _m.Called() + + var r0 map[string]*pool.IPPool + if rf, ok := ret.Get(0).(func() map[string]*pool.IPPool); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*pool.IPPool) + } + } + + return r0 +} + +// Manager_GetPools_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPools' +type Manager_GetPools_Call struct { + *mock.Call +} + +// GetPools is a helper method to define mock.On call +func (_e *Manager_Expecter) GetPools() *Manager_GetPools_Call { + return &Manager_GetPools_Call{Call: _e.mock.On("GetPools")} +} + +func (_c *Manager_GetPools_Call) Run(run func()) *Manager_GetPools_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Manager_GetPools_Call) Return(_a0 map[string]*pool.IPPool) *Manager_GetPools_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Manager_GetPools_Call) RunAndReturn(run func() map[string]*pool.IPPool) *Manager_GetPools_Call { + _c.Call.Return(run) + return _c +} + +// Reset provides a mock function with given fields: +func (_m *Manager) Reset() { + _m.Called() +} + +// Manager_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset' +type Manager_Reset_Call struct { + *mock.Call +} + +// Reset is a helper method to define mock.On call +func (_e *Manager_Expecter) Reset() *Manager_Reset_Call { + return &Manager_Reset_Call{Call: _e.mock.On("Reset")} +} + +func (_c *Manager_Reset_Call) Run(run func()) *Manager_Reset_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Manager_Reset_Call) Return() *Manager_Reset_Call { + _c.Call.Return() + return _c +} + +func (_c *Manager_Reset_Call) RunAndReturn(run func()) *Manager_Reset_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: node +func (_m *Manager) Update(node *v1.Node) error { + ret := _m.Called(node) + + var r0 error + if rf, ok := ret.Get(0).(func(*v1.Node) error); ok { + r0 = rf(node) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Manager_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type Manager_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - node *v1.Node +func (_e *Manager_Expecter) Update(node interface{}) *Manager_Update_Call { + return &Manager_Update_Call{Call: _e.mock.On("Update", node)} +} + +func (_c *Manager_Update_Call) Run(run func(node *v1.Node)) *Manager_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*v1.Node)) + }) + return _c +} + +func (_c *Manager_Update_Call) Return(_a0 error) *Manager_Update_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Manager_Update_Call) RunAndReturn(run func(*v1.Node) error) *Manager_Update_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewManager interface { + mock.TestingT + Cleanup(func()) +} + +// NewManager creates a new instance of Manager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
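As an aside (illustrative, not part of the generated file): the mockery-generated Manager mock is meant to be driven through its EXPECT() helpers in consumer tests. A small sketch using the standard testing package; names and values are examples only:

package example_test

import (
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/Mellanox/nvidia-k8s-ipam/pkg/pool"
	poolMocks "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool/mocks"
)

func TestConsumerWithManagerMock(t *testing.T) {
	// NewManager registers AssertExpectations via t.Cleanup
	mgr := poolMocks.NewManager(t)
	mgr.EXPECT().Update(&corev1.Node{}).Return(nil)
	mgr.EXPECT().GetPoolByName("my-pool").Return(&pool.IPPool{Name: "my-pool", Subnet: "192.168.0.0/16"})

	if err := mgr.Update(&corev1.Node{}); err != nil {
		t.Fatalf("unexpected Update error: %v", err)
	}
	if p := mgr.GetPoolByName("my-pool"); p == nil || p.Subnet != "192.168.0.0/16" {
		t.Fatalf("unexpected pool: %+v", p)
	}
}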
+func NewManager(t mockConstructorTestingTNewManager) *Manager { + mock := &Manager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/pool/pool.go b/pkg/pool/reader.go similarity index 72% rename from pkg/pool/pool.go rename to pkg/pool/reader.go index f9b48ec..79dec63 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/reader.go @@ -33,19 +33,26 @@ type IPPool struct { Gateway string `json:"gateway"` } -// Manager is an interface to manage IPPools -type Manager interface { +// String return string representation of the IPPool config +func (p *IPPool) String() string { + //nolint:errchkjson + data, _ := json.Marshal(p) + return string(data) +} + +// ConfigReader is an interface to which provides access to the pool configuration +type ConfigReader interface { // GetPoolByName returns IPPool for the provided pool name or nil if pool doesnt exist GetPoolByName(name string) *IPPool // GetPools returns map with information about all pools GetPools() map[string]*IPPool } -type ManagerImpl struct { +type configReader struct { poolByName map[string]*IPPool } -func NewManagerImpl(node *v1.Node) (*ManagerImpl, error) { +func NewConfigReader(node *v1.Node) (ConfigReader, error) { if node == nil { return nil, fmt.Errorf("nil node provided") } @@ -65,17 +72,17 @@ func NewManagerImpl(node *v1.Node) (*ManagerImpl, error) { pool.Name = poolName } - return &ManagerImpl{ + return &configReader{ poolByName: poolByName, }, nil } -// GetPoolByName implements Manager interface -func (pm *ManagerImpl) GetPoolByName(name string) *IPPool { - return pm.poolByName[name] +// GetPoolByName implements ConfigReader interface +func (r *configReader) GetPoolByName(name string) *IPPool { + return r.poolByName[name] } -// GetPools implements Manager interface -func (pm *ManagerImpl) GetPools() map[string]*IPPool { - return pm.poolByName +// GetPools implements ConfigReader interface +func (r *configReader) GetPools() map[string]*IPPool { + return r.poolByName } diff --git a/pkg/pool/pool_test.go b/pkg/pool/reader_test.go similarity index 84% rename from pkg/pool/pool_test.go rename to pkg/pool/reader_test.go index 33d4527..ee7b947 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/reader_test.go @@ -23,16 +23,16 @@ import ( ) var _ = Describe("pool tests", func() { - Context("NewManagerImpl()", func() { + Context("NewConfigReader()", func() { It("Creates a Manager successfully if node has ip-pool annotation", func() { n := v1.Node{} emptyAnnot := map[string]string{ pool.IPBlocksAnnotation: "{}", } n.SetAnnotations(emptyAnnot) - m, err := pool.NewManagerImpl(&n) + r, err := pool.NewConfigReader(&n) Expect(err).ToNot(HaveOccurred()) - Expect(m.GetPools()).To(HaveLen(0)) + Expect(r.GetPools()).To(HaveLen(0)) annot := map[string]string{ pool.IPBlocksAnnotation: `{"my-pool": @@ -40,14 +40,14 @@ var _ = Describe("pool tests", func() { "endIP": "192.168.0.254", "gateway": "192.168.0.1"}}`, } n.SetAnnotations(annot) - m, err = pool.NewManagerImpl(&n) + r, err = pool.NewConfigReader(&n) Expect(err).ToNot(HaveOccurred()) - Expect(m.GetPools()).To(HaveLen(1)) + Expect(r.GetPools()).To(HaveLen(1)) }) It("Fails to create Manager if node is missing ip-pool annotation", func() { n := v1.Node{} - _, err := pool.NewManagerImpl(&n) + _, err := pool.NewConfigReader(&n) Expect(err).To(HaveOccurred()) }) @@ -57,13 +57,13 @@ var _ = Describe("pool tests", func() { pool.IPBlocksAnnotation: "", } n.SetAnnotations(emptyAnnot) - _, err := pool.NewManagerImpl(&n) + _, err := 
pool.NewConfigReader(&n) Expect(err).To(HaveOccurred()) }) }) Context("GetPoolByName()", func() { - var m pool.Manager + var r pool.ConfigReader BeforeEach(func() { var err error @@ -74,17 +74,17 @@ var _ = Describe("pool tests", func() { "endIP": "192.168.0.254", "gateway": "192.168.0.1"}}`, } n.SetAnnotations(annot) - m, err = pool.NewManagerImpl(&n) + r, err = pool.NewConfigReader(&n) Expect(err).ToNot(HaveOccurred()) }) It("returns nil if pool does not exist", func() { - p := m.GetPoolByName("non-existent-pool") + p := r.GetPoolByName("non-existent-pool") Expect(p).To(BeNil()) }) It("returns pool if exists", func() { - p := m.GetPoolByName("my-pool") + p := r.GetPoolByName("my-pool") Expect(p).ToNot(BeNil()) Expect(p.Subnet).To(Equal("192.168.0.0/16")) }) From 9d2019fd96a02c4ceecccf007f586f4b485e111e Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 30 Jun 2023 14:24:51 +0300 Subject: [PATCH 05/18] ipam-node: Add filesystem store implementation Signed-off-by: Yury Kulazhenkov --- go.mod | 8 +- go.sum | 17 +- pkg/ipam-node/store/mocks/Session.go | 404 ++++++++++++++++++++++++ pkg/ipam-node/store/mocks/Store.go | 92 ++++++ pkg/ipam-node/store/store.go | 309 ++++++++++++++++++ pkg/ipam-node/store/store_suite_test.go | 26 ++ pkg/ipam-node/store/store_test.go | 233 ++++++++++++++ pkg/ipam-node/types/checksum.go | 54 ++++ pkg/ipam-node/types/checksum_test.go | 32 ++ pkg/ipam-node/types/types.go | 142 +++++++++ pkg/ipam-node/types/types_suite_test.go | 26 ++ pkg/ipam-node/types/types_test.go | 57 ++++ 12 files changed, 1394 insertions(+), 6 deletions(-) create mode 100644 pkg/ipam-node/store/mocks/Session.go create mode 100644 pkg/ipam-node/store/mocks/Store.go create mode 100644 pkg/ipam-node/store/store.go create mode 100644 pkg/ipam-node/store/store_suite_test.go create mode 100644 pkg/ipam-node/store/store_test.go create mode 100644 pkg/ipam-node/types/checksum.go create mode 100644 pkg/ipam-node/types/checksum_test.go create mode 100644 pkg/ipam-node/types/types.go create mode 100644 pkg/ipam-node/types/types_suite_test.go create mode 100644 pkg/ipam-node/types/types_test.go diff --git a/go.mod b/go.mod index d85ade6..0a4f814 100644 --- a/go.mod +++ b/go.mod @@ -6,9 +6,10 @@ require ( github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.2.0 github.com/go-logr/logr v1.2.4 + github.com/google/renameio/v2 v2.0.0 github.com/k8snetworkplumbingwg/cni-log v0.0.0-20230321145726-634c593dd11f - github.com/onsi/ginkgo/v2 v2.6.1 - github.com/onsi/gomega v1.24.2 + github.com/onsi/ginkgo/v2 v2.9.2 + github.com/onsi/gomega v1.27.6 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.3 @@ -37,12 +38,14 @@ require ( github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.1 // indirect github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect + github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -73,6 +76,7 @@ require ( golang.org/x/term v0.7.0 // indirect 
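The new github.com/google/renameio/v2 dependency above is pulled in for the store added later in this patch: it writes the state to a temporary file in the same directory and then atomically renames it over the target, so a crash never leaves a half-written file behind. A minimal sketch of the pattern (path and data are placeholders):

package example

import "github.com/google/renameio/v2"

// persist atomically replaces the file at path with data.
func persist(path string, data []byte) error {
	// write to a temporary file next to path, then rename it over path
	return renameio.WriteFile(path, data, 0o664)
}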
golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.7.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect diff --git a/go.sum b/go.sum index 012ac98..fc7b9b1 100644 --- a/go.sum +++ b/go.sum @@ -112,6 +112,8 @@ github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -174,7 +176,11 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= +github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -242,13 +248,13 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q= -github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= -github.com/onsi/gomega v1.24.2/go.mod 
h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -302,6 +308,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -536,6 +543,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/ipam-node/store/mocks/Session.go b/pkg/ipam-node/store/mocks/Session.go new file mode 100644 index 0000000..2cfa5d5 --- /dev/null +++ b/pkg/ipam-node/store/mocks/Session.go @@ -0,0 +1,404 @@ +// Code generated by mockery v2.27.1. DO NOT EDIT. 
+ +package mocks + +import ( + net "net" + + mock "github.com/stretchr/testify/mock" + + types "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +// Session is an autogenerated mock type for the Session type +type Session struct { + mock.Mock +} + +type Session_Expecter struct { + mock *mock.Mock +} + +func (_m *Session) EXPECT() *Session_Expecter { + return &Session_Expecter{mock: &_m.Mock} +} + +// Cancel provides a mock function with given fields: +func (_m *Session) Cancel() { + _m.Called() +} + +// Session_Cancel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cancel' +type Session_Cancel_Call struct { + *mock.Call +} + +// Cancel is a helper method to define mock.On call +func (_e *Session_Expecter) Cancel() *Session_Cancel_Call { + return &Session_Cancel_Call{Call: _e.mock.On("Cancel")} +} + +func (_c *Session_Cancel_Call) Run(run func()) *Session_Cancel_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Session_Cancel_Call) Return() *Session_Cancel_Call { + _c.Call.Return() + return _c +} + +func (_c *Session_Cancel_Call) RunAndReturn(run func()) *Session_Cancel_Call { + _c.Call.Return(run) + return _c +} + +// Commit provides a mock function with given fields: +func (_m *Session) Commit() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Session_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' +type Session_Commit_Call struct { + *mock.Call +} + +// Commit is a helper method to define mock.On call +func (_e *Session_Expecter) Commit() *Session_Commit_Call { + return &Session_Commit_Call{Call: _e.mock.On("Commit")} +} + +func (_c *Session_Commit_Call) Run(run func()) *Session_Commit_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Session_Commit_Call) Return(_a0 error) *Session_Commit_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Session_Commit_Call) RunAndReturn(run func() error) *Session_Commit_Call { + _c.Call.Return(run) + return _c +} + +// GetLastReservedIP provides a mock function with given fields: pool +func (_m *Session) GetLastReservedIP(pool string) net.IP { + ret := _m.Called(pool) + + var r0 net.IP + if rf, ok := ret.Get(0).(func(string) net.IP); ok { + r0 = rf(pool) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.IP) + } + } + + return r0 +} + +// Session_GetLastReservedIP_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastReservedIP' +type Session_GetLastReservedIP_Call struct { + *mock.Call +} + +// GetLastReservedIP is a helper method to define mock.On call +// - pool string +func (_e *Session_Expecter) GetLastReservedIP(pool interface{}) *Session_GetLastReservedIP_Call { + return &Session_GetLastReservedIP_Call{Call: _e.mock.On("GetLastReservedIP", pool)} +} + +func (_c *Session_GetLastReservedIP_Call) Run(run func(pool string)) *Session_GetLastReservedIP_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Session_GetLastReservedIP_Call) Return(_a0 net.IP) *Session_GetLastReservedIP_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Session_GetLastReservedIP_Call) RunAndReturn(run func(string) net.IP) *Session_GetLastReservedIP_Call { + _c.Call.Return(run) + return _c +} + +// GetReservationByID provides a mock function with given 
fields: pool, id, ifName +func (_m *Session) GetReservationByID(pool string, id string, ifName string) *types.Reservation { + ret := _m.Called(pool, id, ifName) + + var r0 *types.Reservation + if rf, ok := ret.Get(0).(func(string, string, string) *types.Reservation); ok { + r0 = rf(pool, id, ifName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Reservation) + } + } + + return r0 +} + +// Session_GetReservationByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReservationByID' +type Session_GetReservationByID_Call struct { + *mock.Call +} + +// GetReservationByID is a helper method to define mock.On call +// - pool string +// - id string +// - ifName string +func (_e *Session_Expecter) GetReservationByID(pool interface{}, id interface{}, ifName interface{}) *Session_GetReservationByID_Call { + return &Session_GetReservationByID_Call{Call: _e.mock.On("GetReservationByID", pool, id, ifName)} +} + +func (_c *Session_GetReservationByID_Call) Run(run func(pool string, id string, ifName string)) *Session_GetReservationByID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *Session_GetReservationByID_Call) Return(_a0 *types.Reservation) *Session_GetReservationByID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Session_GetReservationByID_Call) RunAndReturn(run func(string, string, string) *types.Reservation) *Session_GetReservationByID_Call { + _c.Call.Return(run) + return _c +} + +// ListPools provides a mock function with given fields: +func (_m *Session) ListPools() []string { + ret := _m.Called() + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +// Session_ListPools_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListPools' +type Session_ListPools_Call struct { + *mock.Call +} + +// ListPools is a helper method to define mock.On call +func (_e *Session_Expecter) ListPools() *Session_ListPools_Call { + return &Session_ListPools_Call{Call: _e.mock.On("ListPools")} +} + +func (_c *Session_ListPools_Call) Run(run func()) *Session_ListPools_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Session_ListPools_Call) Return(_a0 []string) *Session_ListPools_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Session_ListPools_Call) RunAndReturn(run func() []string) *Session_ListPools_Call { + _c.Call.Return(run) + return _c +} + +// ListReservations provides a mock function with given fields: pool +func (_m *Session) ListReservations(pool string) []types.Reservation { + ret := _m.Called(pool) + + var r0 []types.Reservation + if rf, ok := ret.Get(0).(func(string) []types.Reservation); ok { + r0 = rf(pool) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Reservation) + } + } + + return r0 +} + +// Session_ListReservations_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListReservations' +type Session_ListReservations_Call struct { + *mock.Call +} + +// ListReservations is a helper method to define mock.On call +// - pool string +func (_e *Session_Expecter) ListReservations(pool interface{}) *Session_ListReservations_Call { + return &Session_ListReservations_Call{Call: _e.mock.On("ListReservations", pool)} +} + +func (_c *Session_ListReservations_Call) Run(run func(pool 
string)) *Session_ListReservations_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Session_ListReservations_Call) Return(_a0 []types.Reservation) *Session_ListReservations_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Session_ListReservations_Call) RunAndReturn(run func(string) []types.Reservation) *Session_ListReservations_Call { + _c.Call.Return(run) + return _c +} + +// ReleaseReservationByID provides a mock function with given fields: pool, id, ifName +func (_m *Session) ReleaseReservationByID(pool string, id string, ifName string) { + _m.Called(pool, id, ifName) +} + +// Session_ReleaseReservationByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReleaseReservationByID' +type Session_ReleaseReservationByID_Call struct { + *mock.Call +} + +// ReleaseReservationByID is a helper method to define mock.On call +// - pool string +// - id string +// - ifName string +func (_e *Session_Expecter) ReleaseReservationByID(pool interface{}, id interface{}, ifName interface{}) *Session_ReleaseReservationByID_Call { + return &Session_ReleaseReservationByID_Call{Call: _e.mock.On("ReleaseReservationByID", pool, id, ifName)} +} + +func (_c *Session_ReleaseReservationByID_Call) Run(run func(pool string, id string, ifName string)) *Session_ReleaseReservationByID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *Session_ReleaseReservationByID_Call) Return() *Session_ReleaseReservationByID_Call { + _c.Call.Return() + return _c +} + +func (_c *Session_ReleaseReservationByID_Call) RunAndReturn(run func(string, string, string)) *Session_ReleaseReservationByID_Call { + _c.Call.Return(run) + return _c +} + +// Reserve provides a mock function with given fields: pool, id, ifName, meta, address +func (_m *Session) Reserve(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error { + ret := _m.Called(pool, id, ifName, meta, address) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, string, types.ReservationMetadata, net.IP) error); ok { + r0 = rf(pool, id, ifName, meta, address) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Session_Reserve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reserve' +type Session_Reserve_Call struct { + *mock.Call +} + +// Reserve is a helper method to define mock.On call +// - pool string +// - id string +// - ifName string +// - meta types.ReservationMetadata +// - address net.IP +func (_e *Session_Expecter) Reserve(pool interface{}, id interface{}, ifName interface{}, meta interface{}, address interface{}) *Session_Reserve_Call { + return &Session_Reserve_Call{Call: _e.mock.On("Reserve", pool, id, ifName, meta, address)} +} + +func (_c *Session_Reserve_Call) Run(run func(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP)) *Session_Reserve_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(string), args[3].(types.ReservationMetadata), args[4].(net.IP)) + }) + return _c +} + +func (_c *Session_Reserve_Call) Return(_a0 error) *Session_Reserve_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Session_Reserve_Call) RunAndReturn(run func(string, string, string, types.ReservationMetadata, net.IP) error) *Session_Reserve_Call { + _c.Call.Return(run) + return _c +} + +// SetLastReservedIP 
provides a mock function with given fields: pool, ip +func (_m *Session) SetLastReservedIP(pool string, ip net.IP) { + _m.Called(pool, ip) +} + +// Session_SetLastReservedIP_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetLastReservedIP' +type Session_SetLastReservedIP_Call struct { + *mock.Call +} + +// SetLastReservedIP is a helper method to define mock.On call +// - pool string +// - ip net.IP +func (_e *Session_Expecter) SetLastReservedIP(pool interface{}, ip interface{}) *Session_SetLastReservedIP_Call { + return &Session_SetLastReservedIP_Call{Call: _e.mock.On("SetLastReservedIP", pool, ip)} +} + +func (_c *Session_SetLastReservedIP_Call) Run(run func(pool string, ip net.IP)) *Session_SetLastReservedIP_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(net.IP)) + }) + return _c +} + +func (_c *Session_SetLastReservedIP_Call) Return() *Session_SetLastReservedIP_Call { + _c.Call.Return() + return _c +} + +func (_c *Session_SetLastReservedIP_Call) RunAndReturn(run func(string, net.IP)) *Session_SetLastReservedIP_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewSession interface { + mock.TestingT + Cleanup(func()) +} + +// NewSession creates a new instance of Session. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSession(t mockConstructorTestingTNewSession) *Session { + mock := &Session{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/ipam-node/store/mocks/Store.go b/pkg/ipam-node/store/mocks/Store.go new file mode 100644 index 0000000..b686150 --- /dev/null +++ b/pkg/ipam-node/store/mocks/Store.go @@ -0,0 +1,92 @@ +// Code generated by mockery v2.27.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + store "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + mock "github.com/stretchr/testify/mock" +) + +// Store is an autogenerated mock type for the Store type +type Store struct { + mock.Mock +} + +type Store_Expecter struct { + mock *mock.Mock +} + +func (_m *Store) EXPECT() *Store_Expecter { + return &Store_Expecter{mock: &_m.Mock} +} + +// Open provides a mock function with given fields: ctx +func (_m *Store) Open(ctx context.Context) (store.Session, error) { + ret := _m.Called(ctx) + + var r0 store.Session + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (store.Session, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) store.Session); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(store.Session) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Store_Open_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Open' +type Store_Open_Call struct { + *mock.Call +} + +// Open is a helper method to define mock.On call +// - ctx context.Context +func (_e *Store_Expecter) Open(ctx interface{}) *Store_Open_Call { + return &Store_Open_Call{Call: _e.mock.On("Open", ctx)} +} + +func (_c *Store_Open_Call) Run(run func(ctx context.Context)) *Store_Open_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Store_Open_Call) Return(_a0 store.Session, _a1 error) *Store_Open_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Store_Open_Call) RunAndReturn(run func(context.Context) (store.Session, error)) *Store_Open_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewStore interface { + mock.TestingT + Cleanup(func()) +} + +// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewStore(t mockConstructorTestingTNewStore) *Store { + mock := &Store{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/ipam-node/store/store.go b/pkg/ipam-node/store/store.go new file mode 100644 index 0000000..fb4a0b7 --- /dev/null +++ b/pkg/ipam-node/store/store.go @@ -0,0 +1,309 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package store + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/google/renameio/v2" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +var ( + // ErrReservationAlreadyExist is returned when reservation for id and interface already exist in the pool + ErrReservationAlreadyExist = errors.New("reservation already exist, " + + "duplicate allocations are not allowed by the spec") + // ErrIPAlreadyReserved is returned when trying to reserve IP which is already reserved + ErrIPAlreadyReserved = errors.New("ip address is already reserved") +) + +// Store implements API to open file store in exclusive mode +// +//go:generate mockery --name Store +type Store interface { + // Open returns a session for the store with exclusive lock. + // returns an error if failed to read persistedData from the disk. + // Open blocks if another Session is already opened. + // Session.Commit() should be called to close the session, release the lock and write changes to disk and + // Session.Cancel() can be called to close the session, revert all changes and release the lock + Open(ctx context.Context) (Session, error) +} + +// Session is the interface implemented by the store package +// +//go:generate mockery --name Session +type Session interface { + // Reserve reserves IP for the id and interface name, + // returns error if allocation failed + Reserve(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error + // ListReservations list all reservations in the pool + ListReservations(pool string) []types.Reservation + // ListPools return list with names of all known pools + ListPools() []string + // GetLastReservedIP returns last reserved IP for the pool or nil + GetLastReservedIP(pool string) net.IP + // SetLastReservedIP set last reserved IP fot the pool + SetLastReservedIP(pool string, ip net.IP) + // ReleaseReservationByID releases reservation by id and interface name + ReleaseReservationByID(pool string, id string, ifName string) + // GetReservationByID returns existing reservation for id and interface name, + // return nil if allocation not found + GetReservationByID(pool string, id string, ifName string) *types.Reservation + // Commit writes persistedData to the disk and release the lock. 
+ // the store can't be used after this call + Commit() error + // Cancel cancels all modifications and release the lock + // the store can't be used after this call + Cancel() +} + +// New create and initialize new store instance +func New(storeFile string) Store { + return &store{ + storeFile: storeFile, + lock: &sync.Mutex{}, + } +} + +type store struct { + storeFile string + persistedData *types.Root + lock *sync.Mutex +} + +// Open is the Store interface implementation for store +func (m *store) Open(ctx context.Context) (Session, error) { + m.lock.Lock() + logger := logr.FromContextOrDiscard(ctx) + if m.persistedData == nil { + diskData, err := m.loadFromDisk() + if err != nil { + m.lock.Unlock() + logger.Error(err, "failed to load store data from the disk") + return nil, err + } + m.persistedData = diskData + } + return newStore(m.storeFile, m.persistedData, m.lock, logger), nil +} + +func (m *store) loadFromDisk() (*types.Root, error) { + dst := &types.Root{} + data, err := os.ReadFile(m.storeFile) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("failed to read store persistedData: %v", err) + } + if len(data) != 0 { + if err := json.Unmarshal(data, dst); err != nil { + return nil, fmt.Errorf("failed to unmarshal store persistedData: %v", err) + } + if dst.Checksum == 0 { + return nil, fmt.Errorf("checksum not set in store file") + } + if err := dst.Checksum.Verify(dst); err != nil { + return nil, fmt.Errorf("store file corrupted, checksum mismatch") + } + } else { + dst = types.NewRoot() + } + if dst.Pools == nil { + dst.Pools = map[string]types.PoolReservations{} + } + return dst, nil +} + +func newStore(storeFile string, persistentData *types.Root, lock *sync.Mutex, logger logr.Logger) *session { + s := &session{ + log: logger, + lock: lock, + storeFile: storeFile, + persistedData: persistentData, + tmpData: persistentData.DeepCopy(), + } + return s +} + +// default session implementation +type session struct { + log logr.Logger + lock *sync.Mutex + storeFile string + // store holds this data instance + persistedData *types.Root + // temp data, this data is local for session instance + tmpData *types.Root + isModified bool + isClosed bool +} + +// Commit is the Session interface implementation for session +func (s *session) Commit() error { + s.checkClosed() + defer s.markClosed() + defer s.lock.Unlock() + if !s.isModified { + return nil + } + if err := s.writeToDisk(s.tmpData); err != nil { + s.log.Error(err, "failed to write data to disk") + return err + } + *s.persistedData = *s.tmpData.DeepCopy() + return nil +} + +// Cancel is the Session interface implementation for session +func (s *session) Cancel() { + s.checkClosed() + defer s.markClosed() + defer s.lock.Unlock() +} + +func (s *session) markClosed() { + s.isClosed = true +} + +func (s *session) checkClosed() { + if s.isClosed { + panic("operation was performed on the closed store") + } +} + +// Reserve is the Session interface implementation for session +func (s *session) Reserve(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error { + s.checkClosed() + reservationKey := s.getKey(id, ifName) + poolData := s.getPoolData(pool, s.tmpData) + _, exist := poolData.Entries[reservationKey] + if exist { + return ErrReservationAlreadyExist + } + duplicateIP := false + for _, r := range poolData.Entries { + if address.Equal(r.IPAddress) { + duplicateIP = true + break + } + } + if duplicateIP { + // duplicate allocator should retry + return ErrIPAlreadyReserved 
+ } + poolData.LastReservedIP = address + poolData.LastPoolConfig = meta.PoolConfigSnapshot + reservation := types.Reservation{ + ContainerID: id, + InterfaceName: ifName, + IPAddress: address, + Metadata: meta, + } + reservation.Metadata.CreateTime = time.Now().Format(time.RFC3339Nano) + poolData.Entries[reservationKey] = reservation + s.tmpData.Pools[pool] = *poolData + s.isModified = true + return nil +} + +func (s *session) getPoolData(pool string, layout *types.Root) *types.PoolReservations { + res, exist := layout.Pools[pool] + if exist { + if res.Entries == nil { + res.Entries = map[string]types.Reservation{} + } + return &res + } + return types.NewPoolReservations(pool) +} + +// ListReservations is the Session interface implementation for session +func (s *session) ListReservations(pool string) []types.Reservation { + s.checkClosed() + poolData := s.getPoolData(pool, s.tmpData) + allocations := make([]types.Reservation, 0, len(poolData.Entries)) + for _, a := range poolData.Entries { + allocations = append(allocations, a) + } + return allocations +} + +// ListPools is the Session interface implementation for session +func (s *session) ListPools() []string { + s.checkClosed() + pools := make([]string, 0, len(s.tmpData.Pools)) + for poolName := range s.tmpData.Pools { + pools = append(pools, poolName) + } + return pools +} + +// GetLastReservedIP is the Session interface implementation for session +func (s *session) GetLastReservedIP(pool string) net.IP { + s.checkClosed() + poolData := s.getPoolData(pool, s.tmpData) + return poolData.LastReservedIP +} + +// SetLastReservedIP is the Session interface implementation for session +func (s *session) SetLastReservedIP(pool string, ip net.IP) { + s.checkClosed() + poolData := s.getPoolData(pool, s.tmpData) + poolData.LastReservedIP = ip + s.tmpData.Pools[pool] = *poolData + s.isModified = true +} + +// ReleaseReservationByID is the Session interface implementation for session +func (s *session) ReleaseReservationByID(pool string, id string, ifName string) { + s.checkClosed() + poolData := s.getPoolData(pool, s.tmpData) + delete(poolData.Entries, s.getKey(id, ifName)) + s.tmpData.Pools[pool] = *poolData + s.isModified = true +} + +// GetReservationByID is the Session interface implementation for session +func (s *session) GetReservationByID(pool string, id string, ifName string) *types.Reservation { + s.checkClosed() + poolData := s.getPoolData(pool, s.tmpData) + reservation, exist := poolData.Entries[s.getKey(id, ifName)] + if !exist { + return nil + } + return &reservation +} + +func (s *session) writeToDisk(src *types.Root) error { + src.Checksum = types.NewChecksum(src) + data, err := json.Marshal(src) + if err != nil { + return fmt.Errorf("failed to marshal store persistedData: %v", err) + } + if err := renameio.WriteFile(s.storeFile, data, 0664); err != nil { + return fmt.Errorf("failed to write store persistedData: %v", err) + } + return nil +} + +func (s *session) getKey(id, ifName string) string { + return id + "_" + ifName +} diff --git a/pkg/ipam-node/store/store_suite_test.go b/pkg/ipam-node/store/store_suite_test.go new file mode 100644 index 0000000..b4c7382 --- /dev/null +++ b/pkg/ipam-node/store/store_suite_test.go @@ -0,0 +1,26 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
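For orientation (illustrative, not part of the patch): a caller is expected to treat a Session as a short-lived transaction — open it, mutate state, then call either Commit or Cancel exactly once. A minimal sketch against the Store/Session API above, with pool name, container id and address as example values:

package example

import (
	"context"
	"net"

	storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store"
	"github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types"
)

func reserveOne(ctx context.Context, s storePkg.Store) error {
	// Open blocks until any previously opened session is committed or canceled
	session, err := s.Open(ctx)
	if err != nil {
		return err
	}
	err = session.Reserve("pool1", "container-id", "net0",
		types.ReservationMetadata{PodName: "my-pod", PodNamespace: "default"},
		net.ParseIP("192.168.1.100"))
	if err != nil {
		session.Cancel() // discard in-session changes and release the lock
		return err
	}
	return session.Commit() // atomically persist changes and release the lock
}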
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package store_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestStore(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Store Suite") +} diff --git a/pkg/ipam-node/store/store_test.go b/pkg/ipam-node/store/store_test.go new file mode 100644 index 0000000..8d6aa79 --- /dev/null +++ b/pkg/ipam-node/store/store_test.go @@ -0,0 +1,233 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package store_test + +import ( + "context" + "encoding/json" + "net" + "os" + "path/filepath" + "sync" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +const ( + testPoolName = "pool1" + testContainerID = "id1" + testNetIfName = "net0" + testPodUUID = "a9516e9d-6f45-4693-b299-cc3d2f83e26a" + testPodName = "testPod" + testNamespaceName = "testNamespace" + testDeviceID = "0000:d8:00.1" + testIP = "192.168.1.100" + testIP2 = "192.168.1.200" +) + +func createTestReservation(s storePkg.Session) { + ExpectWithOffset(1, s.Reserve(testPoolName, testContainerID, testNetIfName, types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PodUUID: testPodUUID, + PodName: testPodName, + PodNamespace: testNamespaceName, + DeviceID: testDeviceID, + PoolConfigSnapshot: "something", + }, net.ParseIP(testIP))).NotTo(HaveOccurred()) +} + +var _ = Describe("Store", func() { + + var ( + storePath string + store storePkg.Store + ) + + BeforeEach(func() { + storePath = filepath.Join(GinkgoT().TempDir(), "store") + store = storePkg.New(storePath) + }) + + It("Basic testing", func() { + By("Open store") + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + + By("Create reservation") + createTestReservation(s) + + By("Check reservation exist") + res := s.GetReservationByID(testPoolName, testContainerID, testNetIfName) + Expect(res).NotTo(BeNil()) + Expect(res.ContainerID).To(Equal(testContainerID)) + + resList := s.ListReservations(testPoolName) + Expect(resList).To(HaveLen(1)) + Expect(resList[0].ContainerID).To(Equal(testContainerID)) + + pools := s.ListPools() + Expect(pools).To(Equal([]string{testPoolName})) + + By("Check last reserved IP") + Expect(s.GetLastReservedIP(testPoolName)).To(Equal(net.ParseIP(testIP))) + + By("Set last reserved IP") + newLastReservedIP := net.ParseIP("192.168.1.200") + s.SetLastReservedIP(testPoolName, newLastReservedIP) + Expect(s.GetLastReservedIP(testPoolName)).To(Equal(newLastReservedIP)) + + 
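+ By("Check lookup with an unknown interface name")
+ // Illustrative check: reservations are keyed by "<container_id>_<interface_name>"
+ // (see getKey in store.go), so the same container ID combined with another
+ // interface name ("other0" is an arbitrary value used only for this example)
+ // resolves to no reservation.
+ Expect(s.GetReservationByID(testPoolName, testContainerID, "other0")).To(BeNil())
+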
By("Release reservation") + s.ReleaseReservationByID(testPoolName, testContainerID, testNetIfName) + + By("Check reservation removed") + Expect(s.GetReservationByID(testPoolName, testContainerID, testNetIfName)).To(BeNil()) + + By("Commit changes") + Expect(s.Commit()).NotTo(HaveOccurred()) + }) + It("Commit should persist data", func() { + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + createTestReservation(s) + Expect(s.Commit()).NotTo(HaveOccurred()) + + s, err = store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + res := s.GetReservationByID(testPoolName, testContainerID, testNetIfName) + Expect(res).NotTo(BeNil()) + Expect(res.ContainerID).To(Equal(testContainerID)) + }) + It("Cancel should rollback changes", func() { + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + createTestReservation(s) + s.Cancel() + + s, err = store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + Expect(s.GetReservationByID(testPoolName, testContainerID, testNetIfName)).To(BeNil()) + }) + It("Closed session should panic", func() { + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + s.Cancel() + Expect(func() { s.GetReservationByID(testPoolName, testContainerID, testNetIfName) }).To(Panic()) + Expect(func() { s.ListReservations(testPoolName) }).To(Panic()) + Expect(func() { s.GetLastReservedIP(testPoolName) }).To(Panic()) + }) + It("Reload data from the disk", func() { + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + createTestReservation(s) + Expect(s.Commit()).NotTo(HaveOccurred()) + + store2 := storePkg.New(storePath) + s, err = store2.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + Expect(s.GetReservationByID(testPoolName, testContainerID, testNetIfName)).NotTo(BeNil()) + }) + It("Concurrent access", func() { + done := make(chan interface{}) + go func() { + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + ch := make(chan int, 2) + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + defer GinkgoRecover() + time.Sleep(time.Millisecond * 100) + createTestReservation(s) + ch <- 1 + Expect(s.Commit()).NotTo(HaveOccurred()) + }() + go func() { + defer wg.Done() + defer GinkgoRecover() + s2, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + ch <- 2 + Expect(s2.GetReservationByID(testPoolName, testContainerID, testNetIfName)).NotTo(BeNil()) + s2.Cancel() + }() + wg.Wait() + ret := make([]int, 0, 2) + Loop: + for { + select { + case i := <-ch: + ret = append(ret, i) + default: + break Loop + } + } + Expect(ret).To(HaveLen(2)) + Expect(ret[0]).To(Equal(1)) + Expect(ret[1]).To(Equal(2)) + + close(done) + }() + Eventually(done, time.Minute).Should(BeClosed()) + }) + + It("Invalid data on the disk", func() { + Expect(os.WriteFile(storePath, []byte("something"), 0664)).NotTo(HaveOccurred()) + _, err := store.Open(context.Background()) + Expect(err).To(HaveOccurred()) + }) + It("Checksum mismatch", func() { + // create valid store file + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + createTestReservation(s) + Expect(s.Commit()).NotTo(HaveOccurred()) + + // patch checksum field + storeRoot := &types.Root{} + data, err := os.ReadFile(storePath) + Expect(err).NotTo(HaveOccurred()) + Expect(json.Unmarshal(data, storeRoot)) + storeRoot.Checksum = 123455 + data, err = json.Marshal(storeRoot) + Expect(err).NotTo(HaveOccurred()) + 
Expect(os.WriteFile(storePath, data, 0664)).NotTo(HaveOccurred()) + + // try to load the store file + manager2 := storePkg.New(storePath) + s, err = manager2.Open(context.Background()) + Expect(err).To(MatchError("store file corrupted, checksum mismatch")) + }) + It("Already has allocation", func() { + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + createTestReservation(s) + Expect( + s.Reserve(testPoolName, testContainerID, testNetIfName, + types.ReservationMetadata{}, net.ParseIP(testIP2))).To(MatchError(storePkg.ErrReservationAlreadyExist)) + }) + It("Duplicate IP allocation", func() { + s, err := store.Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + createTestReservation(s) + Expect( + s.Reserve(testPoolName, "other", testNetIfName, + types.ReservationMetadata{}, net.ParseIP(testIP))).To(MatchError(storePkg.ErrIPAlreadyReserved)) + }) +}) diff --git a/pkg/ipam-node/types/checksum.go b/pkg/ipam-node/types/checksum.go new file mode 100644 index 0000000..49cef8e --- /dev/null +++ b/pkg/ipam-node/types/checksum.go @@ -0,0 +1,54 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "errors" + "hash/fnv" +) + +var ( + ErrCorruptData = errors.New("data corrupted, checksum mismatch") +) + +// Checksum is the data to be stored as checkpoint +type Checksum uint32 + +// Verify verifies that passed checksum is same as calculated checksum +func (cs Checksum) Verify(r *Root) error { + if cs != NewChecksum(r) { + return ErrCorruptData + } + return nil +} + +// NewChecksum returns the Checksum of the object +func NewChecksum(r *Root) Checksum { + return Checksum(getChecksum(r)) +} + +// Get returns calculated checksum for the Root object +func getChecksum(r *Root) uint32 { + h := fnv.New32a() + tmpChecksum := r.Checksum + r.Checksum = 0 + data, err := json.Marshal(r) + if err != nil { + panic("failed to compute checksum for input data") + } + r.Checksum = tmpChecksum + _, _ = h.Write(data) + return h.Sum32() +} diff --git a/pkg/ipam-node/types/checksum_test.go b/pkg/ipam-node/types/checksum_test.go new file mode 100644 index 0000000..935fdac --- /dev/null +++ b/pkg/ipam-node/types/checksum_test.go @@ -0,0 +1,32 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +var _ = Describe("Checksum", func() { + It("Equal", func() { + Expect(types.NewChecksum(types.NewRoot()).Verify(types.NewRoot())).NotTo(HaveOccurred()) + }) + It("Diff", func() { + r1 := types.NewRoot() + r1.Pools["foo"] = *types.NewPoolReservations("foo") + Expect(types.NewChecksum(r1).Verify(types.NewRoot())).To(HaveOccurred()) + }) +}) diff --git a/pkg/ipam-node/types/types.go b/pkg/ipam-node/types/types.go new file mode 100644 index 0000000..e789237 --- /dev/null +++ b/pkg/ipam-node/types/types.go @@ -0,0 +1,142 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "net" +) + +// ImplementedVersion contains implemented version of the store file layout +const ImplementedVersion = 1 + +// NewRoot returns initialized store Root +func NewRoot() *Root { + return &Root{Version: ImplementedVersion, Pools: map[string]PoolReservations{}} +} + +// Root is the root object of the store +type Root struct { + Version int `json:"version"` + Checksum Checksum `json:"checksum"` + Pools map[string]PoolReservations `json:"pools"` +} + +// DeepCopy is a deepcopy function for the Root struct +func (in *Root) DeepCopy() *Root { + if in == nil { + return nil + } + out := new(Root) + *out = *in + if in.Pools != nil { + in, out := &in.Pools, &out.Pools + *out = make(map[string]PoolReservations, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return out +} + +// NewPoolReservations returns initialized PoolReservations object +func NewPoolReservations(name string) *PoolReservations { + return &PoolReservations{ + Name: name, + Entries: map[string]Reservation{}, + } +} + +// PoolReservations is object which used to store IP reservations for the IP pool +type PoolReservations struct { + Name string `json:"name"` + // key is container_id + _ + interface_name + Entries map[string]Reservation `json:"entries"` + // contains pool config from the latest store update + LastPoolConfig string `json:"last_pool_config"` + LastReservedIP net.IP `json:"last_reserved_ip"` +} + +// DeepCopy is a deepcopy function for the PoolReservations struct +func (in *PoolReservations) DeepCopy() *PoolReservations { + if in == nil { + return nil + } + out := new(PoolReservations) + *out = *in + if in.Entries != nil { + in, out := &in.Entries, &out.Entries + *out = make(map[string]Reservation, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.LastReservedIP != nil { + in, out := &in.LastReservedIP, &out.LastReservedIP + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + return out +} + +// Reservation is used to store Reservation in a checkpoint file +type Reservation struct { + ContainerID string `json:"container_id"` + InterfaceName string `json:"interface_name"` + IPAddress net.IP `json:"ip_address"` + Metadata ReservationMetadata `json:"metadata"` +} + +// DeepCopy is a deepcopy function for 
the Reservation struct +func (in *Reservation) DeepCopy() *Reservation { + if in == nil { + return nil + } + out := new(Reservation) + *out = *in + if in.IPAddress != nil { + in, out := &in.IPAddress, &out.IPAddress + *out = make(net.IP, len(*in)) + copy(*out, *in) + } + in.Metadata = *out.Metadata.DeepCopy() + return out +} + +// String returns string representation of the Reservation +func (in *Reservation) String() string { + //nolint:errchkjson + data, _ := json.Marshal(&in) + return string(data) +} + +// ReservationMetadata contains meta information for reservation +type ReservationMetadata struct { + CreateTime string `json:"create_time"` + PodUUID string `json:"pod_uuid"` + PodName string `json:"pod_name"` + PodNamespace string `json:"pod_namespace"` + DeviceID string `json:"device_id"` + PoolConfigSnapshot string `json:"pool_config_snapshot"` +} + +// DeepCopy is a deepcopy function for the ReservationMetadata struct +func (in *ReservationMetadata) DeepCopy() *ReservationMetadata { + if in == nil { + return nil + } + out := new(ReservationMetadata) + *out = *in + return out +} diff --git a/pkg/ipam-node/types/types_suite_test.go b/pkg/ipam-node/types/types_suite_test.go new file mode 100644 index 0000000..37557f3 --- /dev/null +++ b/pkg/ipam-node/types/types_suite_test.go @@ -0,0 +1,26 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestTypes(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Types Suite") +} diff --git a/pkg/ipam-node/types/types_test.go b/pkg/ipam-node/types/types_test.go new file mode 100644 index 0000000..299b066 --- /dev/null +++ b/pkg/ipam-node/types/types_test.go @@ -0,0 +1,57 @@ +package types_test + +import ( + "net" + "reflect" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +var _ = Describe("Types", func() { + It("Deepcopy", func() { + orig := &types.Root{ + Version: 1, + Checksum: 12345, + Pools: map[string]types.PoolReservations{"foo": { + Name: "foo", + Entries: map[string]types.Reservation{"id1_net0": { + ContainerID: "id1", + InterfaceName: "net0", + IPAddress: net.ParseIP("192.168.1.100"), + Metadata: types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PodUUID: "testPodUUID", + PodName: "testPodName", + PodNamespace: "testNamespace", + DeviceID: "testDeviceID", + PoolConfigSnapshot: "testPoolConfigSnapshot", + }, + }}, + LastPoolConfig: "testLastPoolConfig", + LastReservedIP: net.ParseIP("192.168.1.100"), + }}, + } + clone := orig.DeepCopy() + Expect(reflect.DeepEqual(orig, clone)).To(BeTrue()) + + orig.Checksum = 54321 + res := orig.Pools["foo"] + res.LastPoolConfig = "changed" + res.LastReservedIP = net.ParseIP("192.168.1.200") + entry := res.Entries["id1_net0"] + entry.IPAddress = net.ParseIP("192.168.1.200") + res.Entries["id1_net0"] = entry + orig.Pools["foo"] = res + + Expect(reflect.DeepEqual(orig, clone)).To(BeFalse()) + Expect(orig.Checksum).NotTo(Equal(clone.Checksum)) + Expect(orig.Pools["foo"].LastPoolConfig).NotTo(Equal(clone.Pools["foo"].LastPoolConfig)) + Expect(orig.Pools["foo"].LastReservedIP).NotTo(Equal(clone.Pools["foo"].LastReservedIP)) + Expect(orig.Pools["foo"].Entries["id1_net0"].IPAddress).NotTo( + Equal(clone.Pools["foo"].Entries["id1_net0"].IPAddress)) + }) +}) From f8035cf1db2433aac74111cc5f91a6cd3b1918f4 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 30 Jun 2023 14:25:01 +0300 Subject: [PATCH 06/18] ipam-node: Add IP allocator implementation Signed-off-by: Yury Kulazhenkov --- pkg/ip/cidr.go | 18 +- pkg/ipam-node/allocator/allocator.go | 178 ++++++++ .../allocator/allocator_suite_test.go | 27 ++ pkg/ipam-node/allocator/allocator_test.go | 380 ++++++++++++++++++ pkg/ipam-node/allocator/mocks/IPAllocator.go | 93 +++++ pkg/ipam-node/allocator/range.go | 179 +++++++++ pkg/ipam-node/allocator/range_set.go | 101 +++++ pkg/ipam-node/allocator/range_set_test.go | 71 ++++ pkg/ipam-node/allocator/range_test.go | 244 +++++++++++ 9 files changed, 1282 insertions(+), 9 deletions(-) create mode 100644 pkg/ipam-node/allocator/allocator.go create mode 100644 pkg/ipam-node/allocator/allocator_suite_test.go create mode 100644 pkg/ipam-node/allocator/allocator_test.go create mode 100644 pkg/ipam-node/allocator/mocks/IPAllocator.go create mode 100644 pkg/ipam-node/allocator/range.go create mode 100644 pkg/ipam-node/allocator/range_set.go create mode 100644 pkg/ipam-node/allocator/range_set_test.go create mode 100644 pkg/ipam-node/allocator/range_test.go diff --git a/pkg/ip/cidr.go b/pkg/ip/cidr.go index 967e3f6..156351a 100644 --- a/pkg/ip/cidr.go +++ b/pkg/ip/cidr.go @@ -22,7 +22,7 @@ import ( // NextIP returns IP incremented by 1, if IP is invalid, return nil func NextIP(ip net.IP) net.IP { - normalizedIP := normalizeIP(ip) + normalizedIP := NormalizeIP(ip) if normalizedIP == nil { return nil } @@ -36,7 +36,7 @@ func NextIPWithOffset(ip net.IP, offset int64) net.IP { if offset < 0 { return nil } - normalizedIP := normalizeIP(ip) + normalizedIP := NormalizeIP(ip) if normalizedIP == nil { return nil } @@ -47,7 +47,7 @@ func NextIPWithOffset(ip net.IP, offset int64) net.IP { // PrevIP returns IP decremented by 1, if IP is invalid, return nil func PrevIP(ip net.IP) net.IP { - normalizedIP := normalizeIP(ip) + 
normalizedIP := NormalizeIP(ip) if normalizedIP == nil { return nil } @@ -62,8 +62,8 @@ func PrevIP(ip net.IP) net.IP { // a > b : 1 // incomparable : -2 func Cmp(a, b net.IP) int { - normalizedA := normalizeIP(a) - normalizedB := normalizeIP(b) + normalizedA := NormalizeIP(a) + normalizedB := NormalizeIP(b) if len(normalizedA) == len(normalizedB) && len(normalizedA) != 0 { return ipToInt(normalizedA).Cmp(ipToInt(normalizedB)) @@ -76,8 +76,8 @@ func Cmp(a, b net.IP) int { // returns -1 if result is negative // returns -2 if result is too large or IPs are not valid addresses func Distance(a, b net.IP) int64 { - normalizedA := normalizeIP(a) - normalizedB := normalizeIP(b) + normalizedA := NormalizeIP(a) + normalizedB := NormalizeIP(b) if len(normalizedA) == len(normalizedB) && len(normalizedA) != 0 { count := big.NewInt(0).Sub(ipToInt(normalizedB), ipToInt(normalizedA)) @@ -115,11 +115,11 @@ func intToIP(i *big.Int, isIPv6 bool) net.IP { return append(make([]byte, zeroes), intBytes...) } -// normalizeIP will normalize IP by family, +// NormalizeIP will normalize IP by family, // IPv4 : 4-byte form // IPv6 : 16-byte form // others : nil -func normalizeIP(ip net.IP) net.IP { +func NormalizeIP(ip net.IP) net.IP { if ipTo4 := ip.To4(); ipTo4 != nil { return ipTo4 } diff --git a/pkg/ipam-node/allocator/allocator.go b/pkg/ipam-node/allocator/allocator.go new file mode 100644 index 0000000..d5c0a41 --- /dev/null +++ b/pkg/ipam-node/allocator/allocator.go @@ -0,0 +1,178 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Copyright 2015 CNI authors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package allocator
+
+import (
+ "errors"
+ "net"
+
+ current "github.com/containernetworking/cni/pkg/types/100"
+
+ "github.com/Mellanox/nvidia-k8s-ipam/pkg/ip"
+ storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store"
+ "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types"
+)
+
+var (
+ // ErrNoFreeAddresses is returned if there are no free IP addresses left in the pool
+ ErrNoFreeAddresses = errors.New("no free addresses in the allocated range")
+)
+
+// IPAllocator is the interface of the allocator package
+//
+//go:generate mockery --name IPAllocator
+type IPAllocator interface {
+ // Allocate allocates IP address from the range for the container identified by ID and ifName
+ Allocate(id string, ifName string, meta types.ReservationMetadata) (*current.IPConfig, error)
+}
+
+type allocator struct {
+ rangeSet *RangeSet
+ session storePkg.Session
+ poolName string
+}
+
+// NewIPAllocator creates and initializes a new IP allocator instance
+func NewIPAllocator(s *RangeSet, poolName string, session storePkg.Session) IPAllocator {
+ return &allocator{
+ rangeSet: s,
+ session: session,
+ poolName: poolName,
+ }
+}
+
+// Allocate allocates an IP
+func (a *allocator) Allocate(id string, ifName string, meta types.ReservationMetadata) (*current.IPConfig, error) {
+ var reservedIP *net.IPNet
+ var gw net.IP
+
+ iter := a.getIter()
+ for {
+ reservedIP, gw = iter.Next()
+ if reservedIP == nil {
+ return nil, ErrNoFreeAddresses
+ }
+ err := a.session.Reserve(a.poolName, id, ifName, meta, reservedIP.IP)
+ if err == nil {
+ break
+ }
+ if !errors.Is(err, storePkg.ErrIPAlreadyReserved) {
+ return nil, err
+ }
+ }
+
+ return &current.IPConfig{
+ Address: *reservedIP,
+ Gateway: gw,
+ }, nil
+}
+
+// RangeIter implements an iterator over the RangeSet
+type RangeIter struct {
+ rangeSet *RangeSet
+ // The current range id
+ rangeIdx int
+ // Our current position
+ cur net.IP
+ // The IP where we started iterating; if we hit this again, we're done.
+ startIP net.IP
+}
+
+// getIter encapsulates the strategy for this allocator.
+// We use a round-robin strategy, attempting to evenly use the whole set.
+// More specifically, a crash-looping container will not see the same IP until
+// the entire range has been run through.
+// We may wish to consider avoiding recently-released IPs in the future.
+func (a *allocator) getIter() *RangeIter {
+ iter := RangeIter{
+ rangeSet: a.rangeSet,
+ }
+
+ // Round-robin by trying to allocate from the last reserved IP + 1
+ startFromLastReservedIP := false
+
+ // We might get a last reserved IP that is wrong if the range indexes changed.
+ // This is not critical, we just lose round-robin this one time.
+ lastReservedIP := a.session.GetLastReservedIP(a.poolName)
+ if lastReservedIP != nil {
+ startFromLastReservedIP = a.rangeSet.Contains(lastReservedIP)
+ }
+
+ // Find the range in the set with this IP
+ if startFromLastReservedIP {
+ for i, r := range *a.rangeSet {
+ if r.Contains(lastReservedIP) {
+ iter.rangeIdx = i
+
+ // We advance the cursor on every Next(), so the first call
+ // to next() will return lastReservedIP + 1
+ iter.cur = lastReservedIP
+ break
+ }
+ }
+ } else {
+ iter.rangeIdx = 0
+ iter.startIP = (*a.rangeSet)[0].RangeStart
+ }
+ return &iter
+}
+
+// Next returns the next IP, its mask, and its gateway.
Returns nil +// if the iterator has been exhausted +func (i *RangeIter) Next() (*net.IPNet, net.IP) { + r := (*i.rangeSet)[i.rangeIdx] + + // If this is the first time iterating, and we're not starting in the middle + // of the range, then start at rangeStart, which is inclusive + if i.cur == nil { + i.cur = r.RangeStart + i.startIP = i.cur + if i.cur.Equal(r.Gateway) { + return i.Next() + } + return &net.IPNet{IP: i.cur, Mask: r.Subnet.Mask}, r.Gateway + } + + nextIP := ip.NextIP(i.cur) + // If we've reached the end of this range, we need to advance the range + // RangeEnd is inclusive as well + if i.cur.Equal(r.RangeEnd) || nextIP == nil { + i.rangeIdx++ + i.rangeIdx %= len(*i.rangeSet) + r = (*i.rangeSet)[i.rangeIdx] + + i.cur = r.RangeStart + } else { + i.cur = nextIP + } + + if i.startIP == nil { + i.startIP = i.cur + } else if i.cur.Equal(i.startIP) { + // IF we've looped back to where we started, give up + return nil, nil + } + + if i.cur.Equal(r.Gateway) { + return i.Next() + } + + return &net.IPNet{IP: i.cur, Mask: r.Subnet.Mask}, r.Gateway +} + +// StartIP returns start IP of the current range +func (i *RangeIter) StartIP() net.IP { + return i.startIP +} diff --git a/pkg/ipam-node/allocator/allocator_suite_test.go b/pkg/ipam-node/allocator/allocator_suite_test.go new file mode 100644 index 0000000..2889dc3 --- /dev/null +++ b/pkg/ipam-node/allocator/allocator_suite_test.go @@ -0,0 +1,27 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Copyright 2015 CNI authors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package allocator_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestAllocator(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Daemon allocator suite") +} diff --git a/pkg/ipam-node/allocator/allocator_test.go b/pkg/ipam-node/allocator/allocator_test.go new file mode 100644 index 0000000..aae6a7f --- /dev/null +++ b/pkg/ipam-node/allocator/allocator_test.go @@ -0,0 +1,380 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Copyright 2015 CNI authors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package allocator_test + +import ( + "context" + "fmt" + "net" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + cniTypes "github.com/containernetworking/cni/pkg/types" + current "github.com/containernetworking/cni/pkg/types/100" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +const ( + testPoolName = "test" + testIFName = "eth0" + testContainerID = "ID" +) + +type AllocatorTestCase struct { + subnets []string + ipMap map[string]string + expectResult string + lastIP string +} + +func mkAlloc(session storePkg.Session) allocator.IPAllocator { + p := allocator.RangeSet{ + allocator.Range{Subnet: mustSubnet("192.168.1.0/29")}, + } + Expect(p.Canonicalize()).NotTo(HaveOccurred()) + return allocator.NewIPAllocator(&p, testPoolName, session) +} + +func newAllocatorWithMultiRanges(session storePkg.Session) allocator.IPAllocator { + p := allocator.RangeSet{ + allocator.Range{RangeStart: net.IP{192, 168, 1, 0}, RangeEnd: net.IP{192, 168, 1, 3}, Subnet: mustSubnet("192.168.1.0/30")}, + allocator.Range{RangeStart: net.IP{192, 168, 2, 0}, RangeEnd: net.IP{192, 168, 2, 3}, Subnet: mustSubnet("192.168.2.0/30")}, + } + Expect(p.Canonicalize()).NotTo(HaveOccurred()) + return allocator.NewIPAllocator(&p, testPoolName, session) +} + +func (t AllocatorTestCase) run(idx int, session storePkg.Session) (*current.IPConfig, error) { + _, _ = fmt.Fprintln(GinkgoWriter, "Index:", idx) + p := allocator.RangeSet{} + for _, s := range t.subnets { + subnet, err := cniTypes.ParseCIDR(s) + if err != nil { + return nil, err + } + p = append(p, allocator.Range{Subnet: cniTypes.IPNet(*subnet)}) + } + for r := range t.ipMap { + Expect(session.Reserve(testPoolName, r, "net1", + types.ReservationMetadata{}, net.ParseIP(r))).NotTo(HaveOccurred()) + } + session.SetLastReservedIP(testPoolName, net.ParseIP(t.lastIP)) + + Expect(p.Canonicalize()).To(Succeed()) + alloc := allocator.NewIPAllocator(&p, testPoolName, session) + return alloc.Allocate(testContainerID, testIFName, types.ReservationMetadata{}) +} + +func checkAlloc(a allocator.IPAllocator, id string, expectedIP net.IP) { + cfg, err := a.Allocate(id, testIFName, types.ReservationMetadata{}) + if expectedIP == nil { + ExpectWithOffset(1, err).To(HaveOccurred()) + return + } + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + ExpectWithOffset(1, cfg.Address.IP).To(Equal(expectedIP)) +} + +var _ = Describe("allocator", func() { + Context("RangeIter", func() { + It("should loop correctly from the beginning", func() { + store, err := storePkg.New(filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = store.Commit() + }() + a := mkAlloc(store) + checkAlloc(a, "1", net.IP{192, 168, 1, 2}) + checkAlloc(a, "2", net.IP{192, 168, 1, 3}) + checkAlloc(a, "3", net.IP{192, 168, 1, 4}) + checkAlloc(a, "4", net.IP{192, 168, 1, 5}) + checkAlloc(a, "5", net.IP{192, 168, 1, 6}) + checkAlloc(a, "6", nil) + }) + + It("should loop correctly from the end", func() { + store, err := storePkg.New(filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = store.Commit() + }() + a := mkAlloc(store) + Expect(store.Reserve(testPoolName, testContainerID, testIFName, types.ReservationMetadata{}, net.IP{192, 168, 1, 6})). 
+ NotTo(HaveOccurred()) + checkAlloc(a, "1", net.IP{192, 168, 1, 2}) + checkAlloc(a, "2", net.IP{192, 168, 1, 3}) + }) + It("should loop correctly from the middle", func() { + store, err := storePkg.New(filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = store.Commit() + }() + a := mkAlloc(store) + Expect(store.Reserve(testPoolName, testContainerID, testIFName, types.ReservationMetadata{}, net.IP{192, 168, 1, 3})). + NotTo(HaveOccurred()) + store.ReleaseReservationByID(testPoolName, testContainerID, testIFName) + checkAlloc(a, "0", net.IP{192, 168, 1, 4}) + checkAlloc(a, "1", net.IP{192, 168, 1, 5}) + checkAlloc(a, "2", net.IP{192, 168, 1, 6}) + checkAlloc(a, "3", net.IP{192, 168, 1, 2}) + checkAlloc(a, "4", net.IP{192, 168, 1, 3}) + checkAlloc(a, "5", nil) + }) + }) + + Context("when has free ip", func() { + It("should allocate ips in round robin", func() { + testCases := []AllocatorTestCase{ + // fresh start + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{}, + expectResult: "10.0.0.2", + lastIP: "", + }, + { + subnets: []string{"2001:db8:1::0/64"}, + ipMap: map[string]string{}, + expectResult: "2001:db8:1::2", + lastIP: "", + }, + { + subnets: []string{"10.0.0.0/30"}, + ipMap: map[string]string{}, + expectResult: "10.0.0.2", + lastIP: "", + }, + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{ + "10.0.0.2": testContainerID, + }, + expectResult: "10.0.0.3", + lastIP: "", + }, + // next ip of last reserved ip + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{}, + expectResult: "10.0.0.6", + lastIP: "10.0.0.5", + }, + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{ + "10.0.0.4": testContainerID, + "10.0.0.5": testContainerID, + }, + expectResult: "10.0.0.6", + lastIP: "10.0.0.3", + }, + // round-robin to the beginning + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{ + "10.0.0.6": testContainerID, + }, + expectResult: "10.0.0.2", + lastIP: "10.0.0.5", + }, + // lastIP is out of range + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{ + "10.0.0.2": testContainerID, + }, + expectResult: "10.0.0.3", + lastIP: "10.0.0.128", + }, + // subnet is completely full except for lastip + // wrap around and reserve lastIP + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{ + "10.0.0.2": testContainerID, + "10.0.0.4": testContainerID, + "10.0.0.5": testContainerID, + "10.0.0.6": testContainerID, + }, + expectResult: "10.0.0.3", + lastIP: "10.0.0.3", + }, + // allocate from multiple subnets + { + subnets: []string{"10.0.0.0/30", "10.0.1.0/30"}, + expectResult: "10.0.0.2", + ipMap: map[string]string{}, + }, + // advance to next subnet + { + subnets: []string{"10.0.0.0/30", "10.0.1.0/30"}, + lastIP: "10.0.0.2", + expectResult: "10.0.1.2", + ipMap: map[string]string{}, + }, + // Roll to start subnet + { + subnets: []string{"10.0.0.0/30", "10.0.1.0/30", "10.0.2.0/30"}, + lastIP: "10.0.2.2", + expectResult: "10.0.0.2", + ipMap: map[string]string{}, + }, + // Already allocated + { + subnets: []string{"10.0.2.0/30"}, + lastIP: "10.33.33.1", + expectResult: "10.0.2.2", + ipMap: map[string]string{ + "10.0.2.1": testContainerID, + }, + }, + // IP overflow + { + subnets: []string{"255.255.255.0/24"}, + lastIP: "255.255.255.255", + // skip GW ip + expectResult: "255.255.255.2", + ipMap: map[string]string{}, + }, + } + + for idx, tc := range testCases { + store, err := storePkg.New( + 
filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + res, err := tc.run(idx, store) + Expect(err).ToNot(HaveOccurred()) + Expect(res.Address.IP.String()).To(Equal(tc.expectResult)) + Expect(store.Commit()).NotTo(HaveOccurred()) + } + }) + + It("should not allocate the broadcast address", func() { + store, err := storePkg.New( + filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + defer func() { + _ = store.Commit() + }() + alloc := mkAlloc(store) + for i := 2; i < 7; i++ { + res, err := alloc.Allocate(fmt.Sprintf("ID%d", i), testIFName, types.ReservationMetadata{}) + Expect(err).ToNot(HaveOccurred()) + s := fmt.Sprintf("192.168.1.%d/29", i) + Expect(s).To(Equal(res.Address.String())) + _, _ = fmt.Fprintln(GinkgoWriter, "got ip", res.Address.String()) + } + + x, err := alloc.Allocate("ID8", testIFName, types.ReservationMetadata{}) + _, _ = fmt.Fprintln(GinkgoWriter, "got ip", x) + Expect(err).To(HaveOccurred()) + }) + + It("should allocate in a round-robin fashion", func() { + store, err := storePkg.New( + filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + defer func() { + _ = store.Commit() + }() + alloc := mkAlloc(store) + res, err := alloc.Allocate(testContainerID, testIFName, types.ReservationMetadata{}) + Expect(err).ToNot(HaveOccurred()) + Expect(res.Address.String()).To(Equal("192.168.1.2/29")) + + store.ReleaseReservationByID(testPoolName, testContainerID, testIFName) + + res, err = alloc.Allocate(testContainerID, testIFName, types.ReservationMetadata{}) + Expect(err).ToNot(HaveOccurred()) + Expect(res.Address.String()).To(Equal("192.168.1.3/29")) + }) + }) + Context("when out of ips", func() { + It("returns a meaningful error", func() { + testCases := []AllocatorTestCase{ + { + subnets: []string{"10.0.0.0/30"}, + ipMap: map[string]string{ + "10.0.0.2": testContainerID, + }, + }, + { + subnets: []string{"10.0.0.0/29"}, + ipMap: map[string]string{ + "10.0.0.2": testContainerID, + "10.0.0.3": testContainerID, + "10.0.0.4": testContainerID, + "10.0.0.5": testContainerID, + "10.0.0.6": testContainerID, + }, + }, + { + subnets: []string{"10.0.0.0/30", "10.0.1.0/30"}, + ipMap: map[string]string{ + "10.0.0.2": testContainerID, + "10.0.1.2": testContainerID, + }, + }, + } + for idx, tc := range testCases { + store, err := storePkg.New( + filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + _, err = tc.run(idx, store) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(HavePrefix("no free addresses in the allocated range")) + Expect(store.Commit()).NotTo(HaveOccurred()) + } + }) + }) + + Context("when lastReservedIP is at the end of one of multi ranges", func() { + It("should use the first IP of next range as startIP after Next", func() { + store, err := storePkg.New( + filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = store.Commit() + }() + a := newAllocatorWithMultiRanges(store) + + // reserve the last IP of the first range + err = store.Reserve(testPoolName, testContainerID, testIFName, types.ReservationMetadata{}, net.IP{192, 168, 1, 3}) + Expect(err).NotTo(HaveOccurred()) + + // check that IP from the next range is used + checkAlloc(a, "0", net.IP{192, 168, 2, 0}) + }) + }) + + Context("when no lastReservedIP", func() { + It("should use the first IP of the first range as startIP after Next", func() { + store, err := 
storePkg.New( + filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = store.Commit() + }() + a := newAllocatorWithMultiRanges(store) + + // get range iterator and do the first Next + checkAlloc(a, "0", net.IP{192, 168, 1, 0}) + }) + }) +}) diff --git a/pkg/ipam-node/allocator/mocks/IPAllocator.go b/pkg/ipam-node/allocator/mocks/IPAllocator.go new file mode 100644 index 0000000..ec06449 --- /dev/null +++ b/pkg/ipam-node/allocator/mocks/IPAllocator.go @@ -0,0 +1,93 @@ +// Code generated by mockery v2.27.1. DO NOT EDIT. + +package mocks + +import ( + types "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + mock "github.com/stretchr/testify/mock" +) + +// IPAllocator is an autogenerated mock type for the IPAllocator type +type IPAllocator struct { + mock.Mock +} + +type IPAllocator_Expecter struct { + mock *mock.Mock +} + +func (_m *IPAllocator) EXPECT() *IPAllocator_Expecter { + return &IPAllocator_Expecter{mock: &_m.Mock} +} + +// Allocate provides a mock function with given fields: id, ifName, meta +func (_m *IPAllocator) Allocate(id string, ifName string, meta types.ReservationMetadata) (*types100.IPConfig, error) { + ret := _m.Called(id, ifName, meta) + + var r0 *types100.IPConfig + var r1 error + if rf, ok := ret.Get(0).(func(string, string, types.ReservationMetadata) (*types100.IPConfig, error)); ok { + return rf(id, ifName, meta) + } + if rf, ok := ret.Get(0).(func(string, string, types.ReservationMetadata) *types100.IPConfig); ok { + r0 = rf(id, ifName, meta) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types100.IPConfig) + } + } + + if rf, ok := ret.Get(1).(func(string, string, types.ReservationMetadata) error); ok { + r1 = rf(id, ifName, meta) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IPAllocator_Allocate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Allocate' +type IPAllocator_Allocate_Call struct { + *mock.Call +} + +// Allocate is a helper method to define mock.On call +// - id string +// - ifName string +// - meta types.ReservationMetadata +func (_e *IPAllocator_Expecter) Allocate(id interface{}, ifName interface{}, meta interface{}) *IPAllocator_Allocate_Call { + return &IPAllocator_Allocate_Call{Call: _e.mock.On("Allocate", id, ifName, meta)} +} + +func (_c *IPAllocator_Allocate_Call) Run(run func(id string, ifName string, meta types.ReservationMetadata)) *IPAllocator_Allocate_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string), args[2].(types.ReservationMetadata)) + }) + return _c +} + +func (_c *IPAllocator_Allocate_Call) Return(_a0 *types100.IPConfig, _a1 error) *IPAllocator_Allocate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *IPAllocator_Allocate_Call) RunAndReturn(run func(string, string, types.ReservationMetadata) (*types100.IPConfig, error)) *IPAllocator_Allocate_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewIPAllocator interface { + mock.TestingT + Cleanup(func()) +} + +// NewIPAllocator creates a new instance of IPAllocator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewIPAllocator(t mockConstructorTestingTNewIPAllocator) *IPAllocator { + mock := &IPAllocator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/ipam-node/allocator/range.go b/pkg/ipam-node/allocator/range.go new file mode 100644 index 0000000..ce9c58d --- /dev/null +++ b/pkg/ipam-node/allocator/range.go @@ -0,0 +1,179 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Copyright 2015 CNI authors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package allocator + +import ( + "fmt" + "net" + + "github.com/containernetworking/cni/pkg/types" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ip" +) + +// Range contains IP range configuration +type Range struct { + RangeStart net.IP `json:"rangeStart,omitempty"` // The first ip, inclusive + RangeEnd net.IP `json:"rangeEnd,omitempty"` // The last ip, inclusive + Subnet types.IPNet `json:"subnet"` + Gateway net.IP `json:"gateway,omitempty"` +} + +// Canonicalize takes a given range and ensures that all information is consistent, +// filling out Start, End, and Gateway with sane values if missing +func (r *Range) Canonicalize() error { + if err := CanonicalizeIP(&r.Subnet.IP); err != nil { + return err + } + + // Can't create an allocator for a network with no addresses, eg + // a /32 or /31 + ones, masklen := r.Subnet.Mask.Size() + if ones > masklen-2 { + return fmt.Errorf("network %s too small to allocate from", (*net.IPNet)(&r.Subnet).String()) + } + + if len(r.Subnet.IP) != len(r.Subnet.Mask) { + return fmt.Errorf("IPNet IP and Mask version mismatch") + } + + // Ensure Subnet IP is the network address, not some other address + networkIP := r.Subnet.IP.Mask(r.Subnet.Mask) + if !r.Subnet.IP.Equal(networkIP) { + return fmt.Errorf("network has host bits set. "+ + "For a subnet mask of length %d the network address is %s", ones, networkIP.String()) + } + + // If the gateway is nil, claim .1 + if r.Gateway == nil { + r.Gateway = ip.NextIP(r.Subnet.IP) + if r.Gateway == nil { + return fmt.Errorf("computed Gateway for the subnet is not a valid IP") + } + } else { + if err := CanonicalizeIP(&r.Gateway); err != nil { + return err + } + } + + // RangeStart: If specified, make sure it's sane (inside the subnet), + // otherwise use the first free IP (i.e. .1) - this will conflict with the + // gateway but we skip it in the iterator + if r.RangeStart != nil { + if err := CanonicalizeIP(&r.RangeStart); err != nil { + return err + } + + if !r.Contains(r.RangeStart) { + return fmt.Errorf("RangeStart %s not in network %s", r.RangeStart.String(), (*net.IPNet)(&r.Subnet).String()) + } + } else { + r.RangeStart = ip.NextIP(r.Subnet.IP) + if r.RangeStart == nil { + return fmt.Errorf("computed RangeStart is not a valid IP") + } + } + + // RangeEnd: If specified, verify sanity. Otherwise, add a sensible default + // (e.g. 
for a /24: .254 if IPv4, ::255 if IPv6) + if r.RangeEnd != nil { + if err := CanonicalizeIP(&r.RangeEnd); err != nil { + return err + } + + if !r.Contains(r.RangeEnd) { + return fmt.Errorf("RangeEnd %s not in network %s", r.RangeEnd.String(), (*net.IPNet)(&r.Subnet).String()) + } + } else { + r.RangeEnd = lastIP(r.Subnet) + } + + return nil +} + +// Contains checks if a given ip is a valid, allocatable address in a given Range +func (r *Range) Contains(addr net.IP) bool { + if err := CanonicalizeIP(&addr); err != nil { + return false + } + + subnet := (net.IPNet)(r.Subnet) + + // Not the same address family + if len(addr) != len(r.Subnet.IP) { + return false + } + + // Not in network + if !subnet.Contains(addr) { + return false + } + + if ip.Cmp(addr, r.RangeStart) < 0 { + // Before the range start + return false + } + + if ip.Cmp(addr, r.RangeEnd) > 0 { + // After the range end + return false + } + + return true +} + +// Overlaps returns true if there is any overlap between ranges +func (r *Range) Overlaps(r1 *Range) bool { + // different families + if len(r.RangeStart) != len(r1.RangeStart) { + return false + } + + return r.Contains(r1.RangeStart) || + r.Contains(r1.RangeEnd) || + r1.Contains(r.RangeStart) || + r1.Contains(r.RangeEnd) +} + +// String returns string representation of the Range +func (r *Range) String() string { + return fmt.Sprintf("%s-%s", r.RangeStart.String(), r.RangeEnd.String()) +} + +// CanonicalizeIP makes sure a provided ip is in standard form +func CanonicalizeIP(addr *net.IP) error { + if addr == nil { + return fmt.Errorf("IP can't be nil") + } + normalizedIP := ip.NormalizeIP(*addr) + if normalizedIP == nil { + return fmt.Errorf("IP %s not v4 nor v6", *addr) + } + *addr = normalizedIP + return nil +} + +// Determine the last IP of a subnet, excluding the broadcast if IPv4 +func lastIP(subnet types.IPNet) net.IP { + var end net.IP + for i := 0; i < len(subnet.IP); i++ { + end = append(end, subnet.IP[i]|^subnet.Mask[i]) + } + if subnet.IP.To4() != nil { + end[3]-- + } + + return end +} diff --git a/pkg/ipam-node/allocator/range_set.go b/pkg/ipam-node/allocator/range_set.go new file mode 100644 index 0000000..ebbfd36 --- /dev/null +++ b/pkg/ipam-node/allocator/range_set.go @@ -0,0 +1,101 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Copyright 2015 CNI authors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package allocator + +import ( + "fmt" + "net" + "strings" +) + +// RangeSet is an alias type for list or Range objects +type RangeSet []Range + +// Contains returns true if any range in this set contains an IP +func (s *RangeSet) Contains(addr net.IP) bool { + r, _ := s.RangeFor(addr) + return r != nil +} + +// RangeFor finds the range that contains an IP, or nil if not found +func (s *RangeSet) RangeFor(addr net.IP) (*Range, error) { + if err := CanonicalizeIP(&addr); err != nil { + return nil, err + } + + for _, r := range *s { + if r.Contains(addr) { + return &r, nil + } + } + + return nil, fmt.Errorf("%s not in range set %s", addr.String(), s.String()) +} + +// Overlaps returns true if any ranges in any set overlap with this one +func (s *RangeSet) Overlaps(p1 *RangeSet) bool { + for _, r := range *s { + for _, r1 := range *p1 { + r1 := r1 + if r.Overlaps(&r1) { + return true + } + } + } + return false +} + +// Canonicalize ensures the RangeSet is in a standard form, and detects any +// invalid input. Call Range.Canonicalize() on every Range in the set +func (s *RangeSet) Canonicalize() error { + if len(*s) == 0 { + return fmt.Errorf("empty range set") + } + + fam := 0 + for i := range *s { + if err := (*s)[i].Canonicalize(); err != nil { + return err + } + if i == 0 { + fam = len((*s)[i].RangeStart) + } else if fam != len((*s)[i].RangeStart) { + return fmt.Errorf("mixed address families") + } + } + + // Make sure none of the ranges in the set overlap + l := len(*s) + for i, r1 := range (*s)[:l-1] { + for _, r2 := range (*s)[i+1:] { + r2 := r2 + if r1.Overlaps(&r2) { + return fmt.Errorf("subnets %s and %s overlap", r1.String(), r2.String()) + } + } + } + + return nil +} + +// String returns string representation of the RangeSet +func (s *RangeSet) String() string { + out := []string{} + for _, r := range *s { + out = append(out, r.String()) + } + + return strings.Join(out, ",") +} diff --git a/pkg/ipam-node/allocator/range_set_test.go b/pkg/ipam-node/allocator/range_set_test.go new file mode 100644 index 0000000..1ecc44e --- /dev/null +++ b/pkg/ipam-node/allocator/range_set_test.go @@ -0,0 +1,71 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Copyright 2015 CNI authors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package allocator_test + +import ( + "net" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" +) + +var _ = Describe("range sets", func() { + It("should detect set membership correctly", func() { + p := allocator.RangeSet{ + allocator.Range{Subnet: mustSubnet("192.168.0.0/24")}, + allocator.Range{Subnet: mustSubnet("172.16.1.0/24")}, + } + + err := p.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(p.Contains(net.IP{192, 168, 0, 55})).To(BeTrue()) + + r, err := p.RangeFor(net.IP{192, 168, 0, 55}) + Expect(err).NotTo(HaveOccurred()) + Expect(r).To(Equal(&p[0])) + + r, err = p.RangeFor(net.IP{192, 168, 99, 99}) + Expect(r).To(BeNil()) + Expect(err).To(MatchError("192.168.99.99 not in range set 192.168.0.1-192.168.0.254,172.16.1.1-172.16.1.254")) + }) + + It("should discover overlaps within a set", func() { + p := allocator.RangeSet{ + {Subnet: mustSubnet("192.168.0.0/20")}, + {Subnet: mustSubnet("192.168.2.0/24")}, + } + + err := p.Canonicalize() + Expect(err).To(MatchError("subnets 192.168.0.1-192.168.15.254 and 192.168.2.1-192.168.2.254 overlap")) + }) + + It("should discover overlaps outside a set", func() { + p1 := allocator.RangeSet{ + {Subnet: mustSubnet("192.168.0.0/20")}, + } + p2 := allocator.RangeSet{ + {Subnet: mustSubnet("192.168.2.0/24")}, + } + + Expect(p1.Canonicalize()).NotTo(HaveOccurred()) + Expect(p2.Canonicalize()).NotTo(HaveOccurred()) + + Expect(p1.Overlaps(&p2)).To(BeTrue()) + Expect(p2.Overlaps(&p1)).To(BeTrue()) + }) +}) diff --git a/pkg/ipam-node/allocator/range_test.go b/pkg/ipam-node/allocator/range_test.go new file mode 100644 index 0000000..b4e2c15 --- /dev/null +++ b/pkg/ipam-node/allocator/range_test.go @@ -0,0 +1,244 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Copyright 2015 CNI authors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package allocator_test + +import ( + "net" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/containernetworking/cni/pkg/types" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" +) + +var _ = Describe("IP ranges", func() { + It("should generate sane defaults for ipv4 with a clean prefix", func() { + subnetStr := "192.0.2.0/24" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.IP{192, 0, 2, 1}, + RangeEnd: net.IP{192, 0, 2, 254}, + Gateway: net.IP{192, 0, 2, 1}, + })) + }) + It("should generate sane defaults for a smaller ipv4 subnet", func() { + subnetStr := "192.0.2.0/25" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.IP{192, 0, 2, 1}, + RangeEnd: net.IP{192, 0, 2, 126}, + Gateway: net.IP{192, 0, 2, 1}, + })) + }) + It("should reject ipv4 subnet using a masked address", func() { + subnetStr := "192.0.2.12/24" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).Should(MatchError("network has host bits set. " + + "For a subnet mask of length 24 the network address is 192.0.2.0")) + }) + It("should reject ipv6 subnet using a masked address", func() { + subnetStr := "2001:DB8:1::24:19ff:fee1:c44a/64" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).Should(MatchError("network has host bits set. " + + "For a subnet mask of length 64 the network address is 2001:db8:1::")) + }) + It("should reject ipv6 prefix with host bit set", func() { + subnetStr := "2001:DB8:24:19ff::/63" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).Should(MatchError("network has host bits set. " + + "For a subnet mask of length 63 the network address is 2001:db8:24:19fe::")) + }) + It("should reject ipv4 network with host bit set", func() { + subnetStr := "192.168.127.0/23" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).Should(MatchError("network has host bits set." 
+ + " For a subnet mask of length 23 the network address is 192.168.126.0")) + }) + It("should generate sane defaults for ipv6 with a clean prefix", func() { + subnetStr := "2001:DB8:1::/64" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.ParseIP("2001:DB8:1::1"), + RangeEnd: net.ParseIP("2001:DB8:1::ffff:ffff:ffff:ffff"), + Gateway: net.ParseIP("2001:DB8:1::1"), + })) + }) + + It("Should reject a network that's too small", func() { + r := allocator.Range{Subnet: mustSubnet("192.0.2.0/31")} + err := r.Canonicalize() + Expect(err).Should(MatchError("network 192.0.2.0/31 too small to allocate from")) + }) + + It("should reject invalid RangeStart and RangeEnd specifications", func() { + subnetStr := "192.0.2.0/24" + r := allocator.Range{Subnet: mustSubnet(subnetStr), RangeStart: net.ParseIP("192.0.3.0")} + err := r.Canonicalize() + Expect(err).Should(MatchError("RangeStart 192.0.3.0 not in network 192.0.2.0/24")) + + r = allocator.Range{Subnet: mustSubnet(subnetStr), RangeEnd: net.ParseIP("192.0.4.0")} + err = r.Canonicalize() + Expect(err).Should(MatchError("RangeEnd 192.0.4.0 not in network 192.0.2.0/24")) + + r = allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.ParseIP("192.0.2.50"), + RangeEnd: net.ParseIP("192.0.2.40"), + } + err = r.Canonicalize() + Expect(err).Should(MatchError("RangeStart 192.0.2.50 not in network 192.0.2.0/24")) + }) + + It("should parse all fields correctly", func() { + subnetStr := "192.0.2.0/24" + r := allocator.Range{ + Subnet: mustSubnet(subnetStr), + RangeStart: net.ParseIP("192.0.2.40"), + RangeEnd: net.ParseIP("192.0.2.50"), + Gateway: net.ParseIP("192.0.2.254"), + } + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.IP{192, 0, 2, 40}, + RangeEnd: net.IP{192, 0, 2, 50}, + Gateway: net.IP{192, 0, 2, 254}, + })) + }) + + It("should accept v4 IPs in range and reject IPs out of range", func() { + r := allocator.Range{ + Subnet: mustSubnet("192.0.2.0/24"), + RangeStart: net.ParseIP("192.0.2.40"), + RangeEnd: net.ParseIP("192.0.2.50"), + Gateway: net.ParseIP("192.0.2.254"), + } + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r.Contains(net.ParseIP("192.0.3.0"))).Should(BeFalse()) + + Expect(r.Contains(net.ParseIP("192.0.2.39"))).Should(BeFalse()) + Expect(r.Contains(net.ParseIP("192.0.2.40"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("192.0.2.50"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("192.0.2.51"))).Should(BeFalse()) + }) + + It("should accept v6 IPs in range and reject IPs out of range", func() { + r := allocator.Range{ + Subnet: mustSubnet("2001:DB8:1::/64"), + RangeStart: net.ParseIP("2001:db8:1::40"), + RangeEnd: net.ParseIP("2001:db8:1::50"), + } + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + Expect(r.Contains(net.ParseIP("2001:db8:2::"))).Should(BeFalse()) + + Expect(r.Contains(net.ParseIP("2001:db8:1::39"))).Should(BeFalse()) + Expect(r.Contains(net.ParseIP("2001:db8:1::40"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("2001:db8:1::50"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("2001:db8:1::51"))).Should(BeFalse()) + }) + + DescribeTable("Detecting overlap", + func(r1 allocator.Range, r2 allocator.Range, expected bool) { + Expect(r1.Canonicalize()).NotTo(HaveOccurred()) + 
Expect(r2.Canonicalize()).NotTo(HaveOccurred()) + + // operation should be commutative + Expect(r1.Overlaps(&r2)).To(Equal(expected)) + Expect(r2.Overlaps(&r1)).To(Equal(expected)) + }, + Entry("non-overlapping", + allocator.Range{Subnet: mustSubnet("10.0.0.0/24")}, + allocator.Range{Subnet: mustSubnet("10.0.1.0/24")}, + false), + Entry("different families", + // Note that the bits overlap + allocator.Range{Subnet: mustSubnet("0.0.0.0/24")}, + allocator.Range{Subnet: mustSubnet("::/24")}, + false), + Entry("Identical", + allocator.Range{Subnet: mustSubnet("10.0.0.0/24")}, + allocator.Range{Subnet: mustSubnet("10.0.0.0/24")}, + true), + Entry("Containing", + allocator.Range{Subnet: mustSubnet("10.0.0.0/20")}, + allocator.Range{Subnet: mustSubnet("10.0.1.0/24")}, + true), + Entry("same subnet, non overlapping start + end", + allocator.Range{ + Subnet: mustSubnet("10.0.0.0/24"), + RangeEnd: net.ParseIP("10.0.0.127"), + }, + allocator.Range{ + Subnet: mustSubnet("10.0.0.0/24"), + RangeStart: net.ParseIP("10.0.0.128"), + }, + false), + Entry("same subnet, overlapping start + end", + allocator.Range{ + Subnet: mustSubnet("10.0.0.0/24"), + RangeEnd: net.ParseIP("10.0.0.127"), + }, + allocator.Range{ + Subnet: mustSubnet("10.0.0.0/24"), + RangeStart: net.ParseIP("10.0.0.127"), + }, + true), + ) +}) + +func mustSubnet(s string) types.IPNet { + n, err := types.ParseCIDR(s) + if err != nil { + Fail(err.Error()) + } + _ = allocator.CanonicalizeIP(&n.IP) + return types.IPNet(*n) +} + +func networkSubnet(s string) types.IPNet { + ipNet := mustSubnet(s) + ipNet.IP = ipNet.IP.Mask(ipNet.Mask) + return ipNet +} From 316126abe6c2a10923cea2543e6696f25dd78940 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Wed, 28 Jun 2023 14:50:41 +0300 Subject: [PATCH 07/18] nv-ipam: Update CNI plugin to be shim plugin for node daemon Signed-off-by: Yury Kulazhenkov --- go.mod | 6 - go.sum | 18 -- pkg/cni/k8sclient/k8sclient.go | 36 --- pkg/cni/k8sclient/k8sclient_suite_test.go | 26 --- pkg/cni/k8sclient/k8sclient_test.go | 66 ------ pkg/cni/plugin/mocks/GRPCClient.go | 251 ++++++++++++++++++++ pkg/cni/plugin/mocks/IPAMExecutor.go | 178 --------------- pkg/cni/plugin/plugin.go | 265 +++++++++++----------- pkg/cni/plugin/plugin_test.go | 101 +++------ pkg/cni/types/host-local.go | 66 ------ pkg/cni/types/host-local_test.go | 57 ----- pkg/cni/types/types.go | 96 ++++---- pkg/cni/types/types_test.go | 53 ++--- 13 files changed, 484 insertions(+), 735 deletions(-) delete mode 100644 pkg/cni/k8sclient/k8sclient.go delete mode 100644 pkg/cni/k8sclient/k8sclient_suite_test.go delete mode 100644 pkg/cni/k8sclient/k8sclient_test.go create mode 100644 pkg/cni/plugin/mocks/GRPCClient.go delete mode 100644 pkg/cni/plugin/mocks/IPAMExecutor.go delete mode 100644 pkg/cni/types/host-local.go delete mode 100644 pkg/cni/types/host-local_test.go diff --git a/go.mod b/go.mod index 0a4f814..a612893 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.20 require ( github.com/containernetworking/cni v1.1.2 - github.com/containernetworking/plugins v1.2.0 github.com/go-logr/logr v1.2.4 github.com/google/renameio/v2 v2.0.0 github.com/k8snetworkplumbingwg/cni-log v0.0.0-20230321145726-634c593dd11f @@ -28,10 +27,8 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/coreos/go-iptables v0.6.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect - github.com/evanphx/json-patch 
v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/zapr v1.2.3 // indirect @@ -63,10 +60,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/safchain/ethtool v0.2.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect diff --git a/go.sum b/go.sum index fc7b9b1..7e3cb87 100644 --- a/go.sum +++ b/go.sum @@ -61,10 +61,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= -github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= -github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= -github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk= -github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -80,7 +76,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -241,12 +236,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 
v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= @@ -289,8 +282,6 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/safchain/ethtool v0.2.0 h1:dILxMBqDnQfX192cCAPjZr9v2IgVXeElHPy435Z/IdE= -github.com/safchain/ethtool v0.2.0/go.mod h1:WkKB1DnNtvsMlDmQ50sgwowDJV/hGbJSOvJoEXs1AJQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -315,11 +306,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -454,7 +440,6 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -465,7 +450,6 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -475,7 +459,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -647,7 +630,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/cni/k8sclient/k8sclient.go b/pkg/cni/k8sclient/k8sclient.go deleted file mode 100644 index 31a4f13..0000000 --- a/pkg/cni/k8sclient/k8sclient.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package k8sclient - -import ( - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" -) - -// FromKubeconfig returns a kubernetes client created from provided kubeconfig path -func FromKubeconfig(kubeconfigPath string) (*kubernetes.Clientset, error) { - cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - &clientcmd.ConfigOverrides{}).ClientConfig() - if err != nil { - return nil, err - } - - k8sClient, err := kubernetes.NewForConfig(cfg) - if err != nil { - return nil, err - } - - return k8sClient, nil -} diff --git a/pkg/cni/k8sclient/k8sclient_suite_test.go b/pkg/cni/k8sclient/k8sclient_suite_test.go deleted file mode 100644 index 3e9e3d5..0000000 --- a/pkg/cni/k8sclient/k8sclient_suite_test.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package k8sclient_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestK8sClient(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "K8sClient Suite") -} diff --git a/pkg/cni/k8sclient/k8sclient_test.go b/pkg/cni/k8sclient/k8sclient_test.go deleted file mode 100644 index 0318241..0000000 --- a/pkg/cni/k8sclient/k8sclient_test.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package k8sclient_test - -import ( - "os" - "path" - "path/filepath" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/k8sclient" -) - -var _ = Describe("k8sclient tests", func() { - var ( - tmpDir string - validKubeconfigPath string - invalidKubeconfigPath string - ) - - BeforeEach(func() { - var err error - tmpDir := GinkgoT().TempDir() - - validKubeconfigPath = filepath.Join(tmpDir, "kube.conf") - invalidKubeconfigPath = filepath.Join(tmpDir, "kube-invalid.conf") - - data, err := os.ReadFile(path.Join("..", "..", "..", "testdata", "test.kubeconfig")) - Expect(err).ToNot(HaveOccurred()) - err = os.WriteFile(validKubeconfigPath, data, 0o644) - Expect(err).NotTo(HaveOccurred()) - - err = os.WriteFile(invalidKubeconfigPath, []byte("This is an invalid kubeconfig content"), 0o644) - Expect(err).NotTo(HaveOccurred()) - }) - - Context("FromKubeconfig", func() { - It("creates k8s deffered client if kubeconfig valid", func() { - _, err := k8sclient.FromKubeconfig(validKubeconfigPath) - Expect(err).ToNot(HaveOccurred()) - }) - - It("fails if kubeconfig path not found", func() { - _, err := k8sclient.FromKubeconfig(filepath.Join(tmpDir, "does-not-exist.conf")) - Expect(err).To(HaveOccurred()) - }) - - It("fails if kubeconfig file contains garbage", func() { - _, err := k8sclient.FromKubeconfig(invalidKubeconfigPath) - Expect(err).To(HaveOccurred()) - }) - }) -}) diff --git a/pkg/cni/plugin/mocks/GRPCClient.go b/pkg/cni/plugin/mocks/GRPCClient.go new file mode 100644 index 0000000..e3bfd81 --- /dev/null +++ b/pkg/cni/plugin/mocks/GRPCClient.go @@ -0,0 +1,251 @@ +// Code generated by mockery v2.27.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + grpc "google.golang.org/grpc" + + mock "github.com/stretchr/testify/mock" + + v1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" +) + +// GRPCClient is an autogenerated mock type for the GRPCClient type +type GRPCClient struct { + mock.Mock +} + +type GRPCClient_Expecter struct { + mock *mock.Mock +} + +func (_m *GRPCClient) EXPECT() *GRPCClient_Expecter { + return &GRPCClient_Expecter{mock: &_m.Mock} +} + +// Allocate provides a mock function with given fields: ctx, in, opts +func (_m *GRPCClient) Allocate(ctx context.Context, in *v1.AllocateRequest, opts ...grpc.CallOption) (*v1.AllocateResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *v1.AllocateResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1.AllocateRequest, ...grpc.CallOption) (*v1.AllocateResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1.AllocateRequest, ...grpc.CallOption) *v1.AllocateResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.AllocateResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1.AllocateRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GRPCClient_Allocate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Allocate' +type GRPCClient_Allocate_Call struct { + *mock.Call +} + +// Allocate is a helper method to define mock.On call +// - ctx context.Context +// - in *v1.AllocateRequest +// - opts ...grpc.CallOption +func (_e *GRPCClient_Expecter) Allocate(ctx interface{}, in interface{}, opts ...interface{}) *GRPCClient_Allocate_Call { + return &GRPCClient_Allocate_Call{Call: _e.mock.On("Allocate", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *GRPCClient_Allocate_Call) Run(run func(ctx context.Context, in *v1.AllocateRequest, opts ...grpc.CallOption)) *GRPCClient_Allocate_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1.AllocateRequest), variadicArgs...) + }) + return _c +} + +func (_c *GRPCClient_Allocate_Call) Return(_a0 *v1.AllocateResponse, _a1 error) *GRPCClient_Allocate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *GRPCClient_Allocate_Call) RunAndReturn(run func(context.Context, *v1.AllocateRequest, ...grpc.CallOption) (*v1.AllocateResponse, error)) *GRPCClient_Allocate_Call { + _c.Call.Return(run) + return _c +} + +// Deallocate provides a mock function with given fields: ctx, in, opts +func (_m *GRPCClient) Deallocate(ctx context.Context, in *v1.DeallocateRequest, opts ...grpc.CallOption) (*v1.DeallocateResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *v1.DeallocateResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1.DeallocateRequest, ...grpc.CallOption) (*v1.DeallocateResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1.DeallocateRequest, ...grpc.CallOption) *v1.DeallocateResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.DeallocateResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1.DeallocateRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GRPCClient_Deallocate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deallocate' +type GRPCClient_Deallocate_Call struct { + *mock.Call +} + +// Deallocate is a helper method to define mock.On call +// - ctx context.Context +// - in *v1.DeallocateRequest +// - opts ...grpc.CallOption +func (_e *GRPCClient_Expecter) Deallocate(ctx interface{}, in interface{}, opts ...interface{}) *GRPCClient_Deallocate_Call { + return &GRPCClient_Deallocate_Call{Call: _e.mock.On("Deallocate", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *GRPCClient_Deallocate_Call) Run(run func(ctx context.Context, in *v1.DeallocateRequest, opts ...grpc.CallOption)) *GRPCClient_Deallocate_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1.DeallocateRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *GRPCClient_Deallocate_Call) Return(_a0 *v1.DeallocateResponse, _a1 error) *GRPCClient_Deallocate_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *GRPCClient_Deallocate_Call) RunAndReturn(run func(context.Context, *v1.DeallocateRequest, ...grpc.CallOption) (*v1.DeallocateResponse, error)) *GRPCClient_Deallocate_Call { + _c.Call.Return(run) + return _c +} + +// IsAllocated provides a mock function with given fields: ctx, in, opts +func (_m *GRPCClient) IsAllocated(ctx context.Context, in *v1.IsAllocatedRequest, opts ...grpc.CallOption) (*v1.IsAllocatedResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *v1.IsAllocatedResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1.IsAllocatedRequest, ...grpc.CallOption) (*v1.IsAllocatedResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1.IsAllocatedRequest, ...grpc.CallOption) *v1.IsAllocatedResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.IsAllocatedResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1.IsAllocatedRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GRPCClient_IsAllocated_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsAllocated' +type GRPCClient_IsAllocated_Call struct { + *mock.Call +} + +// IsAllocated is a helper method to define mock.On call +// - ctx context.Context +// - in *v1.IsAllocatedRequest +// - opts ...grpc.CallOption +func (_e *GRPCClient_Expecter) IsAllocated(ctx interface{}, in interface{}, opts ...interface{}) *GRPCClient_IsAllocated_Call { + return &GRPCClient_IsAllocated_Call{Call: _e.mock.On("IsAllocated", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *GRPCClient_IsAllocated_Call) Run(run func(ctx context.Context, in *v1.IsAllocatedRequest, opts ...grpc.CallOption)) *GRPCClient_IsAllocated_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1.IsAllocatedRequest), variadicArgs...) + }) + return _c +} + +func (_c *GRPCClient_IsAllocated_Call) Return(_a0 *v1.IsAllocatedResponse, _a1 error) *GRPCClient_IsAllocated_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *GRPCClient_IsAllocated_Call) RunAndReturn(run func(context.Context, *v1.IsAllocatedRequest, ...grpc.CallOption) (*v1.IsAllocatedResponse, error)) *GRPCClient_IsAllocated_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewGRPCClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewGRPCClient creates a new instance of GRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewGRPCClient(t mockConstructorTestingTNewGRPCClient) *GRPCClient { + mock := &GRPCClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/cni/plugin/mocks/IPAMExecutor.go b/pkg/cni/plugin/mocks/IPAMExecutor.go deleted file mode 100644 index bc08711..0000000 --- a/pkg/cni/plugin/mocks/IPAMExecutor.go +++ /dev/null @@ -1,178 +0,0 @@ -// Code generated by mockery v2.27.1. DO NOT EDIT. - -package mocks - -import ( - mock "github.com/stretchr/testify/mock" - - types "github.com/containernetworking/cni/pkg/types" -) - -// IPAMExecutor is an autogenerated mock type for the IPAMExecutor type -type IPAMExecutor struct { - mock.Mock -} - -type IPAMExecutor_Expecter struct { - mock *mock.Mock -} - -func (_m *IPAMExecutor) EXPECT() *IPAMExecutor_Expecter { - return &IPAMExecutor_Expecter{mock: &_m.Mock} -} - -// ExecAdd provides a mock function with given fields: pluginName, data -func (_m *IPAMExecutor) ExecAdd(pluginName string, data []byte) (types.Result, error) { - ret := _m.Called(pluginName, data) - - var r0 types.Result - var r1 error - if rf, ok := ret.Get(0).(func(string, []byte) (types.Result, error)); ok { - return rf(pluginName, data) - } - if rf, ok := ret.Get(0).(func(string, []byte) types.Result); ok { - r0 = rf(pluginName, data) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Result) - } - } - - if rf, ok := ret.Get(1).(func(string, []byte) error); ok { - r1 = rf(pluginName, data) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IPAMExecutor_ExecAdd_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecAdd' -type IPAMExecutor_ExecAdd_Call struct { - *mock.Call -} - -// ExecAdd is a helper method to define mock.On call -// - pluginName string -// - data []byte -func (_e *IPAMExecutor_Expecter) ExecAdd(pluginName interface{}, data interface{}) *IPAMExecutor_ExecAdd_Call { - return &IPAMExecutor_ExecAdd_Call{Call: _e.mock.On("ExecAdd", pluginName, data)} -} - -func (_c *IPAMExecutor_ExecAdd_Call) Run(run func(pluginName string, data []byte)) *IPAMExecutor_ExecAdd_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].([]byte)) - }) - return _c -} - -func (_c *IPAMExecutor_ExecAdd_Call) Return(_a0 types.Result, _a1 error) *IPAMExecutor_ExecAdd_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *IPAMExecutor_ExecAdd_Call) RunAndReturn(run func(string, []byte) (types.Result, error)) *IPAMExecutor_ExecAdd_Call { - _c.Call.Return(run) - return _c -} - -// ExecCheck provides a mock function with given fields: pluginName, data -func (_m *IPAMExecutor) ExecCheck(pluginName string, data []byte) error { - ret := _m.Called(pluginName, data) - - var r0 error - if rf, ok := ret.Get(0).(func(string, []byte) error); ok { - r0 = rf(pluginName, data) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// IPAMExecutor_ExecCheck_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecCheck' -type IPAMExecutor_ExecCheck_Call struct { - *mock.Call -} - -// ExecCheck is a helper method to define mock.On call -// - pluginName string -// - data []byte -func (_e *IPAMExecutor_Expecter) ExecCheck(pluginName interface{}, data interface{}) *IPAMExecutor_ExecCheck_Call { - return &IPAMExecutor_ExecCheck_Call{Call: _e.mock.On("ExecCheck", pluginName, data)} -} - -func (_c *IPAMExecutor_ExecCheck_Call) Run(run func(pluginName string, data []byte)) *IPAMExecutor_ExecCheck_Call { - _c.Call.Run(func(args 
mock.Arguments) { - run(args[0].(string), args[1].([]byte)) - }) - return _c -} - -func (_c *IPAMExecutor_ExecCheck_Call) Return(_a0 error) *IPAMExecutor_ExecCheck_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *IPAMExecutor_ExecCheck_Call) RunAndReturn(run func(string, []byte) error) *IPAMExecutor_ExecCheck_Call { - _c.Call.Return(run) - return _c -} - -// ExecDel provides a mock function with given fields: pluginName, data -func (_m *IPAMExecutor) ExecDel(pluginName string, data []byte) error { - ret := _m.Called(pluginName, data) - - var r0 error - if rf, ok := ret.Get(0).(func(string, []byte) error); ok { - r0 = rf(pluginName, data) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// IPAMExecutor_ExecDel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecDel' -type IPAMExecutor_ExecDel_Call struct { - *mock.Call -} - -// ExecDel is a helper method to define mock.On call -// - pluginName string -// - data []byte -func (_e *IPAMExecutor_Expecter) ExecDel(pluginName interface{}, data interface{}) *IPAMExecutor_ExecDel_Call { - return &IPAMExecutor_ExecDel_Call{Call: _e.mock.On("ExecDel", pluginName, data)} -} - -func (_c *IPAMExecutor_ExecDel_Call) Run(run func(pluginName string, data []byte)) *IPAMExecutor_ExecDel_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string), args[1].([]byte)) - }) - return _c -} - -func (_c *IPAMExecutor_ExecDel_Call) Return(_a0 error) *IPAMExecutor_ExecDel_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *IPAMExecutor_ExecDel_Call) RunAndReturn(run func(string, []byte) error) *IPAMExecutor_ExecDel_Call { - _c.Call.Return(run) - return _c -} - -type mockConstructorTestingTNewIPAMExecutor interface { - mock.TestingT - Cleanup(func()) -} - -// NewIPAMExecutor creates a new instance of IPAMExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewIPAMExecutor(t mockConstructorTestingTNewIPAMExecutor) *IPAMExecutor { - mock := &IPAMExecutor{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/cni/plugin/plugin.go b/pkg/cni/plugin/plugin.go index 374727c..3a2d730 100644 --- a/pkg/cni/plugin/plugin.go +++ b/pkg/cni/plugin/plugin.go @@ -15,180 +15,131 @@ package plugin import ( "context" - "encoding/json" "fmt" - "os" - "path/filepath" + "net" + "time" "github.com/containernetworking/cni/pkg/skel" cnitypes "github.com/containernetworking/cni/pkg/types" - "github.com/containernetworking/plugins/pkg/ipam" + current "github.com/containernetworking/cni/pkg/types/100" log "github.com/k8snetworkplumbingwg/cni-log" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" "github.com/Mellanox/nvidia-k8s-ipam/pkg/version" ) const ( CNIPluginName = "nv-ipam" - - delegateIPAMPluginName = "host-local" ) -// IPAMExecutor is an interface that executes IPAM CNI Plugin +// GRPCClient is an interface for the client which is used to communicate with NVIDIA IPAM Node Daemon // -//go:generate mockery --name IPAMExecutor -type IPAMExecutor interface { - // ExecAdd executes IPAM plugin named pluginName ADD call - ExecAdd(pluginName string, data []byte) (cnitypes.Result, error) - // ExecDel executes IPAM plugin named pluginName DEL call - ExecDel(pluginName string, data []byte) error - // ExecCheck executes IPAM plugin named pluginName CHECK call - ExecCheck(pluginName string, data []byte) error -} - -// NewIPAMExecutor creates a new instance that implements IPAMExecutor -func NewIPAMExecutor() IPAMExecutor { - return &cniIpamExecutor{} -} - -// cniIpamExecutor implements IPAMExecutor using cni plugins ipam package -type cniIpamExecutor struct{} - -// ExecAdd implements IPAMExecutor interface -func (ie *cniIpamExecutor) ExecAdd(pluginName string, data []byte) (cnitypes.Result, error) { - return ipam.ExecAdd(pluginName, data) -} +//go:generate mockery --name GRPCClient -// ExecDel implements IPAMExecutor interface -func (ie *cniIpamExecutor) ExecDel(pluginName string, data []byte) error { - return ipam.ExecDel(pluginName, data) +type GRPCClient interface { + nodev1.IPAMServiceClient } -// ExecCheck implements IPAMExecutor interface -func (ie *cniIpamExecutor) ExecCheck(pluginName string, data []byte) error { - return ipam.ExecCheck(pluginName, data) -} +type NewGRPCClientFunc func(daemonSocket string) (GRPCClient, error) func NewPlugin() *Plugin { return &Plugin{ - Name: CNIPluginName, - Version: version.GetVersionString(), - ConfLoader: types.NewConfLoader(), - IPAMExecutor: NewIPAMExecutor(), + Name: CNIPluginName, + Version: version.GetVersionString(), + ConfLoader: types.NewConfLoader(), + NewGRPCClientFunc: defaultNewGRPCClientFunc, } } type Plugin struct { - Name string - Version string - ConfLoader types.ConfLoader - IPAMExecutor IPAMExecutor + Name string + Version string + ConfLoader types.ConfLoader + NewGRPCClientFunc NewGRPCClientFunc } func (p *Plugin) CmdAdd(args *skel.CmdArgs) error { - conf, err := p.ConfLoader.LoadConf(args.StdinData) - if err != nil { - return fmt.Errorf("failed to load config. 
%w", err) - } - setupLog(conf.IPAM.LogFile, conf.IPAM.LogLevel) - logCall("ADD", args, conf.IPAM) - - // build host-local config - pool, err := getPoolbyName(conf.IPAM.K8sClient, conf.IPAM.NodeName, conf.IPAM.PoolName) + cmd, err := p.prepareCMD(args) if err != nil { - return fmt.Errorf("failed to get pool by name. %w", err) + return log.Errorf("command preparation failed: %v", err) } - - hlc := types.HostLocalNetConfFromNetConfAndPool(conf, pool) - data, err := json.Marshal(hlc) + logCall("ADD", args, cmd.Config.IPAM) + ctx, cFunc := context.WithTimeout(context.Background(), + time.Second*time.Duration(cmd.Config.IPAM.DaemonCallTimeoutSeconds)) + defer cFunc() + resp, err := cmd.Client.Allocate(ctx, &nodev1.AllocateRequest{Parameters: cmd.ReqParams}) if err != nil { - return fmt.Errorf("failed to marshal host-local net conf. %w", err) + return log.Errorf("grpc call failed: %v", err) } - log.Debugf("host-local stdin data: %q", string(data)) - - // call host-local cni with alternate path - err = os.Setenv("CNI_PATH", filepath.Join(conf.IPAM.DataDir, "bin")) + result, err := grpcRespToResult(resp) if err != nil { return err } - res, err := p.IPAMExecutor.ExecAdd(delegateIPAMPluginName, data) - if err != nil { - return fmt.Errorf("failed to exec ADD host-local CNI plugin. %w", err) - } - - return cnitypes.PrintResult(res, conf.CNIVersion) + log.Infof("CmdAdd succeed") + return cnitypes.PrintResult(result, cmd.Config.CNIVersion) } func (p *Plugin) CmdDel(args *skel.CmdArgs) error { - conf, err := p.ConfLoader.LoadConf(args.StdinData) - if err != nil { - return fmt.Errorf("failed to load config. %w", err) - } - setupLog(conf.IPAM.LogFile, conf.IPAM.LogLevel) - logCall("DEL", args, conf.IPAM) - - // build host-local config - pool, err := getPoolbyName(conf.IPAM.K8sClient, conf.IPAM.NodeName, conf.IPAM.PoolName) - if err != nil { - return fmt.Errorf("failed to get pool by name. %w", err) - } - - hlc := types.HostLocalNetConfFromNetConfAndPool(conf, pool) - data, err := json.Marshal(hlc) - if err != nil { - return fmt.Errorf("failed to marshal host-local net conf. %w", err) - } - log.Debugf("host-local stdin data: %q", string(data)) - - // call host-local cni with alternate path - err = os.Setenv("CNI_PATH", filepath.Join(conf.IPAM.DataDir, "bin")) + cmd, err := p.prepareCMD(args) if err != nil { return err } - err = p.IPAMExecutor.ExecDel(delegateIPAMPluginName, data) - if err != nil { - return fmt.Errorf("failed to exec DEL host-local CNI plugin. %w", err) + logCall("DEL", args, cmd.Config.IPAM) + ctx, cFunc := context.WithTimeout(context.Background(), + time.Second*time.Duration(cmd.Config.IPAM.DaemonCallTimeoutSeconds)) + defer cFunc() + if _, err := cmd.Client.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: cmd.ReqParams}); err != nil { + return log.Errorf("grpc call failed: %v", err) } - + log.Infof("CmdDel succeed") return nil } func (p *Plugin) CmdCheck(args *skel.CmdArgs) error { - conf, err := p.ConfLoader.LoadConf(args.StdinData) + cmd, err := p.prepareCMD(args) if err != nil { - return fmt.Errorf("failed to load config. %w", err) + return err } - setupLog(conf.IPAM.LogFile, conf.IPAM.LogLevel) - logCall("CHECK", args, conf.IPAM) - - // build host-local config - pool, err := getPoolbyName(conf.IPAM.K8sClient, conf.IPAM.NodeName, conf.IPAM.PoolName) - if err != nil { - return fmt.Errorf("failed to get pool by name. 
%w", err) + logCall("CHECK", args, cmd.Config.IPAM) + ctx, cFunc := context.WithTimeout(context.Background(), + time.Second*time.Duration(cmd.Config.IPAM.DaemonCallTimeoutSeconds)) + defer cFunc() + if _, err := cmd.Client.IsAllocated(ctx, &nodev1.IsAllocatedRequest{Parameters: cmd.ReqParams}); err != nil { + return log.Errorf("grpc call failed: %v", err) } + log.Infof("CmdCheck succeed") + return nil +} + +type cmdContext struct { + Client GRPCClient + Config *types.NetConf + ReqParams *nodev1.IPAMParameters +} - hlc := types.HostLocalNetConfFromNetConfAndPool(conf, pool) - data, err := json.Marshal(hlc) +func (p *Plugin) prepareCMD(args *skel.CmdArgs) (cmdContext, error) { + var ( + c cmdContext + err error + ) + c.Config, err = p.ConfLoader.LoadConf(args.StdinData) if err != nil { - return fmt.Errorf("failed to marshal host-local net conf. %w", err) + return cmdContext{}, fmt.Errorf("failed to load config. %v", err) } - log.Debugf("host-local stdin data: %q", string(data)) + setupLog(c.Config.IPAM.LogFile, c.Config.IPAM.LogLevel) - // call host-local cni with alternate path - err = os.Setenv("CNI_PATH", filepath.Join(conf.IPAM.DataDir, "bin")) + c.Client, err = p.NewGRPCClientFunc(c.Config.IPAM.DaemonSocket) if err != nil { - return err + return cmdContext{}, fmt.Errorf("failed to connect to IPAM daemon: %v", err) } - err = p.IPAMExecutor.ExecCheck(delegateIPAMPluginName, data) + c.ReqParams, err = cniConfToGRPCReq(c.Config, args) if err != nil { - return fmt.Errorf("failed to exec CHECK host-local CNI plugin. %w", err) + return cmdContext{}, fmt.Errorf("failed to convert CNI parameters to GRPC request: %v", err) } - - return nil + return c, nil } func setupLog(logFile, logLevel string) { @@ -208,21 +159,79 @@ func logCall(cmd string, args *skel.CmdArgs, conf *types.IPAMConf) { log.Debugf("CMD %s: Parsed IPAM conf: %+v", cmd, conf) } -func getPoolbyName(kclient kubernetes.Interface, nodeName, poolName string) (*pool.IPPool, error) { - // get pool info from node - node, err := kclient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) +func grpcRespToResult(resp *nodev1.AllocateResponse) (*current.Result, error) { + result := ¤t.Result{CNIVersion: current.ImplementedSpecVersion} + logErr := func(msg string) error { + return log.Errorf("unexpected response from IPAM daemon: %s", msg) + } + for _, alloc := range resp.Allocations { + if alloc.Ip == "" { + return nil, logErr("IP can't be empty") + } + if alloc.Gateway == "" { + return nil, logErr("Gateway can't be empty") + } + ipAddr, netAddr, err := net.ParseCIDR(alloc.Ip) + if err != nil { + return nil, logErr(fmt.Sprintf("unexpected IP address format, received value: %s", alloc.Ip)) + } + gwIP := net.ParseIP(alloc.Gateway) + if gwIP == nil { + return nil, logErr(fmt.Sprintf("unexpected Gateway address format, received value: %s", alloc.Gateway)) + } + result.IPs = append(result.IPs, ¤t.IPConfig{ + Address: net.IPNet{IP: ipAddr, Mask: netAddr.Mask}, + Gateway: gwIP, + }) + } + + return result, nil +} + +func cniConfToGRPCReq(conf *types.NetConf, args *skel.CmdArgs) (*nodev1.IPAMParameters, error) { + cniExtraArgs := &kubernetesCNIArgs{} + err := cnitypes.LoadArgs(args.Args, cniExtraArgs) if err != nil { - return nil, fmt.Errorf("failed to get node %s from k8s API. 
%w", nodeName, err) + return nil, fmt.Errorf("failed to load extra CNI args: %v", err) + } + req := &nodev1.IPAMParameters{ + Pools: conf.IPAM.Pools, + CniIfname: args.IfName, + CniContainerid: args.ContainerID, + Metadata: &nodev1.IPAMMetadata{ + K8SPodName: string(cniExtraArgs.K8S_POD_NAME), + K8SPodNamespace: string(cniExtraArgs.K8S_POD_NAMESPACE), + K8SPodUid: string(cniExtraArgs.K8S_POD_UID), + DeviceId: conf.DeviceID, + }, } - pm, err := pool.NewManagerImpl(node) - if err != nil { - return nil, fmt.Errorf("failed to get pools from node %s. %w", nodeName, err) + if req.Metadata.K8SPodName == "" { + return nil, log.Errorf("CNI_ARGS: K8S_POD_NAME is not provided by container runtime") + } + if req.Metadata.K8SPodNamespace == "" { + return nil, log.Errorf("CNI_ARGS: K8S_POD_NAMESPACE is not provided by container runtime") } + if req.Metadata.K8SPodUid == "" { + log.Warningf("CNI_ARGS: K8S_POD_UID is not provided by container runtime") + } + return req, nil +} - pool := pm.GetPoolByName(poolName) - if pool == nil { - return nil, fmt.Errorf("failed to get pools from node %s. pool %s not found", nodeName, poolName) +// default NewGRPCClientFunc, initializes insecure GRPC connection to provided daemon socket +func defaultNewGRPCClientFunc(daemonSocket string) (GRPCClient, error) { + conn, err := grpc.Dial(daemonSocket, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err } - return pool, nil + return nodev1.NewIPAMServiceClient(conn), nil +} + +// kubernetesCNIArgs is the container for extra CNI Args which set by container runtimes +// in Kubernetes +type kubernetesCNIArgs struct { + cnitypes.CommonArgs + K8S_POD_NAME cnitypes.UnmarshallableString //nolint + K8S_POD_NAMESPACE cnitypes.UnmarshallableString //nolint + K8S_POD_UID cnitypes.UnmarshallableString //nolint } diff --git a/pkg/cni/plugin/plugin_test.go b/pkg/cni/plugin/plugin_test.go index 1bc3776..98eefa2 100644 --- a/pkg/cni/plugin/plugin_test.go +++ b/pkg/cni/plugin/plugin_test.go @@ -14,24 +14,19 @@ package plugin_test import ( - "encoding/json" "path" "github.com/containernetworking/cni/pkg/skel" cnitypes "github.com/containernetworking/cni/pkg/types" - cnitypes100 "github.com/containernetworking/cni/pkg/types/100" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" + "github.com/stretchr/testify/mock" + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/plugin" "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" "github.com/Mellanox/nvidia-k8s-ipam/pkg/version" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/stretchr/testify/mock" pluginMocks "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/plugin/mocks" typesMocks "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types/mocks" @@ -39,40 +34,26 @@ import ( var _ = Describe("plugin tests", func() { var ( - tmpDir string - p plugin.Plugin - fakeClient *fake.Clientset - testNode *v1.Node - mockExecutor *pluginMocks.IPAMExecutor - mockConfLoader *typesMocks.ConfLoader - testConf *types.NetConf - args *skel.CmdArgs + tmpDir string + p plugin.Plugin + mockConfLoader *typesMocks.ConfLoader + mockDaemonClient *pluginMocks.GRPCClient + testConf *types.NetConf + args *skel.CmdArgs ) BeforeEach(func() { tmpDir = GinkgoT().TempDir() - - testNode = &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", - }, - } - nodeAnnot := map[string]string{ - pool.IPBlocksAnnotation: `{"my-pool": - {"subnet": "192.168.0.0/16", "startIP": "192.168.0.2", - "endIP": "192.168.0.254", "gateway": "192.168.0.1"}}`, - } - testNode.SetAnnotations(nodeAnnot) - - fakeClient = fake.NewSimpleClientset(testNode) - mockExecutor = pluginMocks.NewIPAMExecutor(GinkgoT()) mockConfLoader = typesMocks.NewConfLoader(GinkgoT()) + mockDaemonClient = pluginMocks.NewGRPCClient(GinkgoT()) p = plugin.Plugin{ - Name: plugin.CNIPluginName, - Version: version.GetVersionString(), - ConfLoader: mockConfLoader, - IPAMExecutor: mockExecutor, + Name: plugin.CNIPluginName, + Version: version.GetVersionString(), + ConfLoader: mockConfLoader, + NewGRPCClientFunc: func(_ string) (plugin.GRPCClient, error) { + return mockDaemonClient, nil + }, } testConf = &types.NetConf{ @@ -82,12 +63,9 @@ var _ = Describe("plugin tests", func() { IPAM: cnitypes.IPAM{ Type: "nv-ipam", }, - PoolName: "my-pool", - DataDir: "/foo/bar", - LogFile: path.Join(tmpDir, "nv-ipam.log"), - LogLevel: "debug", - NodeName: "test-node", - K8sClient: fakeClient, + Pools: []string{"my-pool"}, + LogFile: path.Join(tmpDir, "nv-ipam.log"), + LogLevel: "debug", }, } @@ -96,54 +74,49 @@ var _ = Describe("plugin tests", func() { Netns: "/proc/19783/ns", IfName: "net1", StdinData: []byte("doesnt-matter"), + Args: "K8S_POD_NAME=test;K8S_POD_NAMESPACE=test", } }) Context("CmdAdd()", func() { It("executes successfully", func() { mockConfLoader.On("LoadConf", args.StdinData).Return(testConf, nil) - mockExecutor.On("ExecAdd", "host-local", mock.Anything).Return(&cnitypes100.Result{}, func(_ string, data []byte) error { - // fail if we cannot unmarshal data to host-local config - hostLocalConf := &types.HostLocalNetConf{} - return json.Unmarshal(data, hostLocalConf) - }) - + mockDaemonClient.On("Allocate", mock.Anything, &nodev1.AllocateRequest{ + Parameters: &nodev1.IPAMParameters{ + Pools: []string{"my-pool"}, + CniIfname: "net1", + CniContainerid: "1234", + Metadata: &nodev1.IPAMMetadata{ + K8SPodName: "test", + K8SPodNamespace: "test", + }, + }}).Return(&nodev1.AllocateResponse{ + Allocations: []*nodev1.AllocationInfo{{ + Pool: "my-pool", + Ip: "192.168.1.10/24", + Gateway: "192.168.1.1", + }}, + }, nil) err := p.CmdAdd(args) Expect(err).ToNot(HaveOccurred()) - mockConfLoader.AssertExpectations(GinkgoT()) - mockExecutor.AssertExpectations(GinkgoT()) }) }) Context("CmdDel()", func() { It("executes successfully", func() { mockConfLoader.On("LoadConf", args.StdinData).Return(testConf, nil) - mockExecutor.On("ExecDel", "host-local", mock.Anything).Return(func(_ string, data []byte) error { - // fail if we cannot unmarshal data to host-local config - hostLocalConf := &types.HostLocalNetConf{} - return 
json.Unmarshal(data, hostLocalConf) - }) - + mockDaemonClient.On("Deallocate", mock.Anything, mock.Anything).Return(nil, nil) err := p.CmdDel(args) Expect(err).ToNot(HaveOccurred()) - mockConfLoader.AssertExpectations(GinkgoT()) - mockExecutor.AssertExpectations(GinkgoT()) }) }) Context("CmdCheck()", func() { It("executes successfully", func() { mockConfLoader.On("LoadConf", args.StdinData).Return(testConf, nil) - mockExecutor.On("ExecCheck", "host-local", mock.Anything).Return(func(_ string, data []byte) error { - // fail if we cannot unmarshal data to host-local config - hostLocalConf := &types.HostLocalNetConf{} - return json.Unmarshal(data, hostLocalConf) - }) - + mockDaemonClient.On("IsAllocated", mock.Anything, mock.Anything).Return(nil, nil) err := p.CmdCheck(args) Expect(err).ToNot(HaveOccurred()) - mockConfLoader.AssertExpectations(GinkgoT()) - mockExecutor.AssertExpectations(GinkgoT()) }) }) }) diff --git a/pkg/cni/types/host-local.go b/pkg/cni/types/host-local.go deleted file mode 100644 index 4ed4bc8..0000000 --- a/pkg/cni/types/host-local.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package types - -import ( - "path/filepath" - - "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" -) - -// TODO: do we want to support Routes ? DNS entires from ResolvConf as host-local CNI ? - -type HostLocalNetConf struct { - Name string `json:"name"` - CNIVersion string `json:"cniVersion"` - IPAM *HostLocalIPAMConfig `json:"ipam"` -} - -type HostLocalIPAMConfig struct { - Type string `json:"type"` - DataDir string `json:"dataDir"` - Ranges []HostLocalRangeSet `json:"ranges"` -} - -type HostLocalRangeSet []HostLocalRange - -type HostLocalRange struct { - RangeStart string `json:"rangeStart,omitempty"` // The first ip, inclusive - RangeEnd string `json:"rangeEnd,omitempty"` // The last ip, inclusive - Subnet string `json:"subnet"` - Gateway string `json:"gateway,omitempty"` -} - -func HostLocalNetConfFromNetConfAndPool(nc *NetConf, p *pool.IPPool) *HostLocalNetConf { - // Note(adrianc): we use Pool name as Network Name for host-local call so that assignments are managed - // by host-local ipam by pool name and not the network name. - return &HostLocalNetConf{ - Name: p.Name, - CNIVersion: nc.CNIVersion, - IPAM: &HostLocalIPAMConfig{ - Type: "host-local", - DataDir: filepath.Join(nc.IPAM.DataDir, HostLocalDataDir), - Ranges: []HostLocalRangeSet{ - []HostLocalRange{ - { - RangeStart: p.StartIP, - RangeEnd: p.EndIP, - Subnet: p.Subnet, - Gateway: p.Gateway, - }, - }, - }, - }, - } -} diff --git a/pkg/cni/types/host-local_test.go b/pkg/cni/types/host-local_test.go deleted file mode 100644 index 53fa722..0000000 --- a/pkg/cni/types/host-local_test.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - Copyright 2023, NVIDIA CORPORATION & AFFILIATES - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package types_test - -import ( - "path" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" -) - -var _ = Describe("host-local types tests", func() { - Context("HostLocalNetConfFromNetConfAndPool", func() { - It("Converts to host-local netconf", func() { - conf := types.NetConf{ - Name: "my-net", - CNIVersion: "0.4.0", - IPAM: &types.IPAMConf{ - PoolName: "my-pool", - DataDir: "/foo/bar", - }, - } - pool := pool.IPPool{ - Name: "my-pool", - Subnet: "192.168.0.0/16", - StartIP: "192.168.0.2", - EndIP: "192.168.0.254", - Gateway: "192.168.0.1", - } - - hostlocalConf := types.HostLocalNetConfFromNetConfAndPool(&conf, &pool) - - Expect(hostlocalConf.Name).To(Equal(pool.Name)) - Expect(hostlocalConf.CNIVersion).To(Equal(conf.CNIVersion)) - Expect(hostlocalConf.IPAM.Type).To(Equal("host-local")) - Expect(hostlocalConf.IPAM.DataDir).To(Equal(path.Join(conf.IPAM.DataDir, types.HostLocalDataDir))) - Expect(hostlocalConf.IPAM.Ranges[0][0].Subnet).To(Equal(pool.Subnet)) - Expect(hostlocalConf.IPAM.Ranges[0][0].RangeStart).To(Equal(pool.StartIP)) - Expect(hostlocalConf.IPAM.Ranges[0][0].RangeEnd).To(Equal(pool.EndIP)) - Expect(hostlocalConf.IPAM.Ranges[0][0].Gateway).To(Equal(pool.Gateway)) - }) - }) -}) diff --git a/pkg/cni/types/types.go b/pkg/cni/types/types.go index da9fa8b..5eff194 100644 --- a/pkg/cni/types/types.go +++ b/pkg/cni/types/types.go @@ -21,25 +21,19 @@ import ( "strings" "github.com/containernetworking/cni/pkg/types" - "k8s.io/client-go/kubernetes" - - "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/k8sclient" ) const ( // DefaultConfDir is the default dir where configurations are found DefaultConfDir = "/etc/cni/net.d/nv-ipam.d" - // DefaultDataDir is the default dir where cni stores data and binaries - DefaultDataDir = "/var/lib/cni/nv-ipam" + // DefaultDaemonSocket is the default socket path for the daemon + DefaultDaemonSocket = "unix:///var/lib/cni/nv-ipam/daemon.sock" + // DefaultDaemonCallTimeoutSeconds is the default timeout IPAM daemon calls + DefaultDaemonCallTimeoutSeconds = 5 // DefaultLogFile default log file path to be used for logging DefaultLogFile = "/var/log/nv-ipam-cni.log" - - // HostLocalDataDir is the relative path within the data dir for host-local state data - HostLocalDataDir = "state/host-local" - // K8sNodeNameFile is the file name containing k8s node name - K8sNodeNameFile = "k8s-node-name" - // DefaultKubeConfigFileName is the default name of kubeconfig file - DefaultKubeConfigFileName = "nv-ipam.kubeconfig" + // DefaultLogLevel is the default log level + DefaultLogLevel = "info" // ConfFileName is the name of CNI configuration file found in conf dir ConfFileName = "nv-ipam.conf" ) @@ -57,16 +51,16 @@ type IPAMConf struct { types.IPAM // PoolName is the name of the pool to be used to allocate IP - PoolName string `json:"poolName,omitempty"` - Kubeconfig string `json:"kubeconfig,omitempty"` - DataDir string `json:"dataDir,omitempty"` - ConfDir string `json:"confDir,omitempty"` - LogFile string `json:"logFile,omitempty"` - LogLevel string 
`json:"logLevel,omitempty"` - - // internal configuration - NodeName string - K8sClient kubernetes.Interface + PoolName string `json:"poolName,omitempty"` + // Address of the NVIDIA-ipam DaemonSocket + DaemonSocket string `json:"daemonSocket,omitempty"` + DaemonCallTimeoutSeconds int `json:"daemonCallTimeoutSeconds,omitempty"` + ConfDir string `json:"confDir,omitempty"` + LogFile string `json:"logFile,omitempty"` + LogLevel string `json:"logLevel,omitempty"` + + // internal fields + Pools []string `json:"-"` } // NetConf is CNI network config @@ -74,6 +68,7 @@ type NetConf struct { Name string `json:"name"` CNIVersion string `json:"cniVersion"` IPAM *IPAMConf `json:"ipam"` + DeviceID string `json:"deviceID"` } type confLoader struct{} @@ -110,35 +105,36 @@ func (cl *confLoader) LoadConf(bytes []byte) (*NetConf, error) { // overlay config with defaults defaultConf := &IPAMConf{ // use network name as pool name by default - PoolName: n.Name, - Kubeconfig: filepath.Join(n.IPAM.ConfDir, DefaultKubeConfigFileName), - DataDir: DefaultDataDir, - ConfDir: DefaultConfDir, - LogFile: DefaultLogFile, - LogLevel: "info", + PoolName: n.Name, + ConfDir: DefaultConfDir, + LogFile: DefaultLogFile, + DaemonSocket: DefaultDaemonSocket, + DaemonCallTimeoutSeconds: DefaultDaemonCallTimeoutSeconds, + LogLevel: DefaultLogLevel, } cl.overlayConf(defaultConf, n.IPAM) - // get Node name - p := filepath.Join(n.IPAM.ConfDir, K8sNodeNameFile) - data, err := os.ReadFile(p) - if err != nil { - return nil, fmt.Errorf("failed to read k8s node name from path: %s. %w", p, err) - } - n.IPAM.NodeName = strings.TrimSpace(string(data)) - if n.IPAM.NodeName == "" { - return nil, fmt.Errorf("failed to parse k8s node name from path: %s", p) - } - - // create k8s client - n.IPAM.K8sClient, err = k8sclient.FromKubeconfig(n.IPAM.Kubeconfig) + n.IPAM.Pools, err = parsePoolName(n.IPAM.PoolName) if err != nil { - return nil, fmt.Errorf("failed to create k8s client from kubeconfig path: %s. %w", n.IPAM.Kubeconfig, err) + return nil, err } return n, nil } +func parsePoolName(poolName string) ([]string, error) { + pools := strings.Split(poolName, ",") + if len(pools) > 2 { + return nil, fmt.Errorf("pool field can't contain more then two entries") + } + for _, p := range pools { + if p == "" { + return nil, fmt.Errorf("pool field has invalid format") + } + } + return pools, nil +} + // loadFromConfFile returns *IPAMConf with values from config file located in filePath. func (cl *confLoader) loadFromConfFile(filePath string) (*IPAMConf, error) { data, err := os.ReadFile(filePath) @@ -162,14 +158,6 @@ func (cl *confLoader) overlayConf(from, to *IPAMConf) { to.ConfDir = from.ConfDir } - if to.DataDir == "" { - to.DataDir = from.DataDir - } - - if to.Kubeconfig == "" { - to.Kubeconfig = from.Kubeconfig - } - if to.LogFile == "" { to.LogFile = from.LogFile } @@ -181,4 +169,12 @@ func (cl *confLoader) overlayConf(from, to *IPAMConf) { if to.PoolName == "" { to.PoolName = from.PoolName } + + if to.DaemonSocket == "" { + to.DaemonSocket = from.DaemonSocket + } + + if to.DaemonCallTimeoutSeconds == 0 { + to.DaemonCallTimeoutSeconds = from.DaemonCallTimeoutSeconds + } } diff --git a/pkg/cni/types/types_test.go b/pkg/cni/types/types_test.go index d7581c7..2f44142 100644 --- a/pkg/cni/types/types_test.go +++ b/pkg/cni/types/types_test.go @@ -21,7 +21,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" + cniTypes "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" ) var _ = Describe("Types Tests", func() { @@ -35,96 +35,69 @@ var _ = Describe("Types Tests", func() { testConfDir = path.Join(tmpDir, "nv-ipam.d") err := os.Mkdir(testConfDir, 0o755) Expect(err).ToNot(HaveOccurred()) - err = os.WriteFile(path.Join(testConfDir, types.K8sNodeNameFile), []byte("test-node"), 0o644) Expect(err).ToNot(HaveOccurred()) }) Context("LoadConf()", func() { It("loads default configuration when no overwrites provided", func() { // write empty config file - err := os.WriteFile(path.Join(testConfDir, types.ConfFileName), []byte("{}"), 0o644) - Expect(err).ToNot(HaveOccurred()) - - // write kubeconfig file - data, err := os.ReadFile(path.Join("..", "..", "..", "testdata", "test.kubeconfig")) - Expect(err).ToNot(HaveOccurred()) - err = os.WriteFile(path.Join(testConfDir, types.DefaultKubeConfigFileName), []byte(data), 0o644) + err := os.WriteFile(path.Join(testConfDir, cniTypes.ConfFileName), []byte("{}"), 0o644) Expect(err).ToNot(HaveOccurred()) // Load config testConf := fmt.Sprintf(`{"name": "my-net", "ipam": {"confDir": %q}}`, testConfDir) - conf, err := types.NewConfLoader().LoadConf([]byte(testConf)) + conf, err := cniTypes.NewConfLoader().LoadConf([]byte(testConf)) // Validate Expect(err).ToNot(HaveOccurred()) Expect(conf.IPAM.PoolName).To(Equal(conf.Name)) Expect(conf.IPAM.ConfDir).To(Equal(testConfDir)) - Expect(conf.IPAM.DataDir).To(Equal(types.DefaultDataDir)) - Expect(conf.IPAM.LogFile).To(Equal(types.DefaultLogFile)) + Expect(conf.IPAM.LogFile).To(Equal(cniTypes.DefaultLogFile)) Expect(conf.IPAM.LogLevel).To(Equal("info")) - Expect(conf.IPAM.Kubeconfig).To(Equal(path.Join(conf.IPAM.ConfDir, types.DefaultKubeConfigFileName))) }) It("overwrites configuration from file", func() { // write config file - confData := fmt.Sprintf(` - {"logLevel": "debug", "logFile": "some/path.log", "dataDir": "some/data/path", - "kubeconfig": "%s/alternate.kubeconfig"}`, testConfDir) - err := os.WriteFile(path.Join(testConfDir, types.ConfFileName), []byte(confData), 0o644) - Expect(err).ToNot(HaveOccurred()) - - // write kubeconfig file - data, err := os.ReadFile(path.Join("..", "..", "..", "testdata", "test.kubeconfig")) - Expect(err).ToNot(HaveOccurred()) - err = os.WriteFile(path.Join(testConfDir, "alternate.kubeconfig"), []byte(data), 0o644) + confData := `{"logLevel": "debug", "logFile": "some/path.log"}` + err := os.WriteFile(path.Join(testConfDir, cniTypes.ConfFileName), []byte(confData), 0o644) Expect(err).ToNot(HaveOccurred()) // Load config testConf := fmt.Sprintf(`{"name": "my-net", "ipam": {"confDir": %q}}`, testConfDir) - conf, err := types.NewConfLoader().LoadConf([]byte(testConf)) + conf, err := cniTypes.NewConfLoader().LoadConf([]byte(testConf)) // Validate Expect(err).ToNot(HaveOccurred()) Expect(conf.IPAM.ConfDir).To(Equal(testConfDir)) - Expect(conf.IPAM.DataDir).To(Equal("some/data/path")) Expect(conf.IPAM.LogFile).To(Equal("some/path.log")) Expect(conf.IPAM.LogLevel).To(Equal("debug")) - Expect(conf.IPAM.Kubeconfig).To(Equal(path.Join(conf.IPAM.ConfDir, "alternate.kubeconfig"))) }) It("overwrites configuration from json input", func() { // write config file - confData := `{"logLevel": "debug", "dataDir": "some/data/path"}` - err := os.WriteFile(path.Join(testConfDir, types.ConfFileName), []byte(confData), 0o644) - Expect(err).ToNot(HaveOccurred()) - - // write kubeconfig file - data, err := os.ReadFile(path.Join("..", 
"..", "..", "testdata", "test.kubeconfig")) - Expect(err).ToNot(HaveOccurred()) - err = os.WriteFile(path.Join(testConfDir, types.DefaultKubeConfigFileName), []byte(data), 0o644) + confData := `{"logLevel": "debug"}` + err := os.WriteFile(path.Join(testConfDir, cniTypes.ConfFileName), []byte(confData), 0o644) Expect(err).ToNot(HaveOccurred()) // Load config testConf := fmt.Sprintf(`{"name": "my-net", "ipam": {"confDir": %q, "poolName": "my-pool", "logLevel": "error"}}`, testConfDir) - conf, err := types.NewConfLoader().LoadConf([]byte(testConf)) + conf, err := cniTypes.NewConfLoader().LoadConf([]byte(testConf)) // Validate Expect(err).ToNot(HaveOccurred()) Expect(conf.IPAM.PoolName).To(Equal("my-pool")) Expect(conf.IPAM.ConfDir).To(Equal(testConfDir)) - Expect(conf.IPAM.DataDir).To(Equal("some/data/path")) - Expect(conf.IPAM.LogFile).To(Equal(types.DefaultLogFile)) + Expect(conf.IPAM.LogFile).To(Equal(cniTypes.DefaultLogFile)) Expect(conf.IPAM.LogLevel).To(Equal("error")) - Expect(conf.IPAM.Kubeconfig).To(Equal(path.Join(conf.IPAM.ConfDir, types.DefaultKubeConfigFileName))) }) It("Fails if config is invalid json", func() { - _, err := types.NewConfLoader().LoadConf([]byte("{garbage%^&*")) + _, err := cniTypes.NewConfLoader().LoadConf([]byte("{garbage%^&*")) Expect(err).To(HaveOccurred()) }) It("Fails if config does not contain ipam key", func() { - _, err := types.NewConfLoader().LoadConf([]byte(`{"name": "my-net", "type": "sriov"}`)) + _, err := cniTypes.NewConfLoader().LoadConf([]byte(`{"name": "my-net", "type": "sriov"}`)) Expect(err).To(HaveOccurred()) }) }) From 50f4e5dfde2c2ed2918ac557bc144c0bd1047ef0 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Tue, 4 Jul 2023 13:36:43 +0300 Subject: [PATCH 08/18] ipam-node: Update ipam-node command to run in daemon mode Signed-off-by: Yury Kulazhenkov --- cmd/ipam-node/app/app.go | 224 ++++++++++++ cmd/ipam-node/app/options/options.go | 114 ++++++ cmd/ipam-node/main.go | 327 +----------------- go.mod | 4 +- pkg/ipam-node/controllers/node/node.go | 73 ++++ pkg/ipam-node/grpc/middleware/middleware.go | 49 +++ .../grpc/middleware/middleware_suite_test.go | 26 ++ .../grpc/middleware/middleware_test.go | 70 ++++ pkg/ipam-node/handlers/allocate.go | 28 ++ pkg/ipam-node/handlers/deallocate.go | 28 ++ pkg/ipam-node/handlers/handlers.go | 40 +++ pkg/ipam-node/handlers/isallocated.go | 28 ++ 12 files changed, 685 insertions(+), 326 deletions(-) create mode 100644 cmd/ipam-node/app/app.go create mode 100644 cmd/ipam-node/app/options/options.go create mode 100644 pkg/ipam-node/controllers/node/node.go create mode 100644 pkg/ipam-node/grpc/middleware/middleware.go create mode 100644 pkg/ipam-node/grpc/middleware/middleware_suite_test.go create mode 100644 pkg/ipam-node/grpc/middleware/middleware_test.go create mode 100644 pkg/ipam-node/handlers/allocate.go create mode 100644 pkg/ipam-node/handlers/deallocate.go create mode 100644 pkg/ipam-node/handlers/handlers.go create mode 100644 pkg/ipam-node/handlers/isallocated.go diff --git a/cmd/ipam-node/app/app.go b/cmd/ipam-node/app/app.go new file mode 100644 index 0000000..421f650 --- /dev/null +++ b/cmd/ipam-node/app/app.go @@ -0,0 +1,224 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package app does all the work necessary to configure and run a +// IPAM Node daemon app process. +package app + +import ( + "context" + "fmt" + "net" + "sync" + + "github.com/go-logr/logr" + "github.com/spf13/cobra" + "google.golang.org/grpc" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/term" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/healthz" + + // register json format for logger + _ "k8s.io/component-base/logs/json/register" + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-node/app/options" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" + nodectrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/controllers/node" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/grpc/middleware" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/handlers" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + poolPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/version" +) + +// NewControllerCommand creates a *cobra.Command object with default parameters +func NewControllerCommand() *cobra.Command { + opts := options.New() + ctx := ctrl.SetupSignalHandler() + + cmd := &cobra.Command{ + Use: common.IPAMName + "node daemon", + Long: `NVIDIA K8S IPAM Node Daemon`, + SilenceUsage: true, + Version: version.GetVersionString(), + RunE: func(cmd *cobra.Command, args []string) error { + if err := opts.Validate(); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + conf, err := ctrl.GetConfig() + if err != nil { + return fmt.Errorf("failed to read config for k8s client: %v", err) + } + return RunNodeDaemon(logr.NewContext(ctx, klog.NewKlogr()), conf, opts) + }, + Args: func(cmd *cobra.Command, args []string) error { + for _, arg := range args { + if len(arg) > 0 { + return fmt.Errorf("%q does not take any arguments, got %q", cmd.CommandPath(), args) + } + } + return nil + }, + } + sharedFS := cliflag.NamedFlagSets{} + opts.AddNamedFlagSets(&sharedFS) + + cmdFS := cmd.PersistentFlags() + for _, f := range sharedFS.FlagSets { + cmdFS.AddFlagSet(f) + } + + cols, _, _ := term.TerminalSize(cmd.OutOrStdout()) + cliflag.SetUsageAndHelpFunc(cmd, sharedFS, cols) + + return cmd +} + +// RunNodeDaemon start IPAM node daemon with provided options +// +//nolint:funlen +func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Options) error { + logger := logr.FromContextOrDiscard(ctx) + ctrl.SetLogger(logger) + + logger.Info("start IPAM node daemon", + "version", version.GetVersionString(), "node", 
opts.NodeName) + + scheme := runtime.NewScheme() + + if err := clientgoscheme.AddToScheme(scheme); err != nil { + logger.Error(err, "failed to register scheme") + return err + } + + poolManager := poolPkg.NewManager() + + mgr, err := ctrl.NewManager(config, ctrl.Options{ + Scheme: scheme, + NewCache: cache.BuilderWithOptions(cache.Options{ + SelectorsByObject: cache.SelectorsByObject{&corev1.Node{}: cache.ObjectSelector{ + Field: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", opts.NodeName)), + }}, + }), + MetricsBindAddress: opts.MetricsAddr, + Port: 9443, + HealthProbeBindAddress: opts.ProbeAddr, + }) + if err != nil { + logger.Error(err, "unable to initialize manager") + return err + } + if err = (&nodectrl.NodeReconciler{ + PoolManager: poolManager, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + logger.Error(err, "unable to create controller", "controller", "Node") + return err + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + logger.Error(err, "unable to set up health check") + return err + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + logger.Error(err, "unable to set up ready check") + return err + } + + grpcServer, listener, err := initGRPCServer(opts, logger, poolManager) + if err != nil { + return err + } + + wg := sync.WaitGroup{} + wg.Add(3) + + errCh := make(chan error, 1) + + innerCtx, innerCFunc := context.WithCancel(ctx) + defer innerCFunc() + + go func() { + defer wg.Done() + <-innerCtx.Done() + grpcServer.GracefulStop() + }() + go func() { + defer wg.Done() + logger.Info("start grpc server") + if err := grpcServer.Serve(listener); err != nil { + logger.Error(err, "problem start grpc server") + select { + case errCh <- err: + default: + } + } + logger.Info("grpc server stopped") + }() + go func() { + defer wg.Done() + logger.Info("start manager") + if err := mgr.Start(innerCtx); err != nil { + logger.Error(err, "problem running manager") + select { + case errCh <- err: + default: + } + } + logger.Info("manager stopped") + }() + + select { + case <-ctx.Done(): + case <-errCh: + innerCFunc() + } + wg.Wait() + + logger.Info("IPAM node daemon stopped") + return nil +} + +func initGRPCServer(opts *options.Options, + log logr.Logger, poolConfReader poolPkg.ConfigReader) (*grpc.Server, net.Listener, error) { + network, address, err := options.ParseBindAddress(opts.BindAddress) + if err != nil { + return nil, nil, err + } + listener, err := net.Listen(network, address) + if err != nil { + log.Error(err, "failed to start listener for GRPC server") + return nil, nil, err + } + grpcServer := grpc.NewServer(grpc.ChainUnaryInterceptor( + middleware.SetLoggerMiddleware, + middleware.LogCallMiddleware)) + + nodev1.RegisterIPAMServiceServer(grpcServer, + handlers.New(poolConfReader, store.New(opts.StoreFile), allocator.NewIPAllocator)) + return grpcServer, listener, nil +} diff --git a/cmd/ipam-node/app/options/options.go b/cmd/ipam-node/app/options/options.go new file mode 100644 index 0000000..4a2ecb3 --- /dev/null +++ b/cmd/ipam-node/app/options/options.go @@ -0,0 +1,114 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package options + +import ( + goflag "flag" + "fmt" + "net/url" + "os" + "path/filepath" + + cliflag "k8s.io/component-base/cli/flag" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/cmdoptions" + cniTypes "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" +) + +const ( + // DefaultStoreFile contains path of the default store file + DefaultStoreFile = "/var/lib/cni/nv-ipam/store" + DefaultBindAddress = "unix://" + cniTypes.DefaultDaemonSocket +) + +// New initialize and return new Options object +func New() *Options { + return &Options{ + Options: *cmdoptions.New(), + MetricsAddr: ":8080", + ProbeAddr: ":8081", + NodeName: "", + BindAddress: DefaultBindAddress, + StoreFile: DefaultStoreFile, + } +} + +// Options holds command line options for controller +type Options struct { + cmdoptions.Options + MetricsAddr string + ProbeAddr string + NodeName string + BindAddress string + StoreFile string +} + +// AddNamedFlagSets register flags for common options in NamedFlagSets +func (o *Options) AddNamedFlagSets(sharedFS *cliflag.NamedFlagSets) { + o.Options.AddNamedFlagSets(sharedFS) + + daemonFS := sharedFS.FlagSet("Node daemon") + + goFS := goflag.NewFlagSet("tmp", goflag.ContinueOnError) + ctrl.RegisterFlags(goFS) + daemonFS.AddGoFlagSet(goFS) + + daemonFS.StringVar(&o.MetricsAddr, "metrics-bind-address", o.MetricsAddr, + "The address the metric endpoint binds to.") + daemonFS.StringVar(&o.ProbeAddr, "health-probe-bind-address", + o.ProbeAddr, "The address the probe endpoint binds to.") + daemonFS.StringVar(&o.NodeName, "node-name", + o.NodeName, "The name of the Node on which the daemon runs") + daemonFS.StringVar(&o.BindAddress, "bind-address", o.BindAddress, + "GPRC server bind address. 
e.g.: tcp://127.0.0.1:9092, unix:///var/lib/foo") + daemonFS.StringVar(&o.StoreFile, "store-file", o.StoreFile, + "Path of the file which used to store allocations") +} + +// Validate registered options +func (o *Options) Validate() error { + if len(o.NodeName) == 0 { + return fmt.Errorf("node-name is required parameter") + } + _, _, err := ParseBindAddress(o.BindAddress) + if err != nil { + return fmt.Errorf("bind-address is invalid: %v", err) + } + if len(o.StoreFile) == 0 { + return fmt.Errorf("store-file can't be empty") + } + _, err = os.Stat(filepath.Dir(o.StoreFile)) + if err != nil { + return fmt.Errorf("store-file is invalid: %v", err) + } + return o.Options.Validate() +} + +// ParseBindAddress parses bind-address and return network and address part separately, +// returns error if bind-address format is invalid +func ParseBindAddress(addr string) (string, string, error) { + u, err := url.Parse(addr) + if err != nil { + return "", "", err + } + switch u.Scheme { + case "tcp": + return u.Scheme, u.Host, nil + case "unix": + return u.Scheme, u.Host + u.Path, nil + default: + return "", "", fmt.Errorf("unsupported scheme") + } +} diff --git a/cmd/ipam-node/main.go b/cmd/ipam-node/main.go index 56c2186..a2b970d 100644 --- a/cmd/ipam-node/main.go +++ b/cmd/ipam-node/main.go @@ -14,334 +14,13 @@ package main import ( - "bytes" - "crypto/sha256" - b64 "encoding/base64" - "fmt" - "io" - "log" "os" - "text/template" - "time" - "github.com/spf13/pflag" + "k8s.io/component-base/cli" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/cmdutils" + "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-node/app" ) -// Options stores command line options -type Options struct { - CNIBinDir string - NvIpamCNIBinFile string - SkipNvIpamCNIBinaryCopy bool - NvIpamCNIDataDir string - NvIpamCNIDataDirHost string - CNIConfDir string - HostLocalBinFile string // may be hidden or remove? 
- SkipHostLocalBinaryCopy bool - NvIpamKubeConfigFileHost string - NvIpamLogLevel string - NvIpamLogFile string - SkipTLSVerify bool -} - -const ( - serviceAccountTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" //nolint:golint,gosec - serviceAccountCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" //nolint:golint,gosec -) - -func (o *Options) addFlags() { - // suppress error message for help - pflag.ErrHelp = nil //nolint:golint,reassign - fs := pflag.CommandLine - fs.StringVar(&o.CNIBinDir, - "cni-bin-dir", "/host/opt/cni/bin", "CNI binary directory") - fs.StringVar(&o.NvIpamCNIBinFile, - "nv-ipam-bin-file", "/nv-ipam", "nv-ipam binary file path") - fs.BoolVar(&o.SkipNvIpamCNIBinaryCopy, - "skip-nv-ipam-binary-copy", false, "skip nv-ipam binary file copy") - fs.StringVar(&o.NvIpamCNIDataDir, - "nv-ipam-cni-data-dir", "/host/var/lib/cni/nv-ipam", "nv-ipam CNI data directory") - fs.StringVar(&o.NvIpamCNIDataDirHost, - "nv-ipam-cni-data-dir-host", "/var/lib/cni/nv-ipam", "nv-ipam CNI data directory on host") - fs.StringVar(&o.CNIConfDir, - "cni-conf-dir", "/host/etc/cni/net.d", "CNI config directory") - fs.StringVar(&o.HostLocalBinFile, - "host-local-bin-file", "/host-local", "host-local binary file path") - fs.BoolVar(&o.SkipHostLocalBinaryCopy, - "skip-host-local-binary-copy", false, "skip host-local binary file copy") - fs.StringVar(&o.NvIpamKubeConfigFileHost, - "nv-ipam-kubeconfig-file-host", "/etc/cni/net.d/nv-ipam.d/nv-ipam.kubeconfig", "kubeconfig for nv-ipam") - fs.StringVar(&o.NvIpamLogLevel, - "nv-ipam-log-level", "info", "nv-ipam log level") - fs.StringVar(&o.NvIpamLogFile, - "nv-ipam-log-file", "/var/log/nv-ipam-cni.log", "nv-ipam log file") - fs.BoolVar(&o.SkipTLSVerify, - "skip-tls-verify", false, "skip TLS verify") - fs.MarkHidden("skip-tls-verify") //nolint:golint,errcheck -} - -func (o *Options) verifyFileExists() error { - // CNIBinDir - if _, err := os.Stat(o.CNIBinDir); err != nil { - return fmt.Errorf("cni-bin-dir is not found: %v", err) - } - - // CNIConfDir - if _, err := os.Stat(o.CNIConfDir); err != nil { - return fmt.Errorf("cni-conf-dir is not found: %v", err) - } - - if _, err := os.Stat(fmt.Sprintf("%s/bin", o.NvIpamCNIDataDir)); err != nil { - return fmt.Errorf("nv-ipam-cni-data-bin-dir is not found: %v", err) - } - - if _, err := os.Stat(fmt.Sprintf("%s/state/host-local", o.NvIpamCNIDataDir)); err != nil { - return fmt.Errorf("nv-ipam-cni-data-state-dir is not found: %v", err) - } - - // HostLocalBinFile - if _, err := os.Stat(o.HostLocalBinFile); err != nil { - return fmt.Errorf("host-local-bin-file is not found: %v", err) - } - return nil -} - -const kubeConfigTemplate = `# Kubeconfig file for nv-ipam CNI plugin. 
-apiVersion: v1 -kind: Config -clusters: -- name: local - cluster: - server: {{ .KubeConfigHost }} - {{ .KubeServerTLS }} -users: -- name: nv-ipam-node - user: - token: "{{ .KubeServiceAccountToken }}" -contexts: -- name: nv-ipam-node-context - context: - cluster: local - user: nv-ipam-node -current-context: nv-ipam-node-context -` - -const nvIpamConfigTemplate = `{ - "kubeconfig": "{{ .KubeConfigFile }}", - "dataDir": "{{ .NvIpamDataDir }}", - "logFile": "{{ .NvIpamLogFile }}", - "logLevel": "{{ .NvIpamLogLevel }}" -} -` - -func (o *Options) createKubeConfig(currentFileHash []byte) ([]byte, error) { - // check file exists - if _, err := os.Stat(serviceAccountTokenFile); err != nil { - return nil, fmt.Errorf("service account token is not found: %v", err) - } - if _, err := os.Stat(serviceAccountCAFile); err != nil { - return nil, fmt.Errorf("service account ca is not found: %v", err) - } - - // create nv-ipam.d directory - if err := os.MkdirAll(fmt.Sprintf("%s/nv-ipam.d", o.CNIConfDir), 0755); err != nil { - return nil, fmt.Errorf("cannot create nv-ipam.d directory: %v", err) - } - - // get Kubernetes service protocol/host/port - kubeProtocol := os.Getenv("KUBERNETES_SERVICE_PROTOCOL") - if kubeProtocol == "" { - kubeProtocol = "https" - } - kubeHost := os.Getenv("KUBERNETES_SERVICE_HOST") - kubePort := os.Getenv("KUBERNETES_SERVICE_PORT") - - // check tlsConfig - var tlsConfig string - if o.SkipTLSVerify { - tlsConfig = "insecure-skip-tls-verify: true" - } else { - // create tlsConfig by service account CA file - caFileByte, err := os.ReadFile(serviceAccountCAFile) - if err != nil { - return nil, fmt.Errorf("cannot read service account ca file: %v", err) - } - caFileB64 := bytes.ReplaceAll([]byte(b64.StdEncoding.EncodeToString(caFileByte)), []byte("\n"), []byte("")) - tlsConfig = fmt.Sprintf("certificate-authority-data: %s", string(caFileB64)) - } - - saTokenByte, err := os.ReadFile(serviceAccountTokenFile) - if err != nil { - return nil, fmt.Errorf("cannot read service account token file: %v", err) - } - - // create kubeconfig by template and replace it by atomic - tempKubeConfigFile := fmt.Sprintf("%s/nv-ipam.d/nv-ipam.kubeconfig.new", o.CNIConfDir) - nvIPAMKubeConfig := fmt.Sprintf("%s/nv-ipam.d/nv-ipam.kubeconfig", o.CNIConfDir) - fp, err := os.OpenFile(tempKubeConfigFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return nil, fmt.Errorf("cannot create kubeconfig temp file: %v", err) - } - - templateKubeconfig, err := template.New("kubeconfig").Parse(kubeConfigTemplate) - if err != nil { - return nil, fmt.Errorf("template parse error: %v", err) - } - templateData := map[string]string{ - "KubeConfigHost": fmt.Sprintf("%s://[%s]:%s", kubeProtocol, kubeHost, kubePort), - "KubeServerTLS": tlsConfig, - "KubeServiceAccountToken": string(saTokenByte), - } - - // Prepare - hash := sha256.New() - writer := io.MultiWriter(hash, fp) - - // genearate kubeconfig from template - if err = templateKubeconfig.Execute(writer, templateData); err != nil { - return nil, fmt.Errorf("cannot create kubeconfig: %v", err) - } - - if err := fp.Sync(); err != nil { - os.Remove(fp.Name()) - return nil, fmt.Errorf("cannot flush kubeconfig temp file: %v", err) - } - if err := fp.Close(); err != nil { - os.Remove(fp.Name()) - return nil, fmt.Errorf("cannot close kubeconfig temp file: %v", err) - } - - newFileHash := hash.Sum(nil) - if currentFileHash != nil && bytes.Equal(newFileHash, currentFileHash) { - log.Printf("kubeconfig is same, not copy\n") - os.Remove(fp.Name()) - return currentFileHash, 
nil - } - - // replace file with tempfile - if err := os.Rename(tempKubeConfigFile, nvIPAMKubeConfig); err != nil { - return nil, fmt.Errorf("cannot replace %q with temp file %q: %v", nvIPAMKubeConfig, tempKubeConfigFile, err) - } - - log.Printf("kubeconfig is created in %s\n", nvIPAMKubeConfig) - return newFileHash, nil -} -func (o *Options) createNvIpamConfig(currentFileHash []byte) ([]byte, error) { - // create kubeconfig by template and replace it by atomic - tempNvIpamConfigFile := fmt.Sprintf("%s/nv-ipam.d/nv-ipam.conf.new", o.CNIConfDir) - nvIpamConfigFile := fmt.Sprintf("%s/nv-ipam.d/nv-ipam.conf", o.CNIConfDir) - fp, err := os.OpenFile(tempNvIpamConfigFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return nil, fmt.Errorf("cannot create nv-ipam.conf temp file: %v", err) - } - - templateNvIpamConfig, err := template.New("nv-ipam-config").Parse(nvIpamConfigTemplate) - if err != nil { - return nil, fmt.Errorf("template parse error: %v", err) - } - - templateData := map[string]string{ - "KubeConfigFile": o.NvIpamKubeConfigFileHost, - "NvIpamDataDir": o.NvIpamCNIDataDirHost, - "NvIpamLogFile": o.NvIpamLogFile, - "NvIpamLogLevel": o.NvIpamLogLevel, - } - - // Prepare - hash := sha256.New() - writer := io.MultiWriter(hash, fp) - - // genearate nv-ipam-config from template - if err = templateNvIpamConfig.Execute(writer, templateData); err != nil { - return nil, fmt.Errorf("cannot create nv-ipam-config: %v", err) - } - - if err := fp.Sync(); err != nil { - os.Remove(fp.Name()) - return nil, fmt.Errorf("cannot flush nv-ipam-config temp file: %v", err) - } - if err := fp.Close(); err != nil { - os.Remove(fp.Name()) - return nil, fmt.Errorf("cannot close nv-ipam-config temp file: %v", err) - } - - newFileHash := hash.Sum(nil) - if currentFileHash != nil && bytes.Equal(newFileHash, currentFileHash) { - log.Printf("nv-ipam-config is same, not copy\n") - os.Remove(fp.Name()) - return currentFileHash, nil - } - - // replace file with tempfile - if err := os.Rename(tempNvIpamConfigFile, nvIpamConfigFile); err != nil { - return nil, fmt.Errorf("cannot replace %q with temp file %q: %v", nvIpamConfigFile, tempNvIpamConfigFile, err) - } - - log.Printf("nv-ipam-config is created in %s\n", nvIpamConfigFile) - return newFileHash, nil -} - func main() { - opt := Options{} - opt.addFlags() - helpFlag := pflag.BoolP("help", "h", false, "show help message and quit") - - pflag.Parse() - if *helpFlag { - pflag.PrintDefaults() - os.Exit(1) - } - - err := opt.verifyFileExists() - if err != nil { - log.Printf("%v\n", err) - return - } - - // copy nv-ipam binary - if !opt.SkipNvIpamCNIBinaryCopy { - // Copy - if err = cmdutils.CopyFileAtomic(opt.NvIpamCNIBinFile, opt.CNIBinDir, "_nv-ipam", "nv-ipam"); err != nil { - log.Printf("failed at nv-ipam copy: %v\n", err) - return - } - } - - // copy host-local binary - if !opt.SkipHostLocalBinaryCopy { - // Copy - hostLocalCNIBinDir := fmt.Sprintf("%s/bin", opt.NvIpamCNIDataDir) - if err = cmdutils.CopyFileAtomic(opt.HostLocalBinFile, hostLocalCNIBinDir, "_host-local", "host-local"); err != nil { - log.Printf("failed at host-local copy: %v\n", err) - return - } - } - - _, err = opt.createKubeConfig(nil) - if err != nil { - log.Printf("failed to create nv-ipam kubeconfig: %v\n", err) - return - } - log.Printf("kubeconfig file is created.\n") - - _, err = opt.createNvIpamConfig(nil) - if err != nil { - log.Printf("failed to create nv-ipam config file: %v\n", err) - return - } - log.Printf("nv-ipam config file is created.\n") - - nodeName := 
os.Getenv("NODE_NAME") - err = os.WriteFile(fmt.Sprintf("%s/nv-ipam.d/k8s-node-name", opt.CNIConfDir), []byte(nodeName), 0600) - if err != nil { - log.Printf("failed to create nv-ipam k8s-node-name: %v\n", err) - return - } - log.Printf("k8s-node-name file is created.\n") - - // sleep infinitely - for { - time.Sleep(time.Duration(1<<63 - 1)) - } + os.Exit(cli.Run(app.NewControllerCommand())) } diff --git a/go.mod b/go.mod index a612893..eff403e 100644 --- a/go.mod +++ b/go.mod @@ -6,11 +6,11 @@ require ( github.com/containernetworking/cni v1.1.2 github.com/go-logr/logr v1.2.4 github.com/google/renameio/v2 v2.0.0 + github.com/google/uuid v1.3.0 github.com/k8snetworkplumbingwg/cni-log v0.0.0-20230321145726-634c593dd11f github.com/onsi/ginkgo/v2 v2.9.2 github.com/onsi/gomega v1.27.6 github.com/spf13/cobra v1.7.0 - github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.3 google.golang.org/grpc v1.56.2 google.golang.org/protobuf v1.31.0 @@ -43,7 +43,6 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect - github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -60,6 +59,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect diff --git a/pkg/ipam-node/controllers/node/node.go b/pkg/ipam-node/controllers/node/node.go new file mode 100644 index 0000000..13df8d7 --- /dev/null +++ b/pkg/ipam-node/controllers/node/node.go @@ -0,0 +1,73 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" +) + +// NodeReconciler reconciles Node objects +type NodeReconciler struct { + PoolManager pool.Manager + client.Client + Scheme *runtime.Scheme +} + +// Reconcile contains logic to sync Node objects +func (r *NodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqLog := log.FromContext(ctx) + node := &corev1.Node{} + err := r.Client.Get(ctx, req.NamespacedName, node) + if err != nil { + if apiErrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + if err := r.PoolManager.Update(node); err != nil { + reqLog.Info("pool config from the node object is not updated, reset pool config", + "reason", err.Error()) + r.PoolManager.Reset() + } else { + reqLog.Info("pools configuration updated", "data", r.PoolManager.GetPools()) + } + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NodeReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Node{}). + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + return true + } + return e.ObjectOld.GetAnnotations()[pool.IPBlocksAnnotation] != + e.ObjectNew.GetAnnotations()[pool.IPBlocksAnnotation] + }, + }). + Complete(r) +} diff --git a/pkg/ipam-node/grpc/middleware/middleware.go b/pkg/ipam-node/grpc/middleware/middleware.go new file mode 100644 index 0000000..767c1a2 --- /dev/null +++ b/pkg/ipam-node/grpc/middleware/middleware.go @@ -0,0 +1,49 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package middleware + +import ( + "context" + "time" + + "github.com/go-logr/logr" + "github.com/google/uuid" + "google.golang.org/grpc" + "k8s.io/klog/v2" +) + +// SetLoggerMiddleware creates logger instance with additional information and saves it to req context +func SetLoggerMiddleware(ctx context.Context, req interface{}, + info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + ctx = logr.NewContext(ctx, + klog.NewKlogr().WithValues("method", info.FullMethod, "reqID", uuid.New().String())) + return handler(ctx, req) +} + +// LogCallMiddleware log request and response with configured logger +func LogCallMiddleware(ctx context.Context, req interface{}, + _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + reqLogger := logr.FromContextOrDiscard(ctx) + startTime := time.Now() + reqLogger.V(1).Info("REQUEST") + resp, err := handler(ctx, req) + reqLogger = reqLogger.WithValues( + "call_duration_sec", time.Since(startTime).Seconds()) + if err != nil { + reqLogger.Error(err, "ERROR RESPONSE") + } else { + reqLogger.V(1).Info("RESPONSE") + } + return resp, err +} diff --git a/pkg/ipam-node/grpc/middleware/middleware_suite_test.go b/pkg/ipam-node/grpc/middleware/middleware_suite_test.go new file mode 100644 index 0000000..03ddfd8 --- /dev/null +++ b/pkg/ipam-node/grpc/middleware/middleware_suite_test.go @@ -0,0 +1,26 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package middleware_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestMiddleware(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Middleware Suite") +} diff --git a/pkg/ipam-node/grpc/middleware/middleware_test.go b/pkg/ipam-node/grpc/middleware/middleware_test.go new file mode 100644 index 0000000..74ece41 --- /dev/null +++ b/pkg/ipam-node/grpc/middleware/middleware_test.go @@ -0,0 +1,70 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package middleware_test + +import ( + "context" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "google.golang.org/grpc" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/grpc/middleware" +) + +type fakeHandler struct { + IsCalled bool + CalledWithCtx context.Context + CalledWithReq interface{} + Error error +} + +func (fh *fakeHandler) Handle(ctx context.Context, req interface{}) (interface{}, error) { + fh.IsCalled = true + fh.CalledWithCtx = ctx + fh.CalledWithReq = req + return nil, fh.Error +} + +const testFullMethod = "foobar" + +var testUnaryServerInfo = &grpc.UnaryServerInfo{ + FullMethod: testFullMethod, +} + +var _ = Describe("Middleware tests", func() { + var ( + ctx context.Context + ) + BeforeEach(func() { + ctx = context.Background() + }) + Describe("Logger middleware", func() { + It("Set", func() { + handler := fakeHandler{} + _, err := middleware.SetLoggerMiddleware(ctx, nil, testUnaryServerInfo, handler.Handle) + Expect(err).NotTo(HaveOccurred()) + Expect(handler.IsCalled).To(BeTrue()) + Expect(logr.FromContextOrDiscard(handler.CalledWithCtx)).NotTo(BeNil()) + }) + It("Req/Resp", func() { + handler := fakeHandler{} + ctx = logr.NewContext(ctx, logr.Discard()) + _, err := middleware.LogCallMiddleware(ctx, nil, testUnaryServerInfo, handler.Handle) + Expect(err).NotTo(HaveOccurred()) + Expect(handler.IsCalled).To(BeTrue()) + }) + }) +}) diff --git a/pkg/ipam-node/handlers/allocate.go b/pkg/ipam-node/handlers/allocate.go new file mode 100644 index 0000000..339e2d0 --- /dev/null +++ b/pkg/ipam-node/handlers/allocate.go @@ -0,0 +1,28 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handlers + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + daemonv1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" +) + +// Allocate is the handler for Allocate GRPC endpoint +func (s *Handlers) Allocate(context.Context, *daemonv1.AllocateRequest) (*daemonv1.AllocateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Allocate not implemented") +} diff --git a/pkg/ipam-node/handlers/deallocate.go b/pkg/ipam-node/handlers/deallocate.go new file mode 100644 index 0000000..5d22deb --- /dev/null +++ b/pkg/ipam-node/handlers/deallocate.go @@ -0,0 +1,28 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package handlers + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + daemonv1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" +) + +// Deallocate is the handler for Deallocate GRPC endpoint +func (s *Handlers) Deallocate(context.Context, *daemonv1.DeallocateRequest) (*daemonv1.DeallocateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Deallocate not implemented") +} diff --git a/pkg/ipam-node/handlers/handlers.go b/pkg/ipam-node/handlers/handlers.go new file mode 100644 index 0000000..f210789 --- /dev/null +++ b/pkg/ipam-node/handlers/handlers.go @@ -0,0 +1,40 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handlers + +import ( + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + poolPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" +) + +type GetAllocatorFunc = func(s *allocator.RangeSet, poolName string, session storePkg.Session) allocator.IPAllocator + +// New create and initialize new instance of grpc Handlers +func New(poolConfReader poolPkg.ConfigReader, store storePkg.Store, getAllocFunc GetAllocatorFunc) *Handlers { + return &Handlers{ + poolConfReader: poolConfReader, + store: store, + getAllocFunc: getAllocFunc, + } +} + +// Handlers contains implementation of the GRPC endpoints handlers for ipam-daemon +type Handlers struct { + poolConfReader poolPkg.ConfigReader + store storePkg.Store + getAllocFunc GetAllocatorFunc + nodev1.UnsafeIPAMServiceServer +} diff --git a/pkg/ipam-node/handlers/isallocated.go b/pkg/ipam-node/handlers/isallocated.go new file mode 100644 index 0000000..be421d3 --- /dev/null +++ b/pkg/ipam-node/handlers/isallocated.go @@ -0,0 +1,28 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package handlers + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + daemonv1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" +) + +// IsAllocated is the handler for IsAllocated GRPC endpoint +func (s *Handlers) IsAllocated(context.Context, *daemonv1.IsAllocatedRequest) (*daemonv1.IsAllocatedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IsAllocated not implemented") +} From ea96c6c3a40567f284ff94d2bffdfaa206d7d65a Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 14 Jul 2023 12:51:45 +0300 Subject: [PATCH 09/18] ipam-node: Add GRPC handlers implementation Signed-off-by: Yury Kulazhenkov --- pkg/ipam-node/handlers/allocate.go | 138 +++++++++- pkg/ipam-node/handlers/deallocate.go | 30 +- pkg/ipam-node/handlers/handlers.go | 96 +++++++ pkg/ipam-node/handlers/handlers_suite_test.go | 26 ++ pkg/ipam-node/handlers/handlers_test.go | 260 ++++++++++++++++++ pkg/ipam-node/handlers/isallocated.go | 37 ++- 6 files changed, 576 insertions(+), 11 deletions(-) create mode 100644 pkg/ipam-node/handlers/handlers_suite_test.go create mode 100644 pkg/ipam-node/handlers/handlers_test.go diff --git a/pkg/ipam-node/handlers/allocate.go b/pkg/ipam-node/handlers/allocate.go index 339e2d0..9a9813d 100644 --- a/pkg/ipam-node/handlers/allocate.go +++ b/pkg/ipam-node/handlers/allocate.go @@ -15,14 +15,146 @@ package handlers import ( "context" + "errors" + "fmt" + "net" + "time" + cniTypes "github.com/containernetworking/cni/pkg/types" + current "github.com/containernetworking/cni/pkg/types/100" + "github.com/go-logr/logr" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - daemonv1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" ) // Allocate is the handler for Allocate GRPC endpoint -func (s *Handlers) Allocate(context.Context, *daemonv1.AllocateRequest) (*daemonv1.AllocateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Allocate not implemented") +func (h *Handlers) Allocate(ctx context.Context, req *nodev1.AllocateRequest) (*nodev1.AllocateResponse, error) { + reqLog := addFieldsToLogger(logr.FromContextOrDiscard(ctx), req) + ctx = logr.NewContext(ctx, reqLog) + + if err := validateReq(req); err != nil { + return nil, err + } + params := req.Parameters + store, err := h.openStore(ctx) + if err != nil { + return nil, err + } + if err := checkReqIsCanceled(ctx); err != nil { + return nil, h.closeSession(ctx, store, err) + } + result, err := h.allocate(reqLog, store, params) + if err := h.closeSession(ctx, store, err); err != nil { + return nil, err + } + resp := &nodev1.AllocateResponse{} + for _, r := range result { + resp.Allocations = append(resp.Allocations, &nodev1.AllocationInfo{ + Pool: r.Pool, + Ip: r.Address.String(), + Gateway: r.Gateway.String(), + }) + } + return resp, nil +} + +// PoolAlloc container which store Pool name and allocation +type PoolAlloc struct { + Pool string + *current.IPConfig +} + +func (h *Handlers) allocate(reqLog logr.Logger, + session storePkg.Session, params *nodev1.IPAMParameters) ([]PoolAlloc, error) { + var err error + result := make([]PoolAlloc, 0, len(params.Pools)) + for _, pool := range params.Pools { + var alloc PoolAlloc + alloc, err = 
h.allocateInPool(pool, reqLog, session, params) + if err != nil { + break + } + result = append(result, alloc) + } + if err != nil { + return nil, err + } + + return result, nil +} + +func (h *Handlers) allocateInPool(pool string, reqLog logr.Logger, + session storePkg.Session, params *nodev1.IPAMParameters) (PoolAlloc, error) { + poolLog := reqLog.WithValues("pool", pool) + + poolCfg := h.poolConfReader.GetPoolByName(pool) + if poolCfg == nil { + return PoolAlloc{}, status.Errorf(codes.NotFound, "configuration for pool %s not found", pool) + } + rangeStart := net.ParseIP(poolCfg.StartIP) + if rangeStart == nil { + return PoolAlloc{}, poolCfgError(poolLog, pool, "invalid rangeStart") + } + rangeEnd := net.ParseIP(poolCfg.EndIP) + if rangeEnd == nil { + return PoolAlloc{}, poolCfgError(poolLog, pool, "invalid rangeEnd") + } + _, subnet, err := net.ParseCIDR(poolCfg.Subnet) + if err != nil || subnet == nil || subnet.IP == nil || subnet.Mask == nil { + return PoolAlloc{}, poolCfgError(poolLog, pool, "invalid subnet") + } + gateway := net.ParseIP(poolCfg.Gateway) + if gateway == nil { + return PoolAlloc{}, poolCfgError(poolLog, pool, "invalid gateway") + } + rangeSet := &allocator.RangeSet{allocator.Range{ + RangeStart: rangeStart, + RangeEnd: rangeEnd, + Subnet: cniTypes.IPNet(*subnet), + Gateway: gateway, + }} + if err := rangeSet.Canonicalize(); err != nil { + return PoolAlloc{}, poolCfgError(poolLog, pool, + fmt.Sprintf("invalid range config: %s", err.Error())) + } + alloc := h.getAllocFunc(rangeSet, pool, session) + allocMeta := types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PoolConfigSnapshot: poolCfg.String(), + } + if params.Metadata != nil { + allocMeta.PodUUID = params.Metadata.K8SPodUid + allocMeta.PodName = params.Metadata.K8SPodName + allocMeta.PodNamespace = params.Metadata.K8SPodNamespace + allocMeta.DeviceID = params.Metadata.DeviceId + } + result, err := alloc.Allocate(params.CniContainerid, params.CniIfname, allocMeta) + if err != nil { + poolLog.Error(err, "failed to allocate IP address") + if errors.Is(err, storePkg.ErrReservationAlreadyExist) { + return PoolAlloc{}, status.Errorf(codes.AlreadyExists, + "allocation already exist in the pool %s", pool) + } + if errors.Is(err, allocator.ErrNoFreeAddresses) { + return PoolAlloc{}, status.Errorf(codes.ResourceExhausted, "no free addresses in the pool %s", pool) + } + return PoolAlloc{}, status.Errorf(codes.Internal, "failed to allocate IP address in pool %s", pool) + } + poolLog.Info("IP address allocated", "allocation", result.String()) + + return PoolAlloc{ + Pool: pool, + IPConfig: result, + }, nil +} + +func poolCfgError(reqLog logr.Logger, pool, reason string) error { + reqLog.Error(nil, "invalid pool config", "pool", pool, + "reason", reason) + return status.Errorf(codes.Internal, "invalid config for pool %s", pool) } diff --git a/pkg/ipam-node/handlers/deallocate.go b/pkg/ipam-node/handlers/deallocate.go index 5d22deb..015fefa 100644 --- a/pkg/ipam-node/handlers/deallocate.go +++ b/pkg/ipam-node/handlers/deallocate.go @@ -16,13 +16,33 @@ package handlers import ( "context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + "github.com/go-logr/logr" - daemonv1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" ) // Deallocate is the handler for Deallocate GRPC endpoint -func (s *Handlers) Deallocate(context.Context, *daemonv1.DeallocateRequest) (*daemonv1.DeallocateResponse, error) 
{ - return nil, status.Errorf(codes.Unimplemented, "method Deallocate not implemented") +func (h *Handlers) Deallocate( + ctx context.Context, req *nodev1.DeallocateRequest) (*nodev1.DeallocateResponse, error) { + reqLog := addFieldsToLogger(logr.FromContextOrDiscard(ctx), req) + ctx = logr.NewContext(ctx, reqLog) + if err := validateReq(req); err != nil { + return nil, err + } + params := req.Parameters + store, err := h.openStore(ctx) + if err != nil { + return nil, err + } + if err := checkReqIsCanceled(ctx); err != nil { + return nil, h.closeSession(ctx, store, err) + } + for _, p := range params.Pools { + store.ReleaseReservationByID(p, params.CniContainerid, params.CniIfname) + } + if err := h.closeSession(ctx, store, nil); err != nil { + return nil, err + } + reqLog.Info("reservation released") + return &nodev1.DeallocateResponse{}, nil } diff --git a/pkg/ipam-node/handlers/handlers.go b/pkg/ipam-node/handlers/handlers.go index f210789..c01cfd4 100644 --- a/pkg/ipam-node/handlers/handlers.go +++ b/pkg/ipam-node/handlers/handlers.go @@ -14,6 +14,13 @@ package handlers import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" @@ -38,3 +45,92 @@ type Handlers struct { getAllocFunc GetAllocatorFunc nodev1.UnsafeIPAMServiceServer } + +func (h *Handlers) openStore(ctx context.Context) (storePkg.Session, error) { + reqLog := logr.FromContextOrDiscard(ctx) + store, err := h.store.Open(ctx) + if err != nil { + reqLog.Error(err, "failed to open store") + return nil, status.Errorf(codes.Internal, "failed to open store") + } + return store, nil +} + +func (h *Handlers) closeSession(ctx context.Context, session storePkg.Session, err error) error { + reqLog := logr.FromContextOrDiscard(ctx) + if err == nil { + if err := session.Commit(); err != nil { + reqLog.Error(err, "failed to close session") + return status.Errorf(codes.Internal, "failed to close session") + } + reqLog.Info("all changes are committed to the store") + return nil + } + reqLog.Info("all store modifications are canceled") + session.Cancel() + return err +} + +type paramsGetter interface { + GetParameters() *nodev1.IPAMParameters +} + +func fieldIsRequiredError(field string) error { + return status.Errorf(codes.InvalidArgument, "%s is required field", field) +} + +func fieldsIsInvalidError(field string) error { + return status.Errorf(codes.InvalidArgument, "%s is invalid", field) +} + +func validateReq(req paramsGetter) error { + params := req.GetParameters() + if params == nil { + return fieldIsRequiredError("parameters") + } + if len(params.Pools) == 0 || len(params.Pools) > 2 { + return fieldsIsInvalidError("parameters.pools") + } + for i, p := range params.Pools { + if p == "" { + return fieldsIsInvalidError(fmt.Sprintf("parameters.pools[%d]", i)) + } + } + if params.CniContainerid == "" { + return fieldIsRequiredError("parameters.cni_containerid") + } + if params.CniIfname == "" { + return fieldIsRequiredError("parameters.cni_ifname") + } + if params.Metadata == nil { + return fieldIsRequiredError("parameters.metadata") + } + if params.Metadata.K8SPodName == "" { + return fieldIsRequiredError("parameters.metadata.k8s_pod_name") + } + if params.Metadata.K8SPodNamespace == "" { + return fieldIsRequiredError("parameters.metadata.k8s_pod_namespace") + } + 
return nil +} + +func addFieldsToLogger(log logr.Logger, req paramsGetter) logr.Logger { + params := req.GetParameters() + if params == nil { + return log + } + return log.WithValues("pools", params.Pools, + "container_id", params.CniContainerid, + "interface_name", params.CniIfname, + "meta", params.Metadata.String(), + ) +} + +func checkReqIsCanceled(ctx context.Context) error { + select { + case <-ctx.Done(): + return status.Error(codes.Canceled, "request has been canceled") + default: + return nil + } +} diff --git a/pkg/ipam-node/handlers/handlers_suite_test.go b/pkg/ipam-node/handlers/handlers_suite_test.go new file mode 100644 index 0000000..c8ef69c --- /dev/null +++ b/pkg/ipam-node/handlers/handlers_suite_test.go @@ -0,0 +1,26 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handlers_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestHandlers(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Handlers Suite") +} diff --git a/pkg/ipam-node/handlers/handlers_test.go b/pkg/ipam-node/handlers/handlers_test.go new file mode 100644 index 0000000..d75ae56 --- /dev/null +++ b/pkg/ipam-node/handlers/handlers_test.go @@ -0,0 +1,260 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package handlers_test + +import ( + "context" + "fmt" + "net" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + current "github.com/containernetworking/cni/pkg/types/100" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + allocatorPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" + allocatorMockPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator/mocks" + handlersPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/handlers" + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + storeMockPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store/mocks" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" + poolMockPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool/mocks" +) + +const ( + testPoolName1 = "pool1" + testPoolName2 = "pool2" + testPodName = "test-pod" + testNamespace = "default" + testPodUID = "aaf0a0fc-9869-41ef-9214-48599f85b4fa" +) + +func getPoolConfigs() map[string]*pool.IPPool { + return map[string]*pool.IPPool{ + testPoolName1: { + Name: testPoolName1, + Subnet: "192.168.0.0/16", + StartIP: "192.168.0.2", + EndIP: "192.168.0.254", + Gateway: "192.168.0.1", + }, + testPoolName2: {Name: testPoolName2, + Subnet: "10.100.0.0/16", + StartIP: "10.100.0.2", + EndIP: "10.100.0.254", + Gateway: "10.100.0.1", + }, + } +} + +func getValidIPAMParams() *nodev1.IPAMParameters { + return &nodev1.IPAMParameters{ + Pools: []string{testPoolName1, testPoolName2}, + CniContainerid: "id1", + CniIfname: "net0", + Metadata: &nodev1.IPAMMetadata{ + K8SPodName: testPodName, + K8SPodNamespace: testNamespace, + K8SPodUid: testPodUID, + DeviceId: "0000:d8:00.1", + }, + } +} + +var _ = Describe("Handlers", func() { + var ( + poolManager *poolMockPkg.Manager + store *storeMockPkg.Store + session *storeMockPkg.Session + allocators map[string]*allocatorMockPkg.IPAllocator + getAllocFunc handlersPkg.GetAllocatorFunc + handlers *handlersPkg.Handlers + ctx context.Context + ) + BeforeEach(func() { + ctx = context.Background() + poolManager = poolMockPkg.NewManager(GinkgoT()) + store = storeMockPkg.NewStore(GinkgoT()) + session = storeMockPkg.NewSession(GinkgoT()) + allocators = map[string]*allocatorMockPkg.IPAllocator{ + testPoolName1: allocatorMockPkg.NewIPAllocator(GinkgoT()), + testPoolName2: allocatorMockPkg.NewIPAllocator(GinkgoT())} + getAllocFunc = func(s *allocatorPkg.RangeSet, poolName string, store storePkg.Session) allocatorPkg.IPAllocator { + return allocators[poolName] + } + handlers = handlersPkg.New(poolManager, store, getAllocFunc) + }) + + It("Allocate succeed", func() { + store.On("Open", mock.Anything).Return(session, nil) + poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + poolManager.On("GetPoolByName", testPoolName2).Return(getPoolConfigs()[testPoolName2]) + allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( + ¤t.IPConfig{ + Gateway: net.ParseIP("192.168.0.1"), + Address: getIPWithMask("192.168.0.2/16"), + }, nil) + allocators[testPoolName2].On("Allocate", "id1", "net0", mock.Anything).Return( + ¤t.IPConfig{ + Gateway: net.ParseIP("10.100.0.1"), + Address: getIPWithMask("10.100.0.2/16"), + }, nil) + session.On("Commit").Return(nil) + + resp, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) + Expect(err).NotTo(HaveOccurred()) + Expect(resp.Allocations).To(HaveLen(2)) + Expect(resp.Allocations).To(ContainElement( + And( + 
HaveField("Pool", testPoolName1), + HaveField("Ip", "192.168.0.2/16"), + HaveField("Gateway", "192.168.0.1"), + ))) + Expect(resp.Allocations).To(ContainElement( + And( + HaveField("Pool", testPoolName2), + HaveField("Ip", "10.100.0.2/16"), + HaveField("Gateway", "10.100.0.1"), + ))) + }) + It("Allocation failed: unknown pool", func() { + store.On("Open", mock.Anything).Return(session, nil) + poolManager.On("GetPoolByName", testPoolName1).Return(nil) + session.On("Cancel").Return() + _, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.NotFound).To(BeTrue()) + }) + It("Allocation failed: bad pool config", func() { + store.On("Open", mock.Anything).Return(session, nil) + pool1Cfg := getPoolConfigs()[testPoolName1] + endIP := pool1Cfg.EndIP + startIP := pool1Cfg.StartIP + pool1Cfg.StartIP = endIP + pool1Cfg.EndIP = startIP + poolManager.On("GetPoolByName", testPoolName1).Return(pool1Cfg) + session.On("Cancel").Return() + _, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.Internal).To(BeTrue()) + }) + It("Allocation failed: pool2 has no free IPs", func() { + store.On("Open", mock.Anything).Return(session, nil) + poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + poolManager.On("GetPoolByName", testPoolName2).Return(getPoolConfigs()[testPoolName2]) + allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( + ¤t.IPConfig{ + Gateway: net.ParseIP("192.168.0.1"), + Address: getIPWithMask("192.168.0.2/16"), + }, nil) + allocators[testPoolName2].On("Allocate", "id1", "net0", mock.Anything).Return( + nil, allocatorPkg.ErrNoFreeAddresses) + session.On("Cancel").Return() + + _, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.ResourceExhausted).To(BeTrue()) + }) + It("Allocation failed: already allocated", func() { + store.On("Open", mock.Anything).Return(session, nil) + poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( + nil, storePkg.ErrReservationAlreadyExist) + session.On("Cancel").Return() + _, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.AlreadyExists).To(BeTrue()) + }) + It("Allocation failed: failed to commit", func() { + store.On("Open", mock.Anything).Return(session, nil) + poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + poolManager.On("GetPoolByName", testPoolName2).Return(getPoolConfigs()[testPoolName2]) + allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( + ¤t.IPConfig{ + Gateway: net.ParseIP("192.168.0.1"), + Address: getIPWithMask("192.168.0.2/16"), + }, nil) + allocators[testPoolName2].On("Allocate", "id1", "net0", mock.Anything).Return( + ¤t.IPConfig{ + Gateway: net.ParseIP("10.100.0.1"), + Address: getIPWithMask("10.100.0.2/16"), + }, nil) + session.On("Commit").Return(fmt.Errorf("test")) + _, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.Internal).To(BeTrue()) + }) + It("Allocation failed: canceled", func() { + store.On("Open", mock.Anything).Return(session, nil) + session.On("Cancel").Return() + ctx, cFunc := context.WithCancel(ctx) + cFunc() + _, err := 
handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.Canceled).To(BeTrue()) + }) + It("IsAllocated succeed", func() { + store.On("Open", mock.Anything).Return(session, nil) + session.On("GetReservationByID", testPoolName1, "id1", "net0").Return(&types.Reservation{}) + session.On("GetReservationByID", testPoolName2, "id1", "net0").Return(&types.Reservation{}) + session.On("Commit").Return(nil) + _, err := handlers.IsAllocated(ctx, &nodev1.IsAllocatedRequest{Parameters: getValidIPAMParams()}) + Expect(err).NotTo(HaveOccurred()) + }) + It("IsAllocated failed: no reservation", func() { + store.On("Open", mock.Anything).Return(session, nil) + session.On("GetReservationByID", testPoolName1, "id1", "net0").Return(nil) + session.On("Cancel").Return(nil) + _, err := handlers.IsAllocated(ctx, &nodev1.IsAllocatedRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.NotFound).To(BeTrue()) + }) + It("IsAllocated failed: canceled", func() { + store.On("Open", mock.Anything).Return(session, nil) + session.On("Cancel").Return() + ctx, cFunc := context.WithCancel(ctx) + cFunc() + _, err := handlers.IsAllocated(ctx, &nodev1.IsAllocatedRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.Canceled).To(BeTrue()) + }) + It("Deallocate succeed", func() { + store.On("Open", mock.Anything).Return(session, nil) + session.On("ReleaseReservationByID", testPoolName1, "id1", "net0").Return() + session.On("ReleaseReservationByID", testPoolName2, "id1", "net0").Return() + session.On("Commit").Return(nil) + _, err := handlers.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: getValidIPAMParams()}) + Expect(err).NotTo(HaveOccurred()) + }) + It("Deallocate failed: failed to commit", func() { + store.On("Open", mock.Anything).Return(session, nil) + session.On("ReleaseReservationByID", testPoolName1, "id1", "net0").Return() + session.On("ReleaseReservationByID", testPoolName2, "id1", "net0").Return() + session.On("Commit").Return(fmt.Errorf("test error")) + _, err := handlers.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.Internal).To(BeTrue()) + }) + It("Deallocate failed: canceled", func() { + store.On("Open", mock.Anything).Return(session, nil) + session.On("Cancel").Return() + ctx, cFunc := context.WithCancel(ctx) + cFunc() + _, err := handlers.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: getValidIPAMParams()}) + Expect(status.Code(err) == codes.Canceled).To(BeTrue()) + }) +}) + +func getIPWithMask(addr string) net.IPNet { + ipAddr, netAddr, err := net.ParseCIDR(addr) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + netAddr.IP = ipAddr + return *netAddr +} diff --git a/pkg/ipam-node/handlers/isallocated.go b/pkg/ipam-node/handlers/isallocated.go index be421d3..cd7cb06 100644 --- a/pkg/ipam-node/handlers/isallocated.go +++ b/pkg/ipam-node/handlers/isallocated.go @@ -16,13 +16,44 @@ package handlers import ( "context" + "github.com/go-logr/logr" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - daemonv1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" ) // IsAllocated is the handler for IsAllocated GRPC endpoint -func (s *Handlers) IsAllocated(context.Context, *daemonv1.IsAllocatedRequest) (*daemonv1.IsAllocatedResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method IsAllocated not implemented") +func (h 
*Handlers) IsAllocated( + ctx context.Context, req *nodev1.IsAllocatedRequest) (*nodev1.IsAllocatedResponse, error) { + reqLog := addFieldsToLogger(logr.FromContextOrDiscard(ctx), req) + ctx = logr.NewContext(ctx, reqLog) + if err := validateReq(req); err != nil { + return nil, err + } + params := req.Parameters + store, err := h.openStore(ctx) + if err != nil { + return nil, err + } + if err := checkReqIsCanceled(ctx); err != nil { + return nil, h.closeSession(ctx, store, err) + } + + for _, p := range params.Pools { + poolLog := reqLog.WithValues("pool", p) + res := store.GetReservationByID(p, params.CniContainerid, params.CniIfname) + if res == nil { + poolLog.Info("reservation not found") + err = status.Errorf(codes.NotFound, "reservation for pool %s not found", p) + break + } + reqLog.Info("reservation exist") + reqLog.V(1).Info("reservation data", "data", res.String()) + } + if err := h.closeSession(ctx, store, err); err != nil { + return nil, err + } + + return &nodev1.IsAllocatedResponse{}, nil } From f9116e265e69e316e540d5475cd2fde6b8d61768 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 14 Jul 2023 12:54:10 +0300 Subject: [PATCH 10/18] ipam-node: Add logic to copy Shim CNI to ipam-node Signed-off-by: Yury Kulazhenkov --- cmd/ipam-node/app/app.go | 41 +++++++++++++++ cmd/ipam-node/app/options/options.go | 76 +++++++++++++++++++++++++--- 2 files changed, 111 insertions(+), 6 deletions(-) diff --git a/cmd/ipam-node/app/app.go b/cmd/ipam-node/app/app.go index 421f650..6970b96 100644 --- a/cmd/ipam-node/app/app.go +++ b/cmd/ipam-node/app/app.go @@ -19,9 +19,11 @@ import ( "context" "fmt" "net" + "path/filepath" "sync" "github.com/go-logr/logr" + "github.com/google/renameio/v2" "github.com/spf13/cobra" "google.golang.org/grpc" corev1 "k8s.io/api/core/v1" @@ -44,6 +46,8 @@ import ( nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-node/app/options" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/cmdutils" + cniTypes "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" nodectrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/controllers/node" @@ -107,6 +111,10 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio logger.Info("start IPAM node daemon", "version", version.GetVersionString(), "node", opts.NodeName) + if err := deployShimCNI(logger, opts); err != nil { + return err + } + scheme := runtime.NewScheme() if err := clientgoscheme.AddToScheme(scheme); err != nil { @@ -222,3 +230,36 @@ func initGRPCServer(opts *options.Options, handlers.New(poolConfReader, store.New(opts.StoreFile), allocator.NewIPAllocator)) return grpcServer, listener, nil } + +func deployShimCNI(log logr.Logger, opts *options.Options) error { + // copy nv-ipam binary + if !opts.CNISkipBinFileCopy { + if err := cmdutils.CopyFileAtomic(opts.CNIBinFile, opts.CNIBinDir, + "_nv-ipam", "nv-ipam"); err != nil { + log.Error(err, "failed at nv-ipam copy") + return err + } + } + if !opts.CNISkipConfigCreation { + return createNVIPAMConfig(log, opts) + } + return nil +} + +func createNVIPAMConfig(log logr.Logger, opts *options.Options) error { + cfg := fmt.Sprintf(`{ + "daemonSocket": "%s", + "daemonCallTimeoutSeconds": %d, + "logFile": "%s", + "logLevel": "%s" +} +`, opts.CNIDaemonSocket, opts.CNIDaemonCallTimeoutSeconds, opts.CNILogFile, opts.CNILogLevel) + + err := 
renameio.WriteFile(filepath.Join(opts.CNIConfDir, cniTypes.ConfFileName), []byte(cfg), 0664) + if err != nil { + log.Error(err, "failed to write configuration for shim CNI") + return err + } + log.Info("config for shim CNI written", "config", cfg) + return nil +} diff --git a/cmd/ipam-node/app/options/options.go b/cmd/ipam-node/app/options/options.go index 4a2ecb3..8e4ed1f 100644 --- a/cmd/ipam-node/app/options/options.go +++ b/cmd/ipam-node/app/options/options.go @@ -30,7 +30,7 @@ import ( const ( // DefaultStoreFile contains path of the default store file DefaultStoreFile = "/var/lib/cni/nv-ipam/store" - DefaultBindAddress = "unix://" + cniTypes.DefaultDaemonSocket + DefaultBindAddress = cniTypes.DefaultDaemonSocket ) // New initialize and return new Options object @@ -42,6 +42,16 @@ func New() *Options { NodeName: "", BindAddress: DefaultBindAddress, StoreFile: DefaultStoreFile, + // shim CNI parameters + CNIBinDir: "/opt/cni/bin", + CNIBinFile: "/nv-ipam", + CNISkipBinFileCopy: false, + CNISkipConfigCreation: false, + CNIDaemonSocket: cniTypes.DefaultDaemonSocket, + CNIDaemonCallTimeoutSeconds: 5, + CNIConfDir: cniTypes.DefaultConfDir, + CNILogLevel: cniTypes.DefaultLogLevel, + CNILogFile: cniTypes.DefaultLogFile, } } @@ -53,6 +63,16 @@ type Options struct { NodeName string BindAddress string StoreFile string + // shim CNI parameters + CNIBinDir string + CNIBinFile string + CNISkipBinFileCopy bool + CNISkipConfigCreation bool + CNIConfDir string + CNIDaemonSocket string + CNIDaemonCallTimeoutSeconds int + CNILogFile string + CNILogLevel string } // AddNamedFlagSets register flags for common options in NamedFlagSets @@ -75,6 +95,26 @@ func (o *Options) AddNamedFlagSets(sharedFS *cliflag.NamedFlagSets) { "GPRC server bind address. e.g.: tcp://127.0.0.1:9092, unix:///var/lib/foo") daemonFS.StringVar(&o.StoreFile, "store-file", o.StoreFile, "Path of the file which used to store allocations") + + cniFS := sharedFS.FlagSet("Shim CNI Configuration") + cniFS.StringVar(&o.CNIBinDir, + "cni-bin-dir", o.CNIBinDir, "CNI binary directory") + cniFS.StringVar(&o.CNIBinFile, + "cni-nv-ipam-bin-file", o.CNIBinFile, "nv-ipam binary file path") + cniFS.BoolVar(&o.CNISkipBinFileCopy, + "cni-skip-nv-ipam-binary-copy", o.CNISkipBinFileCopy, "skip nv-ipam binary file copy") + cniFS.BoolVar(&o.CNISkipConfigCreation, + "cni-skip-nv-ipam-config-creation", o.CNISkipConfigCreation, "skip config file creation for nv-ipam CNI") + cniFS.StringVar(&o.CNIConfDir, "cni-conf-dir", o.CNIConfDir, + "shim CNI config: path with config file") + cniFS.StringVar(&o.CNIDaemonSocket, "cni-daemon-socket", o.CNIDaemonSocket, + "shim CNI config: IPAM daemon socket path") + cniFS.IntVar(&o.CNIDaemonCallTimeoutSeconds, "cni-daemon-call-timeout", o.CNIDaemonCallTimeoutSeconds, + "shim CNI config: timeout for IPAM daemon calls") + cniFS.StringVar(&o.CNILogFile, "cni-log-file", o.CNILogFile, + "shim CNI config: path to log file for shim CNI") + cniFS.StringVar(&o.CNILogLevel, "cni-log-level", o.CNILogLevel, + "shim CNI config: log level for shim CNI") } // Validate registered options @@ -86,14 +126,38 @@ func (o *Options) Validate() error { if err != nil { return fmt.Errorf("bind-address is invalid: %v", err) } - if len(o.StoreFile) == 0 { - return fmt.Errorf("store-file can't be empty") + if err := o.verifyPaths(); err != nil { + return err } - _, err = os.Stat(filepath.Dir(o.StoreFile)) + return o.Options.Validate() +} + +func (o *Options) verifyPaths() error { + _, err := os.Stat(filepath.Dir(o.StoreFile)) if err != nil { - return 
fmt.Errorf("store-file is invalid: %v", err) + return fmt.Errorf("dir for store-file is not found: %v", err) } - return o.Options.Validate() + if !o.CNISkipBinFileCopy { + // CNIBinFile + if _, err := os.Stat(o.CNIBinFile); err != nil { + return fmt.Errorf("cni-nv-ipam-bin-file is not found: %v", err) + } + // CNIBinDir + if _, err := os.Stat(o.CNIBinDir); err != nil { + return fmt.Errorf("cni-bin-dir is not found: %v", err) + } + } + if !o.CNISkipConfigCreation { + // CNIConfDir + if _, err := os.Stat(o.CNIConfDir); err != nil { + return fmt.Errorf("cni-conf-dir is not found: %v", err) + } + } + // parent dir for CNI log file + if _, err := os.Stat(filepath.Dir(o.CNILogFile)); err != nil { + return fmt.Errorf("cni-log-file is not found: %v", err) + } + return nil } // ParseBindAddress parses bind-address and return network and address part separately, From d282a16400729bb03ce213fb0b8944ef15d422f5 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 7 Jul 2023 11:12:46 +0300 Subject: [PATCH 11/18] ipam-node: Add logic to cleanup daemon socket Signed-off-by: Yury Kulazhenkov --- cmd/ipam-node/app/app.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/cmd/ipam-node/app/app.go b/cmd/ipam-node/app/app.go index 6970b96..3f85e0d 100644 --- a/cmd/ipam-node/app/app.go +++ b/cmd/ipam-node/app/app.go @@ -17,9 +17,12 @@ package app import ( "context" + "errors" "fmt" "net" + "os" "path/filepath" + "strings" "sync" "github.com/go-logr/logr" @@ -217,6 +220,10 @@ func initGRPCServer(opts *options.Options, if err != nil { return nil, nil, err } + if err := cleanUNIXSocketIfRequired(opts); err != nil { + log.Error(err, "failed to clean socket path") + return nil, nil, err + } listener, err := net.Listen(network, address) if err != nil { log.Error(err, "failed to start listener for GRPC server") @@ -231,6 +238,31 @@ func initGRPCServer(opts *options.Options, return grpcServer, listener, nil } +func cleanUNIXSocketIfRequired(opts *options.Options) error { + socketPrefix := "unix://" + if !strings.HasPrefix(opts.BindAddress, socketPrefix) { + return nil + } + socketPath, _ := strings.CutPrefix(opts.BindAddress, socketPrefix) + info, err := os.Stat(socketPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + if info.Mode().Type() != os.ModeSocket { + return fmt.Errorf("socket bind path exist, but not a socket") + } + if err := os.Remove(socketPath); err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return fmt.Errorf("failed to remove socket: %v", err) + } + return nil +} + func deployShimCNI(log logr.Logger, opts *options.Options) error { // copy nv-ipam binary if !opts.CNISkipBinFileCopy { From ae87c92d76efd623945f4b4c66223e2edbe80c3e Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 14 Jul 2023 12:56:05 +0300 Subject: [PATCH 12/18] image: Remove host-local IPAM bin from the image Signed-off-by: Yury Kulazhenkov --- Dockerfile | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6829362..0fa1205 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,9 +14,6 @@ COPY . 
/workspace # Build with make to apply all build logic defined in Makefile RUN make build -# Build host-local cni -RUN git clone https://github.com/containernetworking/plugins.git ; cd plugins ; git checkout v1.2.0 -b v1.2.0 -RUN cd plugins ; go build -o plugins/bin/host-local ./plugins/ipam/host-local # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details @@ -25,4 +22,4 @@ WORKDIR / COPY --from=builder /workspace/build/ipam-controller . COPY --from=builder /workspace/build/ipam-node . COPY --from=builder /workspace/build/nv-ipam . -COPY --from=builder /workspace/plugins/plugins/bin/host-local . + From 372958ebb04c5ab3dbad785946a727d266f407ab Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Tue, 18 Jul 2023 17:03:14 +0300 Subject: [PATCH 13/18] ipam-node: add periodic cleanup loop for stale reservations Signed-off-by: Yury Kulazhenkov --- cmd/ipam-node/app/app.go | 32 ++++- pkg/ipam-node/cleaner/cleaner.go | 145 ++++++++++++++++++++ pkg/ipam-node/cleaner/cleaner_suite_test.go | 66 +++++++++ pkg/ipam-node/cleaner/cleaner_test.go | 116 ++++++++++++++++ 4 files changed, 353 insertions(+), 6 deletions(-) create mode 100644 pkg/ipam-node/cleaner/cleaner.go create mode 100644 pkg/ipam-node/cleaner/cleaner_suite_test.go create mode 100644 pkg/ipam-node/cleaner/cleaner_test.go diff --git a/cmd/ipam-node/app/app.go b/cmd/ipam-node/app/app.go index 3f85e0d..ea85f22 100644 --- a/cmd/ipam-node/app/app.go +++ b/cmd/ipam-node/app/app.go @@ -24,6 +24,7 @@ import ( "path/filepath" "strings" "sync" + "time" "github.com/go-logr/logr" "github.com/google/renameio/v2" @@ -53,10 +54,11 @@ import ( cniTypes "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/cleaner" nodectrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/controllers/node" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/grpc/middleware" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/handlers" - "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" poolPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" "github.com/Mellanox/nvidia-k8s-ipam/pkg/version" ) @@ -160,24 +162,26 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio return err } - grpcServer, listener, err := initGRPCServer(opts, logger, poolManager) + store := storePkg.New(opts.StoreFile) + + grpcServer, listener, err := initGRPCServer(opts, logger, poolManager, store) if err != nil { return err } wg := sync.WaitGroup{} - wg.Add(3) - errCh := make(chan error, 1) innerCtx, innerCFunc := context.WithCancel(ctx) defer innerCFunc() + wg.Add(1) go func() { defer wg.Done() <-innerCtx.Done() grpcServer.GracefulStop() }() + wg.Add(1) go func() { defer wg.Done() logger.Info("start grpc server") @@ -190,6 +194,7 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio } logger.Info("grpc server stopped") }() + wg.Add(1) go func() { defer wg.Done() logger.Info("start manager") @@ -202,6 +207,21 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio } logger.Info("manager stopped") }() + wg.Add(1) + go func() { + defer wg.Done() + logger.Info("start stale reservations cleaner") + if !mgr.GetCache().WaitForCacheSync(innerCtx) { + select { + case errCh <- fmt.Errorf("failed to 
sync informer cache"): + default: + } + return + } + c := cleaner.New(mgr.GetClient(), store, time.Minute, 3) + c.Start(innerCtx) + logger.Info("cleaner stopped") + }() select { case <-ctx.Done(): @@ -215,7 +235,7 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio } func initGRPCServer(opts *options.Options, - log logr.Logger, poolConfReader poolPkg.ConfigReader) (*grpc.Server, net.Listener, error) { + log logr.Logger, poolConfReader poolPkg.ConfigReader, store storePkg.Store) (*grpc.Server, net.Listener, error) { network, address, err := options.ParseBindAddress(opts.BindAddress) if err != nil { return nil, nil, err @@ -234,7 +254,7 @@ func initGRPCServer(opts *options.Options, middleware.LogCallMiddleware)) nodev1.RegisterIPAMServiceServer(grpcServer, - handlers.New(poolConfReader, store.New(opts.StoreFile), allocator.NewIPAllocator)) + handlers.New(poolConfReader, store, allocator.NewIPAllocator)) return grpcServer, listener, nil } diff --git a/pkg/ipam-node/cleaner/cleaner.go b/pkg/ipam-node/cleaner/cleaner.go new file mode 100644 index 0000000..93c5112 --- /dev/null +++ b/pkg/ipam-node/cleaner/cleaner.go @@ -0,0 +1,145 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cleaner + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/go-logr/logr" + "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + apiTypes "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +// Cleaner is the interface of the cleaner package. +// The cleaner periodically scan the store and check for allocations which doesn't have +// related Pod in the k8s API. If allocation has no Pod for more than X checks, then the cleaner +// will release the allocation. +type Cleaner interface { + // Start starts the cleaner loop. + // The cleaner loop discovers stale allocations and clean up them. + Start(ctx context.Context) +} + +// New creates and initialize new cleaner instance +// "checkInterval" defines delay between checks for stale allocations. 
+// "checkCountBeforeRelease: defines how many check to do before remove the allocation +func New(client client.Client, store storePkg.Store, + checkInterval time.Duration, + checkCountBeforeRelease int) Cleaner { + return &cleaner{ + client: client, + store: store, + checkInterval: checkInterval, + checkCountBeforeRelease: checkCountBeforeRelease, + staleAllocations: make(map[string]int), + } +} + +type cleaner struct { + client client.Client + store storePkg.Store + checkInterval time.Duration + checkCountBeforeRelease int + // key is ||, value is count of failed checks + staleAllocations map[string]int +} + +func (c *cleaner) Start(ctx context.Context) { + logger := logr.FromContextOrDiscard(ctx).WithName("cleaner") + for { + loopLogger := logger.WithValues("checkID", uuid.NewString()) + loopLogger.Info("check for stale IPs") + ctx = logr.NewContext(ctx, loopLogger) + if err := c.loop(ctx); err != nil { + loopLogger.Error(err, "check failed") + } + select { + case <-ctx.Done(): + logger.Info("shutdown cleaner") + return + case <-time.After(c.checkInterval): + } + } +} + +func (c *cleaner) loop(ctx context.Context) error { + logger := logr.FromContextOrDiscard(ctx) + store, err := c.store.Open(ctx) + if err != nil { + return fmt.Errorf("failed to open store: %v", err) + } + allReservations := map[string]struct{}{} + for _, poolName := range store.ListPools() { + for _, reservation := range store.ListReservations(poolName) { + resLogger := logger.WithValues("pool", poolName, + "container_id", reservation.ContainerID, "interface_name", reservation.InterfaceName) + key := c.getStaleAllocKey(poolName, reservation) + allReservations[key] = struct{}{} + if reservation.Metadata.PodName == "" || reservation.Metadata.PodNamespace == "" { + resLogger.V(2).Info("reservation has no required metadata fields, skip") + continue + } + pod := &corev1.Pod{} + err := c.client.Get(ctx, apiTypes.NamespacedName{ + Namespace: reservation.Metadata.PodNamespace, + Name: reservation.Metadata.PodName, + }, pod) + if err != nil && !apiErrors.IsNotFound(err) { + store.Cancel() + return fmt.Errorf("failed to read Pod info from the cache: %v", err) + } + if apiErrors.IsNotFound(err) || + (reservation.Metadata.PodUUID != "" && reservation.Metadata.PodUUID != string(pod.UID)) { + c.staleAllocations[key]++ + resLogger.V(2).Info("pod not found in the API, increase stale counter", + "value", c.staleAllocations[key]) + } else { + delete(c.staleAllocations, key) + } + } + } + + for k, count := range c.staleAllocations { + // remove unknown reservations from c.staleAllocations + if _, isKnownReservation := allReservations[k]; !isKnownReservation { + delete(c.staleAllocations, k) + continue + } + // release reservations which were marked as stale multiple times + if count > c.checkCountBeforeRelease { + keyFields := strings.SplitN(k, "|", 3) + pool, containerID, ifName := keyFields[0], keyFields[1], keyFields[2] + logger.Info("stale reservation released", "pool", pool, + "container_id", containerID, "interface_name", ifName) + store.ReleaseReservationByID(pool, containerID, ifName) + } + } + if err := store.Commit(); err != nil { + return fmt.Errorf("failed to commit changes to the store: %v", err) + } + return nil +} + +func (c *cleaner) getStaleAllocKey(poolName string, r types.Reservation) string { + return fmt.Sprintf("%s|%s|%s", poolName, r.ContainerID, r.InterfaceName) +} diff --git a/pkg/ipam-node/cleaner/cleaner_suite_test.go b/pkg/ipam-node/cleaner/cleaner_suite_test.go new file mode 100644 index 0000000..5ab4121 --- 
/dev/null +++ b/pkg/ipam-node/cleaner/cleaner_suite_test.go @@ -0,0 +1,66 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cleaner_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/klog/v2" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" +) + +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + cFunc context.CancelFunc + ctx context.Context +) + +func TestCleaner(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Cleaner Suite") +} + +var _ = BeforeSuite(func() { + By("bootstrapping test environment") + testEnv = &envtest.Environment{} + + ctx, cFunc = context.WithCancel(context.Background()) + ctx = logr.NewContext(ctx, klog.NewKlogr()) + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + cFunc() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/pkg/ipam-node/cleaner/cleaner_test.go b/pkg/ipam-node/cleaner/cleaner_test.go new file mode 100644 index 0000000..14433cd --- /dev/null +++ b/pkg/ipam-node/cleaner/cleaner_test.go @@ -0,0 +1,116 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cleaner_test + +import ( + "net" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cleanerPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/cleaner" + storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" +) + +const ( + testNamespace = "default" + testPodName1 = "test-pod1" + testPodName2 = "test-pod2" + testPool1 = "pool1" + testPool2 = "pool2" + testIFName = "net0" +) + +func createPod(name, namespace string) string { + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "something", Image: "something"}}}, + } + ExpectWithOffset(1, k8sClient.Create(ctx, p)).NotTo(HaveOccurred()) + return string(p.UID) +} + +var _ = Describe("Cleaner", func() { + It("Cleanup test", func() { + done := make(chan interface{}) + go func() { + storePath := filepath.Join(GinkgoT().TempDir(), "test_store") + storeMgr := storePkg.New(storePath) + cleaner := cleanerPkg.New(k8sClient, storeMgr, time.Millisecond*100, 3) + + pod1UID := createPod(testPodName1, testNamespace) + _ = createPod(testPodName2, testNamespace) + + store, err := storeMgr.Open(ctx) + Expect(err).NotTo(HaveOccurred()) + + // should keep these reservations + Expect(store.Reserve(testPool1, "id1", testIFName, types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PodUUID: pod1UID, + PodName: testPodName1, + PodNamespace: testNamespace, + }, net.ParseIP("192.168.1.100"))).NotTo(HaveOccurred()) + + Expect(store.Reserve(testPool1, "id2", testIFName, types.ReservationMetadata{}, + net.ParseIP("192.168.1.101"))).NotTo(HaveOccurred()) + + // should remove these reservations + Expect(store.Reserve(testPool1, "id3", testIFName, types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PodName: "unknown", + PodNamespace: testNamespace, + }, net.ParseIP("192.168.1.102"))).NotTo(HaveOccurred()) + Expect(store.Reserve(testPool2, "id4", testIFName, types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PodName: "unknown2", + PodNamespace: testNamespace, + }, net.ParseIP("192.168.2.100"))).NotTo(HaveOccurred()) + Expect(store.Reserve(testPool2, "id5", testIFName, types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PodUUID: "something", // differ from the reservation + PodName: testPodName2, + PodNamespace: testNamespace, + }, net.ParseIP("192.168.2.101"))).NotTo(HaveOccurred()) + + Expect(store.Commit()).NotTo(HaveOccurred()) + + go func() { + cleaner.Start(ctx) + }() + Eventually(func(g Gomega) { + store, err := storeMgr.Open(ctx) + g.Expect(err).NotTo(HaveOccurred()) + defer store.Cancel() + g.Expect(store.GetReservationByID(testPool1, "id1", testIFName)).NotTo(BeNil()) + g.Expect(store.GetReservationByID(testPool1, "id2", testIFName)).NotTo(BeNil()) + g.Expect(store.GetReservationByID(testPool1, "id3", testIFName)).To(BeNil()) + g.Expect(store.GetReservationByID(testPool2, "id4", testIFName)).To(BeNil()) + g.Expect(store.GetReservationByID(testPool2, "id5", testIFName)).To(BeNil()) + }, 10).Should(Succeed()) + + close(done) + }() + Eventually(done, time.Minute).Should(BeClosed()) + }) +}) From 496e7a49a21d09607195704f61dac5abe8e86a8a Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Wed, 19 Jul 2023 16:40:46 +0300 Subject: [PATCH 14/18] ipam-node: add tests to validate basic allocation/deallocation flow Signed-off-by: Yury Kulazhenkov --- 
cmd/ipam-node/app/app_suite_test.go | 62 ++++++++++ cmd/ipam-node/app/app_test.go | 176 ++++++++++++++++++++++++++++ 2 files changed, 238 insertions(+) create mode 100644 cmd/ipam-node/app/app_suite_test.go create mode 100644 cmd/ipam-node/app/app_test.go diff --git a/cmd/ipam-node/app/app_suite_test.go b/cmd/ipam-node/app/app_suite_test.go new file mode 100644 index 0000000..7d88376 --- /dev/null +++ b/cmd/ipam-node/app/app_suite_test.go @@ -0,0 +1,62 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package app_test + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" +) + +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + cFunc context.CancelFunc + ctx context.Context +) + +func TestApp(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "IPAM Node Suite") +} + +var _ = BeforeSuite(func() { + By("bootstrapping test environment") + testEnv = &envtest.Environment{} + + ctx, cFunc = context.WithCancel(context.Background()) + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + cFunc() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/cmd/ipam-node/app/app_test.go b/cmd/ipam-node/app/app_test.go new file mode 100644 index 0000000..1457550 --- /dev/null +++ b/cmd/ipam-node/app/app_test.go @@ -0,0 +1,176 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package app_test + +import ( + "os" + "path/filepath" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-node/app" + "github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-node/app/options" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" +) + +const ( + testNodeName = "test-node" + testPodName = "test-pod" + testPoolName1 = "my-pool-1" + testPoolName2 = "my-pool-2" + testNamespace = "default" +) + +func createTestNode() *corev1.Node { + nodeObj := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: testNodeName}, + } + ExpectWithOffset(1, pool.SetIPBlockAnnotation(nodeObj, map[string]*pool.IPPool{ + testPoolName1: { + Name: testPoolName1, + Subnet: "192.168.0.0/16", + StartIP: "192.168.0.2", + EndIP: "192.168.0.254", + Gateway: "192.168.0.1", + }, + testPoolName2: {Name: testPoolName2, + Subnet: "10.100.0.0/16", + StartIP: "10.100.0.2", + EndIP: "10.100.0.254", + Gateway: "10.100.0.1", + }, + })).NotTo(HaveOccurred()) + ExpectWithOffset(1, k8sClient.Create(ctx, nodeObj)) + return nodeObj +} + +func createTestPod() *corev1.Pod { + podObj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: testPodName, Namespace: testNamespace}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "name", Image: "image"}}, + }, + } + ExpectWithOffset(1, k8sClient.Create(ctx, podObj)) + return podObj +} + +func getOptions(testDir string) *options.Options { + daemonSocket := "unix://" + filepath.Join(testDir, "daemon") + storePath := filepath.Join(testDir, "store") + cniBinDir := filepath.Join(testDir, "cnibin") + cniConfDir := filepath.Join(testDir, "cniconf") + dummyCNIBin := filepath.Join(testDir, "dummycni") + + Expect(os.WriteFile(dummyCNIBin, []byte("dummy"), 0777)).NotTo(HaveOccurred()) + Expect(os.Mkdir(cniBinDir, 0777)).NotTo(HaveOccurred()) + Expect(os.Mkdir(cniConfDir, 0777)).NotTo(HaveOccurred()) + + opts := options.New() + opts.NodeName = testNodeName + opts.ProbeAddr = "0" // disable + opts.MetricsAddr = "0" // disable + opts.BindAddress = daemonSocket + opts.StoreFile = storePath + opts.CNIBinFile = dummyCNIBin + opts.CNIBinDir = cniBinDir + opts.CNIConfDir = cniConfDir + opts.CNIDaemonSocket = daemonSocket + return opts +} + +func getValidReqParams(uid, name, namespace string) *nodev1.IPAMParameters { + return &nodev1.IPAMParameters{ + Pools: []string{testPoolName1, testPoolName2}, + CniContainerid: "id1", + CniIfname: "net0", + Metadata: &nodev1.IPAMMetadata{ + K8SPodName: name, + K8SPodNamespace: namespace, + K8SPodUid: uid, + DeviceId: "0000:d8:00.1", + }, + } +} + +var _ = Describe("IPAM Node daemon", func() { + It("Validate main flows", func() { + done := make(chan interface{}) + go func() { + testDir := GinkgoT().TempDir() + opts := getOptions(testDir) + + createTestNode() + pod := createTestPod() + + ctx = logr.NewContext(ctx, klog.NewKlogr()) + + go func() { + Expect(app.RunNodeDaemon(ctx, cfg, opts)).NotTo(HaveOccurred()) + }() + + conn, err := grpc.DialContext(ctx, opts.CNIDaemonSocket, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock()) + Expect(err).NotTo(HaveOccurred()) + + grpcClient := nodev1.NewIPAMServiceClient(conn) + + params := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace) + + // no allocation yet + _, err = grpcClient.IsAllocated(ctx, + 
&nodev1.IsAllocatedRequest{Parameters: params}) + Expect(status.Code(err) == codes.NotFound).To(BeTrue()) + + // allocate + resp, err := grpcClient.Allocate(ctx, &nodev1.AllocateRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + Expect(resp.Allocations).To(HaveLen(2)) + Expect(resp.Allocations[0].Pool).NotTo(BeEmpty()) + Expect(resp.Allocations[0].Gateway).NotTo(BeEmpty()) + Expect(resp.Allocations[0].Ip).NotTo(BeEmpty()) + + _, err = grpcClient.IsAllocated(ctx, + &nodev1.IsAllocatedRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + + // deallocate + _, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + + // deallocate should be idempotent + _, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + + // check should fail + _, err = grpcClient.IsAllocated(ctx, + &nodev1.IsAllocatedRequest{Parameters: params}) + Expect(status.Code(err) == codes.NotFound).To(BeTrue()) + close(done) + }() + Eventually(done, 5*time.Minute).Should(BeClosed()) + }) +}) From 98743969e5076c9b91f696523aecf8f4debc8ab5 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Fri, 21 Jul 2023 14:15:57 +0300 Subject: [PATCH 15/18] ipam-node: use flock for store file Signed-off-by: Yury Kulazhenkov --- cmd/ipam-node/app/app.go | 25 +++++++++++++++++++++++++ go.mod | 1 + go.sum | 2 ++ 3 files changed, 28 insertions(+) diff --git a/cmd/ipam-node/app/app.go b/cmd/ipam-node/app/app.go index ea85f22..55ee06a 100644 --- a/cmd/ipam-node/app/app.go +++ b/cmd/ipam-node/app/app.go @@ -27,6 +27,7 @@ import ( "time" "github.com/go-logr/logr" + "github.com/gofrs/flock" "github.com/google/renameio/v2" "github.com/spf13/cobra" "google.golang.org/grpc" @@ -162,7 +163,31 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio return err } + storeLock := flock.New(opts.StoreFile) + locked, err := storeLock.TryLock() + if err != nil { + logger.Error(err, "failed to set store lock") + return err + } + if !locked { + err := fmt.Errorf("store lock is held by a different process") + logger.Error(err, "failed to lock store") + return err + } + defer func() { + err := storeLock.Unlock() + if err != nil { + logger.Error(err, "failed to release store lock") + } + }() store := storePkg.New(opts.StoreFile) + // do initial store loading, to validate stored data + s, err := store.Open(ctx) + if err != nil { + logger.Error(err, "failed to validate store data") + return err + } + s.Cancel() grpcServer, listener, err := initGRPCServer(opts, logger, poolManager, store) if err != nil { diff --git a/go.mod b/go.mod index eff403e..7767783 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.20 require ( github.com/containernetworking/cni v1.1.2 github.com/go-logr/logr v1.2.4 + github.com/gofrs/flock v0.8.1 github.com/google/renameio/v2 v2.0.0 github.com/google/uuid v1.3.0 github.com/k8snetworkplumbingwg/cni-log v0.0.0-20230321145726-634c593dd11f diff --git a/go.sum b/go.sum index 7e3cb87..0406159 100644 --- a/go.sum +++ b/go.sum @@ -109,6 +109,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= 
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= From 31218a70c28c3acfc675f23c5d5b7bebcd0d45fa Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Wed, 19 Jul 2023 16:54:32 +0300 Subject: [PATCH 16/18] ipam-controller: disable metrics and probe endpoints in ipam-controller tests Signed-off-by: Yury Kulazhenkov --- cmd/ipam-controller/app/app_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/ipam-controller/app/app_test.go b/cmd/ipam-controller/app/app_test.go index 2dc7db4..ab9b56a 100644 --- a/cmd/ipam-controller/app/app_test.go +++ b/cmd/ipam-controller/app/app_test.go @@ -168,6 +168,8 @@ var _ = Describe("App", func() { Expect(app.RunController(logr.NewContext(ctrlCtx, klog.NewKlogr()), cfg, &options.Options{ ConfigMapName: TestConfigMapName, ConfigMapNamespace: TestNamespace, + MetricsAddr: "0", // disable + ProbeAddr: "0", // disable })).NotTo(HaveOccurred()) close(controllerStopped) }() From c7a8a8e9e0089effe480f3a83b3caba636dee922 Mon Sep 17 00:00:00 2001 From: Yury Kulazhenkov Date: Thu, 6 Jul 2023 13:27:05 +0300 Subject: [PATCH 17/18] deployment: Update deployment to support ipam-node in daemon mode Signed-off-by: Yury Kulazhenkov --- deploy/nv-ipam.yaml | 47 ++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/deploy/nv-ipam.yaml b/deploy/nv-ipam.yaml index b6a8373..5acea18 100644 --- a/deploy/nv-ipam.yaml +++ b/deploy/nv-ipam.yaml @@ -8,8 +8,11 @@ rules: - "" resources: - nodes + - pods verbs: - get + - list + - watch --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 @@ -52,7 +55,6 @@ spec: app: nv-ipam-node name: nv-ipam-node spec: - hostNetwork: true tolerations: - operator: Exists effect: NoSchedule @@ -70,43 +72,44 @@ spec: fieldPath: spec.nodeName command: [ "/ipam-node" ] args: - - --nv-ipam-log-file=/var/log/nv-ipam-cni.log - - --nv-ipam-log-level=info + - --node-name=$(NODE_NAME) + - --v=1 # log level for ipam-node + - --logging-format=json + - --bind-address=unix:///var/lib/cni/nv-ipam/daemon.sock + - --store-file=/var/lib/cni/nv-ipam/store + - --cni-daemon-socket=unix:///var/lib/cni/nv-ipam/daemon.sock + - --cni-daemon-call-timeout=5 # 5 seconds + - --cni-bin-dir=/opt/cni/bin + - --cni-conf-dir=/etc/cni/net.d/nv-ipam.d + - --cni-log-file=/var/log/nv-ipam-cni.log + - --cni-log-level=info # log level for shim CNI resources: requests: cpu: "100m" memory: "50Mi" limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: true + cpu: "300m" + memory: "300Mi" volumeMounts: - name: cnibin - mountPath: /host/opt/cni/bin - - name: cni - mountPath: /host/etc/cni/net.d - - name: hostlocalcnibin - mountPath: /host/var/lib/cni/nv-ipam/bin - - name: hostlocalcnistate - mountPath: /host/var/lib/cni/nv-ipam/state/host-local + mountPath: /opt/cni/bin + - name: cniconf + mountPath: /etc/cni/net.d/nv-ipam.d + - name: daemonstate + mountPath: /var/lib/cni/nv-ipam/ terminationGracePeriodSeconds: 10 volumes: - name: cnibin hostPath: path: /opt/cni/bin type: DirectoryOrCreate - - name: cni - hostPath: - path: /etc/cni/net.d - type: DirectoryOrCreate - - name: hostlocalcnibin + - name: cniconf hostPath: - path: 
/var/lib/cni/nv-ipam/bin
+            path: /etc/cni/net.d/nv-ipam.d
             type: DirectoryOrCreate
-        - name: hostlocalcnistate
+        - name: daemonstate
           hostPath:
-            path: /var/lib/cni/nv-ipam/state/host-local
+            path: /var/lib/cni/nv-ipam/
             type: DirectoryOrCreate
 ---
 kind: ClusterRole

From 4f386864023c56ac1bfba35f4ca0a19086ddeb3c Mon Sep 17 00:00:00 2001
From: Yury Kulazhenkov 
Date: Fri, 21 Jul 2023 11:49:57 +0300
Subject: [PATCH 18/18] Update README.md

Signed-off-by: Yury Kulazhenkov 
---
 README.md | 120 +++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 88 insertions(+), 32 deletions(-)

diff --git a/README.md b/README.md
index a42cc5a..5e96f6d 100644
--- a/README.md
+++ b/README.md
@@ -24,7 +24,7 @@ This repository is in its first steps of development, APIs may change in a non b
 NVIDIA IPAM plugin consists of 3 main components:
 1. controller ([ipam-controller](#ipam-controller))
-2. node agent ([ipam-node](#ipam-node))
+2. node daemon ([ipam-node](#ipam-node))
 3. IPAM CNI plugin ([nv-ipam](#nv-ipam))
 
 ### ipam-controller
@@ -35,19 +35,20 @@ annotation a cluster unique range of IPs of the defined IP Pools.
 
 ### ipam-node
 
-A node agent that performs initial setup and installation of nv-ipam CNI plugin.
+The daemon is responsible for:
+- performing initial setup and installation of the nv-ipam CNI plugin
+- allocating IPs and persisting the allocations on disk
+- running periodic jobs, such as cleanup of stale IP address allocations
 
-### nv-ipam
-
-An IPAM CNI plugin that allocates IPs for a given interface out of the defined IP Pool as provided
-via CNI configuration.
-
-IPs are allocated out of the provided IP Block assigned by ipam-controller for the node.
-To determine the cluster unique IP Block for the defined IP Pool, nv-ipam CNI queries K8s API
+The node daemon provides a GRPC service, which the nv-ipam CNI plugin uses to request IP address allocation and deallocation.
+IPs are allocated from the provided IP Block assigned by ipam-controller for the node.
+To determine the cluster unique IP Block for the defined IP Pool, ipam-node watches the K8s API
 for the Node object and extracts IP Block information from node annotation.
-nv-ipam plugin currently leverages [host-local](https://www.cni.dev/plugins/current/ipam/host-local/)
-IPAM to allocate IPs from the given range.
+### nv-ipam
+
+An IPAM CNI plugin that handles CNI requests according to the CNI spec.
+To allocate or deallocate an IP address, nv-ipam calls the GRPC API of the ipam-node daemon.
 
 ### IP allocation flow
@@ -189,21 +190,76 @@ data:
 
 ### ipam-node configuration
 
-ipam-node accepts configuration via command line flags
+ipam-node accepts configuration via command line flags.
+Options that begin with the `cni-` prefix are used to create the config file for the nv-ipam CNI.
+All other options are for the ipam-node daemon itself.
```text - --cni-bin-dir string CNI binary directory (default "/host/opt/cni/bin") - --cni-conf-dir string CNI config directory (default "/host/etc/cni/net.d") - -h, --help show help message and quit - --host-local-bin-file string host-local binary file path (default "/host-local") - --nv-ipam-bin-file string nv-ipam binary file path (default "/nv-ipam") - --nv-ipam-cni-data-dir string nv-ipam CNI data directory (default "/host/var/lib/cni/nv-ipam") - --nv-ipam-cni-data-dir-host string nv-ipam CNI data directory on host (default "/var/lib/cni/nv-ipam") - --nv-ipam-kubeconfig-file-host string kubeconfig for nv-ipam (default "/etc/cni/net.d/nv-ipam.d/nv-ipam.kubeconfig") - --nv-ipam-log-file string nv-ipam log file (default "/var/log/nv-ipam-cni.log") - --nv-ipam-log-level string nv-ipam log level (default "info") - --skip-host-local-binary-copy skip host-loca binary file copy - --skip-nv-ipam-binary-copy skip nv-ipam binary file copy +Logging flags: + + --log-flush-frequency duration + Maximum number of seconds between log flushes (default 5s) + --log-json-info-buffer-size quantity + [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size + can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this. + --log-json-split-stream + [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to + use this. + --logging-format string + Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text") + -v, --v Level + number for the log level verbosity + --vmodule pattern=N,... + comma-separated list of pattern=N settings for file-filtered logging (only works for text log format) + +Common flags: + + --feature-gates mapStringBool + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are: + AllAlpha=true|false (ALPHA - default=false) + AllBeta=true|false (BETA - default=false) + ContextualLogging=true|false (ALPHA - default=false) + LoggingAlphaOptions=true|false (ALPHA - default=false) + LoggingBetaOptions=true|false (BETA - default=true) + --version + print binary version and exit + +Node daemon flags: + + --bind-address string + GPRC server bind address. e.g.: tcp://127.0.0.1:9092, unix:///var/lib/foo (default "unix:///var/lib/cni/nv-ipam/daemon.sock") + --health-probe-bind-address string + The address the probe endpoint binds to. (default ":8081") + --kubeconfig string + Paths to a kubeconfig. Only required if out-of-cluster. + --metrics-bind-address string + The address the metric endpoint binds to. 
(default ":8080")
+  --node-name string
+        The name of the Node on which the daemon runs
+  --store-file string
+        Path of the file which used to store allocations (default "/var/lib/cni/nv-ipam/store")
+
+Shim CNI Configuration flags:
+
+  --cni-bin-dir string
+        CNI binary directory (default "/opt/cni/bin")
+  --cni-conf-dir string
+        shim CNI config: path with config file (default "/etc/cni/net.d/nv-ipam.d")
+  --cni-daemon-call-timeout int
+        shim CNI config: timeout for IPAM daemon calls (default 5)
+  --cni-daemon-socket string
+        shim CNI config: IPAM daemon socket path (default "unix:///var/lib/cni/nv-ipam/daemon.sock")
+  --cni-log-file string
+        shim CNI config: path to log file for shim CNI (default "/var/log/nv-ipam-cni.log")
+  --cni-log-level string
+        shim CNI config: log level for shim CNI (default "info")
+  --cni-nv-ipam-bin-file string
+        nv-ipam binary file path (default "/nv-ipam")
+  --cni-skip-nv-ipam-binary-copy
+        skip nv-ipam binary file copy
+  --cni-skip-nv-ipam-config-creation
+        skip config file creation for nv-ipam CNI
+
 ```
 
 ### nv-ipam CNI configuration
@@ -214,8 +270,8 @@ nv-ipam accepts the following CNI configuration:
 {
     "type": "nv-ipam",
     "poolName": "my-pool",
-    "kubeconfig": "/etc/cni/net.d/nv-ipam.d/nv-ipam.kubeconfig",
-    "dataDir": "/var/lib/cni/nv-ipam",
+    "daemonSocket": "unix:///var/lib/cni/nv-ipam/daemon.sock",
+    "daemonCallTimeoutSeconds": 5,
     "confDir": "/etc/cni/net.d/nv-ipam.d",
     "logFile": "/var/log/nv-ipam-cni.log",
     "logLevel": "info"
@@ -223,9 +279,12 @@ nv-ipam accepts the following CNI configuration:
 ```
 
 * `type` (string, required): CNI plugin name, MUST be `"nv-ipam"`
-* `poolName` (string, optional): name of the IP Pool to be used for IP allocation. (default: network name as provided in CNI call)
-* `kubeconfig` (string, optional): path to kubeconfig file. (default: `"/etc/cni/net.d/nv-ipam.d/nv-ipam.kubeconfig"`)
-* `dataDir` (string, optional): path to data dir. (default: `/var/lib/cni/nv-ipam`)
+* `poolName` (string, optional): name of the IP Pool to be used for IP allocation.
+It is possible to allocate two IPs for the interface from different pools by specifying pool names separated by a comma,
+e.g. `"my-ipv4-pool,my-ipv6-pool"`. The primary intent of supporting multiple pools is the dual-stack use case, in which an
+interface should have two IP addresses: one IPv4 and one IPv6. (default: network name as provided in CNI call)
+* `daemonSocket` (string, optional): address of GRPC server socket served by IPAM daemon
+* `daemonCallTimeoutSeconds` (integer, optional): timeout for GRPC calls to IPAM daemon
 * `confDir` (string, optional): path to configuration dir. (default: `"/etc/cni/net.d/nv-ipam.d"`)
 * `logFile` (string, optional): log file path. (default: `"/var/log/nv-ipam-cni.log"`)
 * `logLevel` (string, optional): logging level. one of: `["verbose", "debug", "info", "warning", "error", "panic"]`. (default: `"info"`)
@@ -319,11 +378,8 @@ cat /var/log/nv-ipam-cni.log
 
 ## Limitations
 
-* Deleting an IP Pool from config map while there are pods scheduled on nodes with IPs from deleted Pool, deleting these pods will fail (CNI CMD DEL fails)
 * Before removing a node from cluster, drain all workloads to ensure proper cleanup of IPs on node.
 * IP Block allocated to a node with Gateway IP in its range will have one less IP than what defined in perNodeBlockSize, deployers should take this into account.
-* IPv6 not supported
-* Allocating Multiple IPs per interface is not supported
 * Defining multiple IP Pools while supported, was not thoroughly testing
 
 ## Contributing
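
For illustration, the dual-pool `poolName` option documented above is typically consumed through a secondary-network definition. The sketch below is not part of this patch series; it assumes Multus and the `macvlan` CNI are installed, and that IP Pools named `my-ipv4-pool` and `my-ipv6-pool` are defined in the ipam-controller ConfigMap (all of these names are placeholders):

```yaml
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: dual-stack-net        # example network name
  namespace: default
spec:
  config: |
    {
      "cniVersion": "0.3.1",
      "name": "dual-stack-net",
      "type": "macvlan",
      "master": "eth1",
      "ipam": {
        "type": "nv-ipam",
        "poolName": "my-ipv4-pool,my-ipv6-pool"
      }
    }
```

A Pod that references this network via the `k8s.v1.cni.cncf.io/networks` annotation would then receive one IPv4 and one IPv6 address from the node's assigned blocks, as described in the `poolName` documentation above.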