Adds NFS CSI plugin integration test (#7)
The Helm chart depends on a few other CSI components. We have rocks for
these, and we'll be using them in the Helm chart as well.

The test will also check the Pod probes to make sure that the service is
up, without waiting for Kubernetes to do the check.

The test will also spawn an NFS server and create an NGINX Pod requiring
an NFS volume.
claudiubelu authored Aug 19, 2024
1 parent 1a520e2 commit 6292e06
Showing 4 changed files with 210 additions and 3 deletions.
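The probe check in the test below boils down to hitting the nfsplugin's liveness endpoints directly: the plugin pods run with hostNetwork=true, so the healthz ports answer on localhost from the node. A minimal sketch of that idea in plain Python (the test itself drives curl through the harness; urllib here is only for illustration):

    import urllib.request

    # The nfsplugin pods use hostNetwork=true, so their liveness endpoints
    # (ports 29652 and 29653 with the chart defaults used below) answer on localhost.
    for port in (29652, 29653):
        with urllib.request.urlopen(f"http://localhost:{port}/healthz", timeout=5) as response:
            assert response.status == 200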
5 changes: 5 additions & 0 deletions tests/integration/conftest.py
@@ -0,0 +1,5 @@
#
# Copyright 2024 Canonical, Ltd.
#

pytest_plugins = ["k8s_test_harness.plugin"]
86 changes: 83 additions & 3 deletions tests/integration/test_csi_driver_nfs.py
@@ -2,7 +2,87 @@
 # Copyright 2024 Canonical, Ltd.
 #
 
+import logging
+import pathlib
 
-def test_csi_driver_nfs_integration():
-    """Test NFS CSI driver rock."""
-    pass
+from k8s_test_harness import harness
+from k8s_test_harness.util import constants, env_util, exec_util, k8s_util
+
+LOG = logging.getLogger(__name__)
+
+DIR = pathlib.Path(__file__).absolute().parent
+MANIFESTS_DIR = DIR / ".." / "templates"
+
+
+def _get_nfsplugin_csi_helm_cmd(version: str):
+    rock = env_util.get_build_meta_info_for_rock_version("nfsplugin", version, "amd64")
+
+    images = [
+        k8s_util.HelmImage(rock.image, subitem="nfs"),
+    ]
+
+    set_configs = [
+        "externalSnapshotter.enabled=true",
+    ]
+
+    return k8s_util.get_helm_install_command(
+        "csi-driver-nfs",
+        "csi-driver-nfs",
+        repository="https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts",
+        chart_version="v4.7.0",
+        images=images,
+        set_configs=set_configs,
+    )
+
+
+def test_nfsplugin_integration(function_instance: harness.Instance):
+    helm_command = _get_nfsplugin_csi_helm_cmd("4.7.0")
+    function_instance.exec(helm_command)
+
+    # Wait for all the components to become active.
+    k8s_util.wait_for_daemonset(function_instance, "csi-nfs-node", "kube-system")
+    k8s_util.wait_for_deployment(function_instance, "csi-nfs-controller", "kube-system")
+    k8s_util.wait_for_deployment(
+        function_instance, "snapshot-controller", "kube-system"
+    )
+
+    # Call the nfsplugin's liveness probes to check that they're running as intended.
+    for port in [29652, 29653]:
+        # It has hostNetwork=true, which means that curling localhost should work.
+        exec_util.stubbornly(retries=5, delay_s=5).on(function_instance).exec(
+            ["curl", f"http://localhost:{port}/healthz"]
+        )
+
+    # Deploy an NFS server and an nginx Pod with an NFS volume attached.
+    for item in ["nfs-server.yaml", "nginx-pod.yaml"]:
+        manifest = MANIFESTS_DIR / item
+        function_instance.exec(
+            ["k8s", "kubectl", "apply", "-f", "-"],
+            input=pathlib.Path(manifest).read_bytes(),
+        )
+
+    # Expect the Pod to become ready and to have the NFS volume attached.
+    k8s_util.wait_for_deployment(function_instance, "nfs-server")
+    k8s_util.wait_for_resource(
+        function_instance,
+        "pod",
+        "nginx-nfs-example",
+        condition=constants.K8S_CONDITION_READY,
+    )
+
+    process = function_instance.exec(
+        [
+            "k8s",
+            "kubectl",
+            "exec",
+            "nginx-nfs-example",
+            "--",
+            "bash",
+            "-c",
+            "findmnt /var/www -o TARGET,SOURCE,FSTYPE",
+        ],
+        capture_output=True,
+        text=True,
+    )
+
+    assert "/var/www nfs-server.default.svc.cluster.local:/ nfs4" in process.stdout
62 changes: 62 additions & 0 deletions tests/templates/nfs-server.yaml
@@ -0,0 +1,62 @@
# from: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/nfs-provisioner/nfs-server.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: nfs-server
  namespace: default
  labels:
    app: nfs-server
spec:
  type: ClusterIP  # use "LoadBalancer" to get a public ip
  selector:
    app: nfs-server
  ports:
    - name: tcp-2049
      port: 2049
      protocol: TCP
    - name: udp-111
      port: 111
      protocol: UDP
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-server
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-server
  template:
    metadata:
      name: nfs-server
      labels:
        app: nfs-server
    spec:
      nodeSelector:
        "kubernetes.io/os": linux
      containers:
        - name: nfs-server
          image: itsthenetwork/nfs-server-alpine:latest
          env:
            - name: SHARED_DIRECTORY
              value: "/exports"
          volumeMounts:
            - mountPath: /exports
              name: nfs-vol
          securityContext:
            privileged: true
          ports:
            - name: tcp-2049
              containerPort: 2049
              protocol: TCP
            - name: udp-111
              containerPort: 111
              protocol: UDP
      volumes:
        - name: nfs-vol
          hostPath:
            path: /nfs-vol  # modify this to specify another path to store nfs share data
            type: DirectoryOrCreate
60 changes: 60 additions & 0 deletions tests/templates/nginx-pod.yaml
@@ -0,0 +1,60 @@
# from: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/nfs-provisioner/nginx-pod.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    pv.kubernetes.io/provisioned-by: nfs.csi.k8s.io
  name: pv-nginx
  namespace: default
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  mountOptions:
    - nfsvers=4.1
  csi:
    driver: nfs.csi.k8s.io
    # volumeHandle format: {nfs-server-address}#{sub-dir-name}#{share-name}
    # make sure this value is unique for every share in the cluster
    volumeHandle: nfs-server.default.svc.cluster.local/share##
    volumeAttributes:
      server: nfs-server.default.svc.cluster.local
      share: /
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-nginx
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  volumeName: pv-nginx
  storageClassName: ""
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx-nfs-example
  namespace: default
spec:
  containers:
    - image: nginx
      name: nginx
      ports:
        - containerPort: 80
          protocol: TCP
      volumeMounts:
        - mountPath: /var/www
          name: pvc-nginx
          readOnly: false
  volumes:
    - name: pvc-nginx
      persistentVolumeClaim:
        claimName: pvc-nginx
