Compare commits

..

5 Commits

Author SHA1 Message Date
Andy Zhang e4122cc01d
Merge pull request #304 from andyzhangx/cve-3.0
fix: CVE issues in image build on release-3.0
2022-03-30 20:06:05 +08:00
andyzhangx 4f86151e0b fix: CVE issues in image build on release-3.0 2022-03-30 11:32:41 +00:00
andyzhangx d1409ab179 feat: support NFSv3 protocol 2021-12-06 11:08:41 +00:00
andyzhangx b52b213253 fix: v3.0.0 chart 2021-12-04 14:15:27 +00:00
andyzhangx 182050107f fix: release-image script 2021-11-30 08:04:26 +00:00
953 changed files with 20219 additions and 59000 deletions

View File

@ -1,66 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master, 'release-**' ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master, 'release-**' ]
schedule:
- cron: '0 */24 * * *'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.18
id: go
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
run: |
make all
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
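The init step's comments mention custom queries; as a sketch of what that looks like (the `security-and-quality` suite is a built-in CodeQL suite, the local query directory is purely illustrative):

```yaml
- name: Initialize CodeQL
  uses: github/codeql-action/init@v1
  with:
    languages: ${{ matrix.language }}
    # The leading "+" keeps queries from any config file in addition to these;
    # the local directory below is a hypothetical example.
    queries: +security-and-quality, ./codeql/custom-queries
```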

View File

@ -1,15 +0,0 @@
# GitHub Action to automate the identification of common misspellings in text files.
# https://github.com/codespell-project/actions-codespell
# https://github.com/codespell-project/codespell
name: codespell
on: [push, pull_request]
jobs:
codespell:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: codespell-project/actions-codespell@master
with:
check_filenames: true
skip: ./.git,./.github/workflows/codespell.yml,.git,*.png,*.jpg,*.svg,*.sum,./vendor,go.sum,./release-tools/prow.sh
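Deliberate spellings can be allow-listed rather than skipped by path; a hedged sketch (assuming the action's `ignore_words_list` input, which mirrors codespell's `--ignore-words-list` flag):

```yaml
- uses: codespell-project/actions-codespell@master
  with:
    check_filenames: true
    # "creat" is a classic false positive (the POSIX creat() syscall).
    ignore_words_list: creat
```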

View File

@ -13,7 +13,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.17
go-version: ^1.16
id: go
- name: Check out code into the Go module directory

View File

@ -1,26 +0,0 @@
name: k8s api version check
on:
pull_request: {}
push: {}
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
# https://pluto.docs.fairwinds.com/advanced/#display-options
- name: Download pluto
uses: FairwindsOps/pluto/github-action@master
- name: Check deploy folder
run: |
pluto detect-files -d deploy
- name: Check example folder
run: |
pluto detect-files -d deploy/example
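Pluto flags manifests built against deprecated or removed Kubernetes APIs. As an illustration of what would fail this check, an Ingress still pinned to `extensions/v1beta1` (removed in Kubernetes 1.22):

```yaml
# Reported by pluto: extensions/v1beta1 Ingress is removed;
# networking.k8s.io/v1 is the replacement.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: legacy-ingress
```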

View File

@ -1,29 +0,0 @@
name: ShellCheck
on:
push:
tags:
- v*
branches:
- master
- release-*
pull_request:
branches:
- master
- release-*
jobs:
shellcheck:
name: Shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@master
env:
SHELLCHECK_OPTS: -e SC2034
with:
severity: warning
check_together: 'yes'
disable_matcher: false
ignore_paths: vendor release-tools hack
format: gcc
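Further checks can be excluded the same way SC2034 is, by appending codes to `SHELLCHECK_OPTS` (SC2086 below is only an illustration):

```yaml
env:
  # -e excludes a check; SC2034 = unused variable, SC2086 = unquoted expansion.
  SHELLCHECK_OPTS: -e SC2034 -e SC2086
```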

View File

@ -12,8 +12,8 @@ jobs:
- name: Run linter
uses: golangci/golangci-lint-action@v2
with:
version: v1.45
args: -E=gofmt,deadcode,unused,varcheck,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,contextcheck --timeout=30m0s
version: v1.29
args: -E=gofmt,golint,misspell --timeout=30m0s
verify-helm:
name: Verify Helm
runs-on: ubuntu-latest
@ -23,4 +23,4 @@ jobs:
- name: Verify Helm
run: |
sudo snap install yq
sudo hack/verify-helm-chart.sh
sudo hack/verify-helm-chart.sh
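The linters enabled via `-E` can equivalently live in a `.golangci.yml`, which keeps CI and local runs in sync; a sketch assuming golangci-lint v1.45 (the newer side of this diff):

```yaml
# .golangci.yml — equivalent of the -E flags above (sketch)
run:
  timeout: 30m
linters:
  enable:
    - gofmt
    - deadcode
    - unused
    - varcheck
    - ineffassign
    - revive
    - misspell
    - exportloopref
    - asciicheck
    - bodyclose
    - contextcheck
```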

View File

@ -1,5 +1,3 @@
#! /bin/bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#! /bin/bash
# A Prow job can override these defaults, but this shouldn't be necessary.
# Only these tests make sense for csi-driver-nfs until we can integrate k/k

View File

@ -12,12 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM registry.k8s.io/build-image/debian-base:bullseye-v1.2.0
FROM k8s.gcr.io/build-image/debian-base:bullseye-v1.1.0
ARG ARCH
ARG binary=./bin/${ARCH}/nfsplugin
COPY ${binary} /nfsplugin
RUN apt update && apt upgrade -y && apt-mark unhold libcap2 && clean-install ca-certificates mount nfs-common netbase
RUN apt update && apt-mark unhold libcap2
RUN clean-install ca-certificates mount nfs-common netbase
# install updated packages to fix CVE issues
RUN clean-install libgmp10 bsdutils libssl1.1 openssl libc6 libc-bin libsystemd0 libudev1
ENTRYPOINT ["/nfsplugin"]

View File

@ -27,7 +27,7 @@ include release-tools/build.make
GIT_COMMIT = $(shell git rev-parse HEAD)
BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
IMAGE_VERSION ?= v4.1.0
IMAGE_VERSION ?= v3.0.0
LDFLAGS = -X ${PKG}/pkg/nfs.driverVersion=${IMAGE_VERSION} -X ${PKG}/pkg/nfs.gitCommit=${GIT_COMMIT} -X ${PKG}/pkg/nfs.buildDate=${BUILD_DATE}
EXT_LDFLAGS = -s -w -extldflags "-static"
# Use a custom version for E2E tests if we are testing in CI
@ -42,14 +42,14 @@ REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g")
IMAGE_TAG = $(REGISTRY)/$(IMAGENAME):$(IMAGE_VERSION)
IMAGE_TAG_LATEST = $(REGISTRY)/$(IMAGENAME):latest
E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always --set feature.enableInlineVolume=true
E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always
E2E_HELM_OPTIONS += ${EXTRA_HELM_OPTIONS}
# Output type of docker buildx build
OUTPUT_TYPE ?= docker
ALL_ARCH.linux = arm64 amd64 ppc64le
ALL_OS_ARCH = linux-arm64 linux-arm-v7 linux-amd64 linux-ppc64le
ALL_ARCH.linux = arm64 amd64
ALL_OS_ARCH = linux-arm64 linux-arm-v7 linux-amd64
.EXPORT_ALL_VARIABLES:
@ -76,6 +76,24 @@ local-build-push: nfs
docker build -t $(LOCAL_USER)/nfsplugin:latest .
docker push $(LOCAL_USER)/nfsplugin
.PHONY: local-k8s-install
local-k8s-install:
echo "Instlling locally"
kubectl apply -f $(DEPLOY_FOLDER)/rbac-csi-nfs-controller.yaml
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-driverinfo.yaml
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-controller.yaml
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-node.yaml
echo "Successfully installed"
.PHONY: local-k8s-uninstall
local-k8s-uninstall:
echo "Uninstalling driver"
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-controller.yaml --ignore-not-found
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-node.yaml --ignore-not-found
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-driverinfo.yaml --ignore-not-found
kubectl delete -f $(DEPLOY_FOLDER)/rbac-csi-nfs-controller.yaml --ignore-not-found
echo "Uninstalled NFS driver"
.PHONY: nfs
nfs:
CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) go build -a -ldflags "${LDFLAGS} ${EXT_LDFLAGS}" -mod vendor -o bin/${ARCH}/nfsplugin ./cmd/nfsplugin
@ -133,8 +151,6 @@ endif
.PHONY: install-nfs-server
install-nfs-server:
kubectl apply -f ./deploy/example/nfs-provisioner/nfs-server.yaml
kubectl delete secret mount-options --ignore-not-found
kubectl create secret generic mount-options --from-literal mountOptions="nfsvers=4.1"
.PHONY: install-helm
install-helm:
@ -145,7 +161,6 @@ e2e-bootstrap: install-helm
OUTPUT_TYPE=registry $(MAKE) container push
helm install csi-driver-nfs ./charts/latest/csi-driver-nfs --namespace kube-system --wait --timeout=15m -v=5 --debug \
${E2E_HELM_OPTIONS} \
--set controller.dnsPolicy=ClusterFirstWithHostNet \
--set controller.logLevel=8 \
--set node.logLevel=8
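The `mount-options` secret that `install-nfs-server` creates imperatively above can also be expressed as a manifest; a sketch using `stringData` so the value stays readable:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: mount-options
type: Opaque
stringData:
  # Mirrors --from-literal mountOptions="nfsvers=4.1" in the Makefile target.
  mountOptions: "nfsvers=4.1"
```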

View File

@ -1,21 +1,20 @@
# NFS CSI driver for Kubernetes
![build status](https://github.com/kubernetes-csi/csi-driver-nfs/actions/workflows/linux.yaml/badge.svg)
# CSI NFS driver
[![Coverage Status](https://coveralls.io/repos/github/kubernetes-csi/csi-driver-nfs/badge.svg?branch=master)](https://coveralls.io/github/kubernetes-csi/csi-driver-nfs?branch=master)
### Overview
This is a repository for the [NFS](https://en.wikipedia.org/wiki/Network_File_System) [CSI](https://kubernetes-csi.github.io/docs/) driver (CSI plugin name: `nfs.csi.k8s.io`). The driver requires an existing, already configured NFSv3 or NFSv4 server; it supports dynamic provisioning of Persistent Volumes via Persistent Volume Claims by creating a new subdirectory under the NFS server export (see the StorageClass sketch after the examples list below).
### Project status: GA
This is a repository for the [NFS](https://en.wikipedia.org/wiki/Network_File_System) [CSI](https://kubernetes-csi.github.io/docs/) driver, CSI plugin name: `nfs.csi.k8s.io`
### Container Images & Kubernetes Compatibility:
|driver version | supported k8s version | status |
|----------------|-----------------------|--------|
|master branch | 1.20+ | GA |
|v4.0.0 | 1.10+ | GA |
|v3.1.0 | 1.19+ | beta |
|v3.0.0 | 1.19+ | beta |
|v2.0.0 | 1.14+ | alpha |
|driver version | supported k8s version |
|----------------|-----------------------|
|master branch | 1.19+ |
|v3.0.0 | 1.19+ |
|v2.0.0 | 1.14+ |
### Requirements
This driver requires an existing NFSv3 or NFSv4 server.
### Install driver on a Kubernetes cluster
- install by [kubectl](./docs/install-nfs-csi-driver.md)
@ -25,6 +24,7 @@ This is a repository for [NFS](https://en.wikipedia.org/wiki/Network_File_System
Please refer to [`nfs.csi.k8s.io` driver parameters](./docs/driver-parameters.md)
### Examples
- [Set up an NFS Server on a Kubernetes cluster](./deploy/example/nfs-provisioner/README.md)
- [Basic usage](./deploy/example/README.md)
- [fsGroupPolicy](./deploy/example/fsgroup)
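As the overview notes, the driver provisions volumes by creating a new subdirectory under an existing NFS export. A minimal StorageClass sketch tying that together (the `server` and `share` values are placeholders for your own NFS server):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: nfs-server.default.svc.cluster.local  # placeholder NFS server address
  share: /                                      # placeholder export path
reclaimPolicy: Delete
volumeBindingMode: Immediate
```

A PVC referencing `storageClassName: nfs-csi` then triggers the subdirectory creation.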

View File

@ -5,16 +5,23 @@
### Tips
- make the controller run only on the master node: `--set controller.runOnMaster=true`
- set the replica count of the controller to `2`: `--set controller.replicas=2`
- set the replica count of the controller to `1`: `--set controller.replicas=1`
- enable `fsGroupPolicy` on a k8s 1.20+ cluster (this feature is in beta, check details [here](../deploy/example/fsgroup)): `--set feature.enableFSGroupPolicy=true`
## install latest version
```console
helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system
```
### install a specific version
```console
helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system --version v4.0.0
helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system --version v3.0.0
```
### install driver with customized driver name, deployment name
> only supported from `v3.1.0`+
> only supported from `v3.0.0`+
- the following example installs a driver named `nfs2`
```console
helm install csi-driver-nfs2 csi-driver-nfs/csi-driver-nfs --namespace kube-system --set driver.name="nfs2.csi.k8s.io" --set controller.name="csi-nfs2-controller" --set rbac.name=nfs2 --set serviceAccount.controller=csi-nfs2-controller-sa --set serviceAccount.node=csi-nfs2-node-sa --set node.name=csi-nfs2-node --set node.livenessProbe.healthPort=39653
@ -36,53 +43,52 @@ The following table lists the configurable parameters of the latest NFS CSI Driv
| Parameter | Description | Default |
|---------------------------------------------------|------------------------------------------------------------|-------------------------------------------------------------------|
| `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
| `driver.mountPermissions` | mounted folder permissions | `0777` |
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `true` |
| `feature.enableInlineVolume` | enable inline volume | `false` |
| `kubeletDir` | alternative kubelet directory | `/var/lib/kubelet` |
| `image.nfs.repository` | csi-driver-nfs image | `registry.k8s.io/sig-storage/nfsplugin` |
| `image.nfs.tag` | csi-driver-nfs image tag | `latest` |
| `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `false` |
| `image.nfs.repository` | csi-driver-nfs docker image | `gcr.io/k8s-staging-sig-storage/nfsplugin` |
| `image.nfs.tag` | csi-driver-nfs docker image tag | `amd64-linux-canary` |
| `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | `IfNotPresent` |
| `image.csiProvisioner.repository` | csi-provisioner docker image | `registry.k8s.io/sig-storage/csi-provisioner` |
| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v3.1.0` |
| `image.csiProvisioner.repository` | csi-provisioner docker image | `k8s.gcr.io/sig-storage/csi-provisioner` |
| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v2.0.4` |
| `image.csiProvisioner.pullPolicy` | csi-provisioner image pull policy | `IfNotPresent` |
| `image.livenessProbe.repository` | liveness-probe docker image | `registry.k8s.io/sig-storage/livenessprobe` |
| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.6.0` |
| `image.livenessProbe.repository` | liveness-probe docker image | `k8s.gcr.io/sig-storage/livenessprobe` |
| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.5.0` |
| `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | `IfNotPresent` |
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.5.0` |
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | `k8s.gcr.io/sig-storage/csi-node-driver-registrar` |
| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.4.0` |
| `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods) |
| `serviceAccount.create` | whether to create the service account for csi-nfs-controller | `true` |
| `rbac.create` | whether to create RBAC resources for csi-nfs-controller | `true` |
| `controller.replicas` | replica number of csi-nfs-controller | `1` |
| `controller.replicas` | the replicas of csi-nfs-controller | `2` |
| `controller.runOnMaster` | run controller on master node | `false` |
| `controller.dnsPolicy` | dnsPolicy of controller driver, available values: `Default`, `ClusterFirstWithHostNet`, `ClusterFirst` | `Default` |
| `controller.logLevel` | controller driver log level | `5` |
| `controller.workingMountDir` | working directory for provisioner to mount nfs shares temporarily | `/tmp` |
| `controller.tolerations` | controller pod tolerations | |
| `controller.resources.csiProvisioner.limits.cpu` | csi-provisioner cpu limits | 1 |
| `controller.resources.csiProvisioner.limits.memory` | csi-provisioner memory limits | 100Mi |
| `controller.resources.csiProvisioner.requests.cpu` | csi-provisioner cpu requests | 10m |
| `controller.resources.csiProvisioner.requests.memory` | csi-provisioner memory requests | 20Mi |
| `controller.resources.livenessProbe.limits.cpu` | liveness-probe cpu limits | 1 |
| `controller.resources.livenessProbe.limits.memory` | liveness-probe memory limits | 100Mi |
| `controller.resources.livenessProbe.requests.cpu` | liveness-probe cpu requests | 10m |
| `controller.resources.livenessProbe.requests.memory` | liveness-probe memory requests | 20Mi |
| `controller.resources.nfs.limits.cpu` | csi-driver-nfs cpu limits | 1 |
| `controller.resources.nfs.limits.memory` | csi-driver-nfs memory limits | 200Mi |
| `controller.resources.nfs.requests.cpu` | csi-driver-nfs cpu requests | 10m |
| `controller.resources.nfs.requests.memory` | csi-driver-nfs memory requests | 20Mi |
| `node.name` | driver node daemonset name | `csi-nfs-node` |
| `node.dnsPolicy` | dnsPolicy of driver node daemonset, available values: `Default`, `ClusterFirstWithHostNet`, `ClusterFirst` | |
| `node.maxUnavailable` | `maxUnavailable` value of driver node daemonset | `1` |
| `node.logLevel` | node driver log level | `5` |
| `node.livenessProbe.healthPort` | the health check port for liveness probe | `29653` |
| `node.tolerations` | node pod tolerations | |
| `node.resources.livenessProbe.limits.cpu` | liveness-probe cpu limits | 1 |
| `node.resources.livenessProbe.limits.memory` | liveness-probe memory limits | 100Mi |
| `node.resources.livenessProbe.requests.cpu` | liveness-probe cpu requests | 10m |
| `node.resources.livenessProbe.requests.memory` | liveness-probe memory requests | 20Mi |
| `node.resources.nodeDriverRegistrar.limits.cpu` | csi-node-driver-registrar cpu limits | 1 |
| `node.resources.nodeDriverRegistrar.limits.memory` | csi-node-driver-registrar memory limits | 100Mi |
| `node.resources.nodeDriverRegistrar.requests.cpu` | csi-node-driver-registrar cpu requests | 10m |
| `node.resources.nodeDriverRegistrar.requests.memory` | csi-node-driver-registrar memory requests | 20Mi |
| `node.resources.nfs.limits.cpu` | csi-driver-nfs cpu limits | 1 |
| `node.resources.nfs.limits.memory` | csi-driver-nfs memory limits | 300Mi |
| `node.resources.nfs.requests.cpu` | csi-driver-nfs cpu requests | 10m |
| `node.resources.nfs.requests.memory` | csi-driver-nfs memory requests | 20Mi |
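Each row above maps directly onto a key in the chart's `values.yaml`; a small override sketch (values are illustrative):

```yaml
# values-override.yaml (illustrative)
controller:
  replicas: 2
  dnsPolicy: ClusterFirstWithHostNet
  logLevel: 5
node:
  maxUnavailable: 1
feature:
  enableFSGroupPolicy: true
```

Pass it with `helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs -f values-override.yaml`.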

View File

@ -2,12 +2,30 @@ apiVersion: v1
entries:
csi-driver-nfs:
- apiVersion: v1
appVersion: v4.2.0
created: "2022-05-06T12:35:56.6991353Z"
appVersion: latest
created: "2021-12-04T14:14:45.161037914Z"
description: CSI NFS Driver for Kubernetes
digest: cb537287512ce9f99adaead8cd4904ed7284780bdc44c9b8d6705e66f28bfa5c
digest: d915fea55b4c764a1534754048210835ff42834c4c787768293b02272b5331f7
name: csi-driver-nfs
urls:
- https://gitea.devindata.com/devindata-public/csi-driver-nfs/raw/branch/master/charts/v4.2.0/csi-driver-nfs-v4.2.0.tgz
version: v4.2.0
generated: "2022-05-06T12:35:56.693722959Z"
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/latest/csi-driver-nfs-v3.0.0.tgz
version: v3.0.0
- apiVersion: v1
appVersion: v3.0.0
created: "2021-12-04T14:14:45.16202812Z"
description: CSI NFS Driver for Kubernetes
digest: cfb3811336cdc846fb6b11c093e7f4aa0e0e8b2f8b77b7768af2f46354145113
name: csi-driver-nfs
urls:
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/v3.0.0/csi-driver-nfs-v3.0.0.tgz
version: v3.0.0
- apiVersion: v1
appVersion: v2.0.0
created: "2021-12-04T14:14:45.161380116Z"
description: CSI NFS Driver for Kubernetes
digest: f537a133eaa965f1c053ffac130f82c9b2b624e1f8bd42937c9c48818464eaac
name: csi-driver-nfs
urls:
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/v2.0.0/csi-driver-nfs-v2.0.0.tgz
version: v2.0.0
generated: "2021-12-04T14:14:45.160131608Z"

Binary file not shown.

View File

@ -2,4 +2,4 @@ apiVersion: v1
appVersion: latest
description: CSI NFS Driver for Kubernetes
name: csi-driver-nfs
version: v4.1.0
version: v3.0.0

View File

@ -13,7 +13,4 @@ labels:
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 2 -}}
{{- end }}
{{- end -}}
{{- end -}}

View File

@ -20,7 +20,7 @@ spec:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: {{ .Values.controller.dnsPolicy }}
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ .Values.serviceAccount.controller }}
nodeSelector:
kubernetes.io/os: linux
@ -39,7 +39,6 @@ spec:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
env:
- name: ADDRESS
value: /csi/csi.sock
@ -73,8 +72,6 @@ spec:
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
env:
- name: NODE_ID
valueFrom:
@ -96,7 +93,7 @@ spec:
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
@ -104,7 +101,7 @@ spec:
volumes:
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,4 +1,3 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
@ -7,9 +6,6 @@ spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
{{- if .Values.feature.enableInlineVolume}}
- Ephemeral
{{- end}}
{{- if .Values.feature.enableFSGroupPolicy}}
fsGroupPolicy: File
{{- end}}
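With `feature.enableInlineVolume` set, the `Ephemeral` mode above lets a pod declare an NFS volume inline instead of going through a PVC; a sketch (server/share are placeholders, and the exact `volumeAttributes` keys should be confirmed against the driver's docs):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-inline-nfs
spec:
  containers:
    - name: nginx
      image: nginx
      volumeMounts:
        - name: data
          mountPath: /mnt/nfs
  volumes:
    - name: data
      csi:
        driver: nfs.csi.k8s.io
        volumeAttributes:
          server: nfs-server.default.svc.cluster.local  # placeholder
          share: /                                      # placeholder
```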

View File

@ -1,4 +1,5 @@
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
@ -23,8 +24,7 @@ spec:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: csi-nfs-node-sa
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.node.tolerations }}
@ -60,7 +60,7 @@ spec:
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin/csi.sock
value: /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
@ -84,7 +84,6 @@ spec:
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
env:
- name: NODE_ID
valueFrom:
@ -109,19 +108,19 @@ spec:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
resources: {{- toYaml .Values.node.resources.nfs | nindent 12 }}
volumes:
- name: socket-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin
path: /var/lib/kubelet/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir

View File

@ -37,9 +37,6 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1

View File

@ -1,64 +0,0 @@
{{- if .Values.serviceAccount.create -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-node-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
{{- end -}}
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-provisioner-role
{{ include "nfs.labels" . | indent 2 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-provisioner-binding
{{ include "nfs.labels" . | indent 2 }}
subjects:
- kind: ServiceAccount
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.rbac.name }}-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -1,20 +1,19 @@
customLabels: {}
image:
nfs:
repository: gcr.io/k8s-staging-sig-storage/nfsplugin
tag: canary
repository: mcr.microsoft.com/k8s/csi/nfs-csi
tag: v3.0.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v3.1.0
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v2.2.2
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.7.0
repository: k8s.gcr.io/sig-storage/livenessprobe
tag: v2.5.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.5.1
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.4.0
pullPolicy: IfNotPresent
serviceAccount:
@ -25,25 +24,13 @@ rbac:
create: true
name: nfs
driver:
name: nfs.csi.k8s.io
mountPermissions: 0777
feature:
enableFSGroupPolicy: true
enableInlineVolume: false
kubeletDir: /var/lib/kubelet
controller:
name: csi-nfs-controller
replicas: 1
replicas: 2
runOnMaster: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: "/tmp"
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
@ -51,24 +38,24 @@ controller:
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
cpu: 1
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
livenessProbe:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
cpu: 1
memory: 200Mi
requests:
cpu: 10m
@ -76,7 +63,6 @@ controller:
node:
name: csi-nfs-node
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
maxUnavailable: 1
logLevel: 5
livenessProbe:
@ -86,23 +72,32 @@ node:
resources:
livenessProbe:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nodeDriverRegistrar:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
cpu: 1
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
feature:
enableFSGroupPolicy: false
driver:
name: nfs.csi.k8s.io
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##

View File

@ -4,15 +4,15 @@ image:
tag: v2.0.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v2.0.4
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
repository: k8s.gcr.io/sig-storage/livenessprobe
tag: v2.1.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.0.1
pullPolicy: IfNotPresent
serviceAccount:

View File

@ -1,18 +1,18 @@
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
repository: mcr.microsoft.com/k8s/csi/nfs-csi
tag: v3.0.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v2.2.2
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
repository: k8s.gcr.io/sig-storage/livenessprobe
tag: v2.5.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.4.0
pullPolicy: IfNotPresent

View File

@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,5 +0,0 @@
apiVersion: v1
appVersion: v3.1.0
description: CSI NFS Driver for Kubernetes
name: csi-driver-nfs
version: v3.1.0

View File

@ -1,5 +0,0 @@
The CSI NFS Driver is getting deployed to your cluster.
To check the CSI NFS Driver pod status, please run:
kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch

View File

@ -1,16 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart.*/}}
{{- define "nfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* labels for helm resources */}}
{{- define "nfs.labels" -}}
labels:
app.kubernetes.io/instance: "{{ .Release.Name }}"
app.kubernetes.io/managed-by: "{{ .Release.Service }}"
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- end -}}

View File

@ -1,109 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
app: {{ .Values.controller.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.controller.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ .Values.serviceAccount.controller }}
nodeSelector:
kubernetes.io/os: linux
{{- if .Values.controller.runOnMaster}}
kubernetes.io/role: master
{{- end}}
priorityClassName: system-cluster-critical
{{- with .Values.controller.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
volumeMounts:
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.controller.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
- name: nfs
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
args:
- "--v={{ .Values.controller.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.controller.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.nfs | nindent 12 }}
volumes:
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,14 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: {{ .Values.driver.name }}
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
{{- if .Values.feature.enableInlineVolume}}
- Ephemeral
{{- end}}
{{- if .Values.feature.enableFSGroupPolicy}}
fsGroupPolicy: File
{{- end}}

View File

@ -1,127 +0,0 @@
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ .Values.node.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: {{ .Values.node.maxUnavailable }}
type: RollingUpdate
selector:
matchLabels:
app: {{ .Values.node.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.node.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.node.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.node.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }}
- name: node-driver-registrar
image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }}
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
args :
- "--v={{ .Values.node.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
resources: {{- toYaml .Values.node.resources.nfs | nindent 12 }}
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,102 +0,0 @@
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
tag: v3.1.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v2.2.2
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.5.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.4.0
pullPolicy: IfNotPresent
serviceAccount:
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-nfs-controller-sa # Name of Service Account to be created or used
rbac:
create: true
name: nfs
driver:
name: nfs.csi.k8s.io
mountPermissions: 0777
feature:
enableFSGroupPolicy: false
enableInlineVolume: false
controller:
name: csi-nfs-controller
replicas: 2
runOnMaster: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: "/tmp"
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
node:
name: csi-nfs-node
maxUnavailable: 1
logLevel: 5
livenessProbe:
healthPort: 29653
tolerations:
- operator: "Exists"
resources:
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nodeDriverRegistrar:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"

View File

@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,5 +0,0 @@
apiVersion: v1
appVersion: v4.0.0
description: CSI NFS Driver for Kubernetes
name: csi-driver-nfs
version: v4.0.0

View File

@ -1,5 +0,0 @@
The CSI NFS Driver is getting deployed to your cluster.
To check the CSI NFS Driver pod status, please run:
kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch

View File

@ -1,19 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart.*/}}
{{- define "nfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* labels for helm resources */}}
{{- define "nfs.labels" -}}
labels:
app.kubernetes.io/instance: "{{ .Release.Name }}"
app.kubernetes.io/managed-by: "{{ .Release.Service }}"
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 2 -}}
{{- end }}
{{- end -}}

View File

@ -1,110 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
app: {{ .Values.controller.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.controller.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: {{ .Values.serviceAccount.controller }}
nodeSelector:
kubernetes.io/os: linux
{{- if .Values.controller.runOnMaster}}
kubernetes.io/role: master
{{- end}}
priorityClassName: system-cluster-critical
{{- with .Values.controller.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
volumeMounts:
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.controller.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
- name: nfs
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
args:
- "--v={{ .Values.controller.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.controller.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.nfs | nindent 12 }}
volumes:
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,15 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: {{ .Values.driver.name }}
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
{{- if .Values.feature.enableInlineVolume}}
- Ephemeral
{{- end}}
{{- if .Values.feature.enableFSGroupPolicy}}
fsGroupPolicy: File
{{- end}}

View File

@ -1,126 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ .Values.node.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: {{ .Values.node.maxUnavailable }}
type: RollingUpdate
selector:
matchLabels:
app: {{ .Values.node.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.node.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: {{ .Values.controller.dnsPolicy }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.node.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.node.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }}
- name: node-driver-registrar
image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }}
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
args :
- "--v={{ .Values.node.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
resources: {{- toYaml .Values.node.resources.nfs | nindent 12 }}
volumes:
- name: socket-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,57 +0,0 @@
{{- if .Values.serviceAccount.create -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
{{- end -}}
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-provisioner-role
{{ include "nfs.labels" . | indent 2 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-provisioner-binding
{{ include "nfs.labels" . | indent 2 }}
subjects:
- kind: ServiceAccount
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.rbac.name }}-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -1,110 +0,0 @@
customLabels: {}
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
tag: v4.2.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v3.3.0
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.8.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.6.2
pullPolicy: IfNotPresent
serviceAccount:
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-nfs-controller-sa # Name of Service Account to be created or used
rbac:
create: true
name: nfs
driver:
name: nfs.csi.k8s.io
mountPermissions: 0777
feature:
enableFSGroupPolicy: true
enableInlineVolume: false
kubeletDir: /var/lib/kubelet
controller:
name: csi-nfs-controller
replicas: 1
runOnMaster: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: "/tmp"
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
node:
name: csi-nfs-node
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
maxUnavailable: 1
logLevel: 5
livenessProbe:
healthPort: 29653
tolerations:
- operator: "Exists"
resources:
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nodeDriverRegistrar:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"

View File

@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,5 +0,0 @@
apiVersion: v1
appVersion: v4.2.0
description: CSI NFS Driver for Kubernetes
name: csi-driver-nfs
version: v4.2.0

View File

@ -1,5 +0,0 @@
The CSI NFS Driver is getting deployed to your cluster.
To check the CSI NFS Driver pod status, please run:
kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/instance={{ .Release.Name }}" --watch

View File

@ -1,19 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart.*/}}
{{- define "nfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* labels for helm resources */}}
{{- define "nfs.labels" -}}
labels:
app.kubernetes.io/instance: "{{ .Release.Name }}"
app.kubernetes.io/managed-by: "{{ .Release.Service }}"
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 2 -}}
{{- end }}
{{- end -}}

View File

@ -1,123 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
app: {{ .Values.controller.name }}
strategy:
type: {{ .Values.controller.strategyType }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.controller.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: {{ .Values.serviceAccount.controller }}
{{- with .Values.controller.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
nodeSelector:
kubernetes.io/os: linux
{{- if .Values.controller.runOnMaster}}
node-role.kubernetes.io/master: ""
{{- end}}
{{- if .Values.controller.runOnControlPlane}}
node-role.kubernetes.io/control-plane: ""
{{- end}}
{{- with .Values.controller.nodeSelector }}
{{ toYaml . | indent 8 }}
{{- end }}
priorityClassName: system-cluster-critical
{{- with .Values.controller.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
volumeMounts:
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.controller.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
- name: nfs
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
args:
- "--v={{ .Values.controller.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.controller.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.nfs | nindent 12 }}
volumes:
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,15 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: {{ .Values.driver.name }}
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
{{- if .Values.feature.enableInlineVolume}}
- Ephemeral
{{- end}}
{{- if .Values.feature.enableFSGroupPolicy}}
fsGroupPolicy: File
{{- end}}

View File

@ -1,134 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ .Values.node.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: {{ .Values.node.maxUnavailable }}
type: RollingUpdate
selector:
matchLabels:
app: {{ .Values.node.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.node.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: csi-nfs-node-sa
{{- with .Values.node.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.node.nodeSelector }}
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.node.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.node.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }}
- name: node-driver-registrar
image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }}
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
args:
- "--v={{ .Values.node.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
resources: {{- toYaml .Values.node.resources.nfs | nindent 12 }}
volumes:
- name: socket-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,64 +0,0 @@
{{- if .Values.serviceAccount.create -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-node-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
{{- end }}
{{ if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-provisioner-role
{{ include "nfs.labels" . | indent 2 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-provisioner-binding
{{ include "nfs.labels" . | indent 2 }}
subjects:
- kind: ServiceAccount
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.rbac.name }}-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -1,116 +0,0 @@
customLabels: {}
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
tag: v4.2.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v3.3.0
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.8.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.6.2
pullPolicy: IfNotPresent
serviceAccount:
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-nfs-controller-sa # Name of Service Account to be created or used
rbac:
create: true
name: nfs
driver:
name: nfs.csi.k8s.io
mountPermissions: 0
feature:
enableFSGroupPolicy: true
enableInlineVolume: false
kubeletDir: /var/lib/kubelet
controller:
name: csi-nfs-controller
replicas: 1
strategyType: Recreate
runOnMaster: false
runOnControlPlane: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: "/tmp"
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
affinity: {}
nodeSelector: {}
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
node:
name: csi-nfs-node
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
maxUnavailable: 1
logLevel: 5
livenessProbe:
healthPort: 29653
affinity: {}
nodeSelector: {}
tolerations:
- operator: "Exists"
resources:
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nodeDriverRegistrar:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"

View File

@ -18,7 +18,9 @@ package main
import (
"flag"
"fmt"
"os"
"strconv"
"github.com/kubernetes-csi/csi-driver-nfs/pkg/nfs"
@ -26,11 +28,10 @@ import (
)
var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
nodeID = flag.String("nodeid", "", "node id")
mountPermissions = flag.Uint64("mount-permissions", 0777, "mounted folder permissions")
driverName = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
workingMountDir = flag.String("working-mount-dir", "/tmp", "working directory for provisioner to mount nfs shares temporarily")
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
nodeID = flag.String("nodeid", "", "node id")
perm = flag.String("mount-permissions", "", "mounted folder permissions")
driverName = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
)
func init() {
@ -49,13 +50,18 @@ func main() {
}
func handle() {
driverOptions := nfs.DriverOptions{
NodeID: *nodeID,
DriverName: *driverName,
Endpoint: *endpoint,
MountPermissions: *mountPermissions,
WorkingMountDir: *workingMountDir,
// Converting string permission representation to *uint32
var parsedPerm *uint32
if perm != nil && *perm != "" {
permu64, err := strconv.ParseUint(*perm, 8, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "incorrect mount-permissions value: %q", *perm)
os.Exit(1)
}
permu32 := uint32(permu64)
parsedPerm = &permu32
}
d := nfs.NewDriver(&driverOptions)
d := nfs.NewNFSdriver(*nodeID, *driverName, *endpoint, parsedPerm)
d.Run(false)
}
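On release-3.0 the `--mount-permissions` flag is a string parsed as octal rather than a `uint64` flag, so an empty value can mean "skip chmod". A minimal standalone sketch of that conversion (names are illustrative, not from the repo):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseOctalPerm mirrors the conversion in handle() above: an octal string
// such as "0777" becomes a *uint32, and an empty string yields nil,
// meaning the driver leaves permissions untouched.
func parseOctalPerm(s string) (*uint32, error) {
	if s == "" {
		return nil, nil
	}
	v, err := strconv.ParseUint(s, 8, 32) // base 8, must fit in 32 bits
	if err != nil {
		return nil, fmt.Errorf("incorrect mount-permissions value: %q", s)
	}
	p := uint32(v)
	return &p, nil
}

func main() {
	p, err := parseOctalPerm("0777")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("parsed permissions: %o\n", *p) // prints 777
}
```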

View File

@ -5,7 +5,7 @@ metadata:
name: csi-nfs-controller
namespace: kube-system
spec:
replicas: 1
replicas: 2
selector:
matchLabels:
app: csi-nfs-controller
@ -15,7 +15,7 @@ spec:
app: csi-nfs-controller
spec:
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: csi-nfs-controller-sa
nodeSelector:
kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
@ -27,17 +27,13 @@ spec:
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace=kube-system"
env:
- name: ADDRESS
value: /csi/csi.sock
@ -46,12 +42,13 @@ spec:
name: socket-dir
resources:
limits:
cpu: 1
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -62,12 +59,13 @@ spec:
mountPath: /csi
resources:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: nfs
image: gcr.io/k8s-staging-sig-storage/nfsplugin:canary
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.0.0
securityContext:
privileged: true
capabilities:
@ -105,6 +103,7 @@ spec:
name: socket-dir
resources:
limits:
cpu: 1
memory: 200Mi
requests:
cpu: 10m

View File

@ -7,5 +7,3 @@ spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
- Ephemeral
fsGroupPolicy: File

View File

@ -1,4 +1,6 @@
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
@ -18,15 +20,14 @@ spec:
app: csi-nfs-node
spec:
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
serviceAccountName: csi-nfs-node-sa
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -37,12 +38,13 @@ spec:
mountPath: /csi
resources:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
args:
- --v=2
- --csi-address=/csi/csi.sock
@ -69,6 +71,7 @@ spec:
mountPath: /registration
resources:
limits:
cpu: 1
memory: 100Mi
requests:
cpu: 10m
@ -79,7 +82,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: gcr.io/k8s-staging-sig-storage/nfsplugin:canary
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.0.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"
@ -112,6 +115,7 @@ spec:
mountPropagation: "Bidirectional"
resources:
limits:
cpu: 1
memory: 300Mi
requests:
cpu: 10m

View File

@ -1,39 +1,25 @@
# CSI driver example
After the NFS CSI Driver is deployed in your cluster, you can follow this documentation to quickly deploy some examples.
You can use the NFS CSI Driver to provision Persistent Volumes statically or dynamically. Please read the [Kubernetes Persistent Volumes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for more information about static and dynamic provisioning.
Please refer to [driver parameters](../../docs/driver-parameters.md) for more detailed usage.
## Prerequisite
- [Set up an NFS Server on a Kubernetes cluster](./nfs-provisioner/README.md) as an example
- [Install NFS CSI Driver](../../docs/install-nfs-csi-driver.md)
- [Set up an NFS Server on a Kubernetes cluster](./nfs-provisioner/README.md)
- [Install NFS CSI Driver](../../docs/install-csi-driver.md)
## Storage Class Usage (Dynamic Provisioning)
- Create a storage class
> replace `server` and `share` with your existing NFS server address and share name
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
server: nfs-server.default.svc.cluster.local
share: /
# csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
# csi.storage.k8s.io/provisioner-secret-name: "mount-options"
# csi.storage.k8s.io/provisioner-secret-namespace: "default"
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
- nconnect=8 # only supported on linux kernel version >= 5.3
- nfsvers=4.1
```
- Run the following commands to create a `StorageClass`, and then a `PersistentVolume` and `PersistentVolumeClaim` dynamically.
- create PVC
```console
```bash
# create StorageClass
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/storageclass-nfs.yaml
# create PVC
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/pvc-nfs-csi-dynamic.yaml
```
@ -49,7 +35,13 @@ kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nf
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/pvc-nfs-csi-static.yaml
```
## Create a deployment
```console
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/deployment.yaml
```
## Deployment/StatefulSet Usage
- Run the following commands to create a `Deployment` and a `StatefulSet`.
```bash
# create Deployment and StatefulSet
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
./hack/verify-examples.sh
```

View File

@ -14,7 +14,7 @@ kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nf
- During the deployment, a new service `nfs-server` will be created that exposes the NFS server endpoint `nfs-server.default.svc.cluster.local` and the share path `/`. You can specify a `PersistentVolume` or `StorageClass` using this information.
- Deploy the NFS CSI driver, please refer to [install NFS CSI driver](../../../docs/install-nfs-csi-driver.md).
- Deploy the NFS CSI driver, please refer to [install NFS CSI driver](../../../docs/install-csi-driver.md).
- To check if the NFS server is working, we can statically create a PersistentVolume and a PersistentVolumeClaim, and mount it onto a sample pod:

View File

@ -10,6 +10,7 @@ spec:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
mountOptions:
- hard
- nfsvers=4.1
csi:
driver: nfs.csi.k8s.io

View File

@ -1,26 +0,0 @@
---
kind: Pod
apiVersion: v1
metadata:
name: nginx-pod-inline-volume
spec:
nodeSelector:
"kubernetes.io/os": linux
containers:
- image: mcr.microsoft.com/oss/nginx/nginx:1.19.5
name: nginx-nfs
command:
- "/bin/bash"
- "-c"
- set -euo pipefail; while true; do echo $(date) >> /mnt/nfs/outfile; sleep 1; done
volumeMounts:
- name: persistent-storage
mountPath: "/mnt/nfs"
volumes:
- name: persistent-storage
csi:
driver: nfs.csi.k8s.io
volumeAttributes:
server: nfs-server.default.svc.cluster.local # required
share: / # required
mountOptions: "nfsvers=4.1,sec=sys" # optional

View File

@ -9,9 +9,8 @@ spec:
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-csi
mountOptions:
- nconnect=8 # only supported on linux kernel version >= 5.3
- hard
- nfsvers=4.1
csi:
driver: nfs.csi.k8s.io

View File

@ -10,4 +10,4 @@ spec:
requests:
storage: 10Gi
volumeName: pv-nfs
storageClassName: nfs-csi
storageClassName: ""

View File

@ -7,11 +7,8 @@ provisioner: nfs.csi.k8s.io
parameters:
server: nfs-server.default.svc.cluster.local
share: /
# csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
# csi.storage.k8s.io/provisioner-secret-name: "mount-options"
# csi.storage.k8s.io/provisioner-secret-namespace: "default"
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
- nconnect=8 # only supported on linux kernel version >= 5.3
- hard
- nfsvers=4.1

View File

@ -34,7 +34,7 @@ if [ $ver != "master" ]; then
fi
echo "Installing NFS CSI driver, version: $ver ..."
kubectl apply -f $repo/rbac-csi-nfs.yaml
kubectl apply -f $repo/rbac-csi-nfs-controller.yaml
kubectl apply -f $repo/csi-nfs-driverinfo.yaml
kubectl apply -f $repo/csi-nfs-controller.yaml
kubectl apply -f $repo/csi-nfs-node.yaml

View File

@ -33,9 +33,6 @@ rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding

View File

@ -1,57 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nfs-controller-sa
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nfs-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-nfs-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: nfs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io

View File

@ -37,5 +37,5 @@ echo "Uninstalling NFS driver, version: $ver ..."
kubectl delete -f $repo/csi-nfs-controller.yaml --ignore-not-found
kubectl delete -f $repo/csi-nfs-node.yaml --ignore-not-found
kubectl delete -f $repo/csi-nfs-driverinfo.yaml --ignore-not-found
kubectl delete -f $repo/rbac-csi-nfs.yaml --ignore-not-found
kubectl delete -f $repo/rbac-csi-nfs-controller.yaml --ignore-not-found
echo 'Uninstalled NFS driver successfully.'

View File

@ -29,7 +29,7 @@ spec:
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v2.2.2
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
@ -48,7 +48,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -65,7 +65,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: nfs
image: registry.k8s.io/sig-storage/nfsplugin:v3.0.0
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.0.0
securityContext:
privileged: true
capabilities:

View File

@ -27,7 +27,7 @@ spec:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -44,7 +44,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.4.0
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
args:
- --v=2
- --csi-address=/csi/csi.sock
@ -82,7 +82,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/nfsplugin:v3.0.0
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.0.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"

View File

@ -1,114 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-nfs-controller
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app: csi-nfs-controller
template:
metadata:
labels:
app: csi-nfs-controller
spec:
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: csi-nfs-controller-sa
nodeSelector:
kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v2.2.2
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29652
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: nfs
image: registry.k8s.io/sig-storage/nfsplugin:v3.1.0
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29652
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,10 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: nfs.csi.k8s.io
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
- Ephemeral

View File

@ -1,132 +0,0 @@
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-nfs-node
namespace: kube-system
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: csi-nfs-node
template:
metadata:
labels:
app: csi-nfs-node
spec:
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29653
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.4.0
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
env:
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/nfsplugin:v3.1.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29653
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
resources:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,118 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-nfs-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: csi-nfs-controller
template:
metadata:
labels:
app: csi-nfs-controller
spec:
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
serviceAccountName: csi-nfs-controller-sa
nodeSelector:
kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace=kube-system"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29652
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: nfs
image: registry.k8s.io/sig-storage/nfsplugin:v4.0.0
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29652
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,11 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: nfs.csi.k8s.io
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
- Ephemeral
fsGroupPolicy: File

View File

@ -1,130 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-nfs-node
namespace: kube-system
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: csi-nfs-node
template:
metadata:
labels:
app: csi-nfs-node
spec:
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29653
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.0
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
env:
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/nfsplugin:v4.0.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29653
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
resources:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,52 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nfs-controller-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-nfs-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: nfs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io

View File

@ -1,6 +1,6 @@
## CSI driver debug tips
### case#1: volume create/delete failed
### Case#1: volume create/delete failed
- locate csi driver pod
```console
$ kubectl get pod -o wide -n kube-system | grep csi-nfs-controller
@ -14,8 +14,8 @@ $ kubectl logs csi-nfs-controller-56bfddd689-dh5tk -c nfs -n kube-system > csi-n
```
> note: there could be multiple controller pods; if there are no helpful logs, try getting logs from the other controller pods
### case#2: volume mount/unmount failed
- locate csi driver pod that does the actual volume mount/unmount
### Case#2: volume mount/unmount failed
- locate the csi driver pod and figure out which pod does the actual volume mount/unmount
```console
$ kubectl get pod -o wide -n kube-system | grep csi-nfs-node

View File

@ -37,7 +37,7 @@ $ make build
#### Start CSI driver locally
```console
$ cd $GOPATH/src/github.com/kubernetes-csi/csi-driver-nfs
$ ./bin/nfsplugin --endpoint unix:///tmp/csi.sock --nodeid CSINode -v=5 &
$ ./_output/nfsplugin --endpoint tcp://127.0.0.1:10000 --nodeid CSINode -v=5 &
```
#### 0. Set environment variables

View File

@ -1,46 +1,18 @@
## Driver Parameters
> This driver requires an existing and already configured NFSv3 or NFSv4 server; it supports dynamic provisioning of Persistent Volumes via Persistent Volume Claims by creating a new subdirectory under the NFS server.
> This plugin driver itself only provides a communication layer between resources in the cluster and the NFS server; you need to bring your own NFS server before using this driver.
### storage class usage (dynamic provisioning)
### Storage Class Usage (Dynamic Provisioning)
> [`StorageClass` example](../deploy/example/storageclass-nfs.yaml)
Name | Meaning | Example Value | Mandatory | Default value
--- | --- | --- | --- | ---
server | NFS Server address | domain name `nfs-server.default.svc.cluster.local` <br>or IP address `127.0.0.1` | Yes |
server | NFS Server endpoint | Domain name `nfs-server.default.svc.cluster.local` <br>Or IP address `127.0.0.1` | Yes |
share | NFS share path | `/` | Yes |
mountPermissions | mounted folder permissions. The default is `0777`; if set to `0`, the driver will not perform `chmod` after mount | | No |
### PV/PVC usage (static provisioning)
### PV/PVC Usage (Static Provisioning)
> [`PersistentVolume` example](../deploy/example/pv-nfs-csi.yaml)
Name | Meaning | Example Value | Mandatory | Default value
--- | --- | --- | --- | ---
volumeAttributes.server | NFS Server address | domain name `nfs-server.default.svc.cluster.local` <br>or IP address `127.0.0.1` | Yes |
volumeAttributes.server | NFS Server endpoint | Domain name `nfs-server.default.svc.cluster.local` <br>Or IP address `127.0.0.1` | Yes |
volumeAttributes.share | NFS share path | `/` | Yes |
volumeAttributes.mountPermissions | mounted folder permissions. The default is `0777` | | No |
### Tips
#### provide `mountOptions` for `DeleteVolume`
> since `DeleteVolumeRequest` does not provide `mountOptions`, the following is a workaround to provide `mountOptions` for `DeleteVolume`; check the details [here](https://github.com/kubernetes-csi/csi-driver-nfs/issues/260)
- create a secret with `mountOptions`
```console
kubectl create secret generic mount-options --from-literal mountOptions="nfsvers=3,hard"
```
- define a storage class with `csi.storage.k8s.io/provisioner-secret-name` and `csi.storage.k8s.io/provisioner-secret-namespace` setting:
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
server: nfs-server.default.svc.cluster.local
share: /
# csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
csi.storage.k8s.io/provisioner-secret-name: "mount-options"
csi.storage.k8s.io/provisioner-secret-namespace: "default"
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
- nfsvers=4.1
```

View File

@ -1,14 +1,14 @@
# Install NFS CSI driver master version on a Kubernetes cluster
If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
If you have already installed Helm, you can also use it to install the NFS CSI driver. Please see [Installation with Helm](../charts/README.md).
## Install with kubectl
- Option#1. remote install
- remote install
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/install-driver.sh | bash -s master --
```
- Option#2. local install
- local install
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
@ -26,20 +26,12 @@ example output:
```console
NAME READY STATUS RESTARTS AGE IP NODE
csi-nfs-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
csi-nfs-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0
```
### clean up NFS CSI driver
- Option#1. remote uninstall
- clean up NFS CSI driver
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/uninstall-driver.sh | bash -s master --
```
- Option#2. local uninstall
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
git checkout master
./deploy/uninstall-driver.sh master local
```
```

View File

@ -1,37 +0,0 @@
# Install NFS CSI driver v3.1.0 version on a Kubernetes cluster
If you have already installed Helm, you can also use it to install the NFS CSI driver. Please see [Installation with Helm](../charts/README.md).
## Install with kubectl
- remote install
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/v3.1.0/deploy/install-driver.sh | bash -s v3.1.0 --
```
- local install
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
./deploy/install-driver.sh v3.1.0 local
```
- check pods status:
```console
kubectl -n kube-system get pod -o wide -l app=csi-nfs-controller
kubectl -n kube-system get pod -o wide -l app=csi-nfs-node
```
example output:
```console
NAME READY STATUS RESTARTS AGE IP NODE
csi-nfs-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
csi-nfs-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0
```
- clean up NFS CSI driver
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/v3.1.0/deploy/uninstall-driver.sh | bash -s v3.1.0 --
```

View File

@ -1,45 +0,0 @@
# Install NFS CSI driver v4.0.0 version on a Kubernetes cluster
If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
## Install with kubectl
- Option#1. remote install
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/v4.0.0/deploy/install-driver.sh | bash -s v4.0.0 --
```
- Option#2. local install
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
./deploy/install-driver.sh v4.0.0 local
```
- check pods status:
```console
kubectl -n kube-system get pod -o wide -l app=csi-nfs-controller
kubectl -n kube-system get pod -o wide -l app=csi-nfs-node
```
example output:
```console
NAME READY STATUS RESTARTS AGE IP NODE
csi-nfs-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
csi-nfs-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0
```
### clean up NFS CSI driver
- Option#1. remote uninstall
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/v4.0.0/deploy/uninstall-driver.sh | bash -s v4.0.0 --
```
- Option#2. local uninstall
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
git checkout v4.0.0
./deploy/uninstall-driver.sh v4.0.0 local
```

View File

@ -1,6 +1,4 @@
## Install NFS CSI driver on a Kubernetes cluster
- [install CSI driver master version](./install-csi-driver-master.md)
- [install CSI driver v4.0.0 version](./install-csi-driver-v4.0.0.md)
- [install CSI driver v3.1.0 version](./install-csi-driver-v3.1.0.md)
- [install CSI driver v3.0.0 version](./install-csi-driver-v3.0.0.md)

75
go.mod
View File

@ -6,51 +6,48 @@ require (
github.com/container-storage-interface/spec v1.5.0
github.com/golang/protobuf v1.5.2
github.com/kubernetes-csi/csi-lib-utils v0.9.0
github.com/kubernetes-csi/external-snapshotter/v2 v2.0.0-20200617021606-4800ca72d403
github.com/onsi/ginkgo v1.14.0
github.com/onsi/gomega v1.10.1
github.com/pborman/uuid v1.2.0
github.com/prometheus/client_golang v1.11.1 // indirect
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63
google.golang.org/grpc v1.40.0
k8s.io/api v0.23.3
k8s.io/apimachinery v0.23.3
k8s.io/client-go v0.23.3
k8s.io/klog/v2 v2.30.0
k8s.io/kubernetes v1.23.3
k8s.io/mount-utils v0.23.3
k8s.io/utils v0.0.0-20211116205334-6203023598ed
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023
google.golang.org/grpc v1.38.0
k8s.io/api v0.22.3
k8s.io/apimachinery v0.22.3
k8s.io/client-go v0.22.3
k8s.io/klog/v2 v2.9.0
k8s.io/kubernetes v1.22.3
k8s.io/mount-utils v0.22.3
sigs.k8s.io/yaml v1.2.0
)
replace (
golang.org/x/text => golang.org/x/text v0.3.7
k8s.io/api => k8s.io/api v0.23.3
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3
k8s.io/apimachinery => k8s.io/apimachinery v0.23.3
k8s.io/apiserver => k8s.io/apiserver v0.23.3
k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3
k8s.io/client-go => k8s.io/client-go v0.23.3
k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3
k8s.io/code-generator => k8s.io/code-generator v0.23.3
k8s.io/component-base => k8s.io/component-base v0.23.3
k8s.io/component-helpers => k8s.io/component-helpers v0.23.3
k8s.io/controller-manager => k8s.io/controller-manager v0.23.3
k8s.io/cri-api => k8s.io/cri-api v0.23.3
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3
k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3
k8s.io/kubectl => k8s.io/kubectl v0.23.3
k8s.io/kubelet => k8s.io/kubelet v0.23.3
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3
k8s.io/metrics => k8s.io/metrics v0.23.3
k8s.io/mount-utils => k8s.io/mount-utils v0.23.3
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.3
k8s.io/sample-controller => k8s.io/sample-controller v0.23.3
k8s.io/api => k8s.io/api v0.22.3
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.22.3
k8s.io/apimachinery => k8s.io/apimachinery v0.22.3
k8s.io/apiserver => k8s.io/apiserver v0.22.3
k8s.io/cli-runtime => k8s.io/cli-runtime v0.22.3
k8s.io/client-go => k8s.io/client-go v0.22.3
k8s.io/cloud-provider => k8s.io/cloud-provider v0.22.3
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.22.3
k8s.io/code-generator => k8s.io/code-generator v0.22.3
k8s.io/component-base => k8s.io/component-base v0.22.3
k8s.io/component-helpers => k8s.io/component-helpers v0.22.3
k8s.io/controller-manager => k8s.io/controller-manager v0.22.3
k8s.io/cri-api => k8s.io/cri-api v0.22.3
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.22.3
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.22.3
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.22.3
k8s.io/kube-proxy => k8s.io/kube-proxy v0.22.3
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.22.3
k8s.io/kubectl => k8s.io/kubectl v0.22.3
k8s.io/kubelet => k8s.io/kubelet v0.22.3
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.22.3
k8s.io/metrics => k8s.io/metrics v0.22.3
k8s.io/mount-utils => k8s.io/mount-utils v0.22.3
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.22.3
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.22.3
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.22.3
k8s.io/sample-controller => k8s.io/sample-controller v0.22.3
)

755
go.sum

File diff suppressed because it is too large

View File

@ -28,7 +28,7 @@ export IMAGENAME=public/k8s/csi/nfs-csi
export CI=1
export PUBLISH=1
az acr login --name $REGISTRY_NAME
make && make container push push-latest
make container push push-latest
echo "sleep 60s ..."
sleep 60

View File

@ -20,8 +20,10 @@ readonly PKG_ROOT="$(git rev-parse --show-toplevel)"
${PKG_ROOT}/hack/verify-gofmt.sh
${PKG_ROOT}/hack/verify-govet.sh
${PKG_ROOT}/hack/verify-golint.sh
${PKG_ROOT}/hack/verify-yamllint.sh
${PKG_ROOT}/hack/verify-boilerplate.sh
${PKG_ROOT}/hack/verify-spelling.sh
${PKG_ROOT}/hack/verify-helm-chart-files.sh
${PKG_ROOT}/hack/verify-helm-chart.sh
${PKG_ROOT}/hack/verify-gomod.sh

View File

@ -20,7 +20,7 @@ rollout_and_wait() {
trap "echo \"Failed to apply config \\\"$1\\\"\" >&2" err
APPNAME=$(kubectl apply -f $1 | grep -E "^(:?daemonset|deployment|statefulset|pod)" | awk '{printf $1}')
if [[ -n $(expr "${APPNAME}" : "\(daemonset\|deployment\|statefulset\|pod\)" || true) ]]; then
if [[ -n $(expr "${APPNAME}" : "\(daemonset\|deployment\|statefulset\)" || true) ]]; then
kubectl rollout status $APPNAME --watch --timeout=5m
else
kubectl wait "${APPNAME}" --for condition=ready --timeout=5m

View File

@ -50,6 +50,6 @@ echo "chart tgz files verified."
echo "verify helm chart index ..."
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
helm repo add csi-driver-smb https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
helm search repo -l csi-driver-nfs
echo "helm chart index verified."

View File

@ -21,7 +21,6 @@ import (
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/container-storage-interface/spec/lib/go/csi"
@ -35,6 +34,8 @@ import (
// ControllerServer controller server setting
type ControllerServer struct {
Driver *Driver
// Working directory for the provisioner to temporarily mount nfs shares at
workingMountDir string
}
// nfsVolume is an internal representation of a volume
@ -67,45 +68,18 @@ const (
totalIDElements // Always last
)
const separator = "#"
// CreateVolume create a volume
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
name := req.GetName()
if len(name) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume name must be provided")
}
if err := isValidVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
if err := cs.validateVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
mountPermissions := cs.Driver.mountPermissions
reqCapacity := req.GetCapacityRange().GetRequiredBytes()
parameters := req.GetParameters()
if parameters == nil {
parameters = make(map[string]string)
}
// validate parameters (case-insensitive)
for k, v := range parameters {
switch strings.ToLower(k) {
case paramServer:
// no op
case paramShare:
// no op
case mountPermissionsField:
if v != "" {
var err error
if mountPermissions, err = strconv.ParseUint(v, 8, 32); err != nil {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("invalid mountPermissions %s in storage class", v))
}
}
default:
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("invalid parameter %q in storage class", k))
}
}
nfsVol, err := cs.newNFSVolume(name, reqCapacity, parameters)
nfsVol, err := cs.newNFSVolume(name, reqCapacity, req.GetParameters())
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
@ -115,7 +89,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
volCap = req.GetVolumeCapabilities()[0]
}
// Mount nfs base share so we can create a subdirectory
if err = cs.internalMount(ctx, nfsVol, parameters, volCap); err != nil {
if err = cs.internalMount(ctx, nfsVol, volCap); err != nil {
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
}
defer func() {
@ -124,26 +98,17 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
}
}()
fileMode := os.FileMode(mountPermissions)
// Create subdirectory under base-dir
// TODO: revisit permissions
internalVolumePath := cs.getInternalVolumePath(nfsVol)
if err = os.Mkdir(internalVolumePath, fileMode); err != nil && !os.IsExist(err) {
if err = os.Mkdir(internalVolumePath, 0777); err != nil && !os.IsExist(err) {
return nil, status.Errorf(codes.Internal, "failed to make subdirectory: %v", err.Error())
}
// Reset directory permissions because of umask problems
if err = os.Chmod(internalVolumePath, fileMode); err != nil {
if err = os.Chmod(internalVolumePath, 0777); err != nil {
klog.Warningf("failed to chmod subdirectory: %v", err.Error())
}
parameters[paramServer] = nfsVol.server
parameters[paramShare] = cs.getVolumeSharePath(nfsVol)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: nfsVol.id,
CapacityBytes: 0, // by setting it to zero, Provisioner will use PVC requested size as PV size
VolumeContext: parameters,
},
}, nil
return &csi.CreateVolumeResponse{Volume: cs.nfsVolToCSI(nfsVol)}, nil
}
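The `Chmod` immediately after `Mkdir` above exists because `os.Mkdir`'s mode argument is filtered through the process umask, so a requested `0777` usually materializes as `0755`. A Linux-only sketch demonstrating the effect (the demo path is hypothetical):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	old := syscall.Umask(0022) // a common default umask
	defer syscall.Umask(old)

	dir := "/tmp/umask-demo" // hypothetical scratch directory
	defer os.RemoveAll(dir)

	if err := os.Mkdir(dir, 0777); err != nil && !os.IsExist(err) {
		panic(err)
	}
	fi, _ := os.Stat(dir)
	fmt.Printf("after Mkdir: %o\n", fi.Mode().Perm()) // 755, umask applied

	// Chmod bypasses the umask, restoring the intended permissions.
	if err := os.Chmod(dir, 0777); err != nil {
		panic(err)
	}
	fi, _ = os.Stat(dir)
	fmt.Printf("after Chmod: %o\n", fi.Mode().Perm()) // 777
}
```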
// DeleteVolume delete a volume
@ -152,28 +117,15 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
if volumeID == "" {
return nil, status.Error(codes.InvalidArgument, "volume id is empty")
}
nfsVol, err := getNfsVolFromID(volumeID)
nfsVol, err := cs.getNfsVolFromID(volumeID)
if err != nil {
// An invalid ID should be treated as doesn't exist
klog.Warningf("failed to get nfs volume for volume id %v deletion: %v", volumeID, err)
return &csi.DeleteVolumeResponse{}, nil
}
var volCap *csi.VolumeCapability
mountOptions := getMountOptions(req.GetSecrets())
if mountOptions != "" {
klog.V(2).Infof("DeleteVolume: found mountOptions(%s) for volume(%s)", mountOptions, volumeID)
volCap = &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
MountFlags: []string{mountOptions},
},
},
}
}
// mount nfs base share so we can delete the subdirectory
if err = cs.internalMount(ctx, nfsVol, nil, volCap); err != nil {
// Mount nfs base share so we can delete the subdirectory
if err = cs.internalMount(ctx, nfsVol, nil); err != nil {
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
}
defer func() {
@ -182,7 +134,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
}
}()
// delete subdirectory under base-dir
// Delete subdirectory under base-dir
internalVolumePath := cs.getInternalVolumePath(nfsVol)
klog.V(2).Infof("Removing subdirectory at %v", internalVolumePath)
@ -209,16 +161,12 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := isValidVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
return &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
VolumeCapabilities: req.GetVolumeCapabilities(),
},
Message: "",
}, nil
// supports all AccessModes, no need to check capabilities here
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
}
func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
@ -253,8 +201,43 @@ func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi
return nil, status.Error(codes.Unimplemented, "")
}
func (cs *ControllerServer) validateVolumeCapabilities(caps []*csi.VolumeCapability) error {
if len(caps) == 0 {
return fmt.Errorf("volume capabilities must be provided")
}
for _, c := range caps {
if err := cs.validateVolumeCapability(c); err != nil {
return err
}
}
return nil
}
func (cs *ControllerServer) validateVolumeCapability(c *csi.VolumeCapability) error {
if c == nil {
return fmt.Errorf("volume capability must be provided")
}
// Validate access mode
accessMode := c.GetAccessMode()
if accessMode == nil {
return fmt.Errorf("volume capability access mode not set")
}
if !cs.Driver.cap[accessMode.Mode] {
return fmt.Errorf("driver does not support access mode: %v", accessMode.Mode.String())
}
// Validate access type
accessType := c.GetAccessType()
if accessType == nil {
return fmt.Errorf("volume capability access type not set")
}
return nil
}
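For reference, here is the shape of a capability that passes both checks in `validateVolumeCapability` — a sketch using the CSI spec package this file already imports; the access mode is chosen for illustration and still has to be registered in the driver's `cap` map:

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// Mount-type access with an explicit access mode: AccessMode and
	// AccessType are both non-nil, so only the driver's cap-map lookup
	// can still reject this capability.
	c := &csi.VolumeCapability{
		AccessType: &csi.VolumeCapability_Mount{
			Mount: &csi.VolumeCapability_MountVolume{},
		},
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
		},
	}
	fmt.Println(c.GetAccessMode().GetMode(), c.GetAccessType() != nil)
}
```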
// Mount nfs server at base-dir
func (cs *ControllerServer) internalMount(ctx context.Context, vol *nfsVolume, volumeContext map[string]string, volCap *csi.VolumeCapability) error {
func (cs *ControllerServer) internalMount(ctx context.Context, vol *nfsVolume, volCap *csi.VolumeCapability) error {
sharePath := filepath.Join(string(filepath.Separator) + vol.baseDir)
targetPath := cs.getInternalMountPath(vol)
@ -266,16 +249,13 @@ func (cs *ControllerServer) internalMount(ctx context.Context, vol *nfsVolume, v
}
}
if volumeContext == nil {
volumeContext = make(map[string]string)
}
volumeContext[paramServer] = vol.server
volumeContext[paramShare] = sharePath
klog.V(2).Infof("internally mounting %v:%v at %v", vol.server, sharePath, targetPath)
klog.V(4).Infof("internally mounting %v:%v at %v", vol.server, sharePath, targetPath)
_, err := cs.Driver.ns.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
TargetPath: targetPath,
VolumeContext: volumeContext,
TargetPath: targetPath,
VolumeContext: map[string]string{
paramServer: vol.server,
paramShare: sharePath,
},
VolumeCapability: volCap,
VolumeId: vol.id,
})
@ -297,18 +277,25 @@ func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume)
// Convert VolumeCreate parameters to an nfsVolume
func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[string]string) (*nfsVolume, error) {
var server, baseDir string
var (
server string
baseDir string
)
// validate parameters (case-insensitive)
// Validate parameters (case-insensitive).
// TODO do more strict validation.
for k, v := range params {
switch strings.ToLower(k) {
case paramServer:
server = v
case paramShare:
baseDir = v
default:
return nil, fmt.Errorf("invalid parameter %q", k)
}
}
// Validate required parameters
if server == "" {
return nil, fmt.Errorf("%v is a required parameter", paramServer)
}
@ -329,7 +316,11 @@ func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[str
// Get working directory for CreateVolume and DeleteVolume
func (cs *ControllerServer) getInternalMountPath(vol *nfsVolume) string {
return filepath.Join(cs.Driver.workingMountDir, vol.subDir)
// use default if empty
if cs.workingMountDir == "" {
cs.workingMountDir = "/tmp"
}
return filepath.Join(cs.workingMountDir, vol.subDir)
}
// Get internal path where the volume is created
@ -348,56 +339,39 @@ func (cs *ControllerServer) getVolumeSharePath(vol *nfsVolume) string {
return filepath.Join(string(filepath.Separator), vol.baseDir, vol.subDir)
}
// Convert into nfsVolume into a csi.Volume
func (cs *ControllerServer) nfsVolToCSI(vol *nfsVolume) *csi.Volume {
return &csi.Volume{
CapacityBytes: 0, // by setting it to zero, Provisioner will use PVC requested size as PV size
VolumeId: vol.id,
VolumeContext: map[string]string{
paramServer: vol.server,
paramShare: cs.getVolumeSharePath(vol),
},
}
}
// Given a nfsVolume, return a CSI volume id
func (cs *ControllerServer) getVolumeIDFromNfsVol(vol *nfsVolume) string {
idElements := make([]string, totalIDElements)
idElements[idServer] = strings.Trim(vol.server, "/")
idElements[idBaseDir] = strings.Trim(vol.baseDir, "/")
idElements[idSubDir] = strings.Trim(vol.subDir, "/")
return strings.Join(idElements, separator)
return strings.Join(idElements, "/")
}
// Given a CSI volume id, return an nfsVolume
// sample volume Id:
// new volumeID: nfs-server.default.svc.cluster.local#share#pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
// old volumeID: nfs-server.default.svc.cluster.local/share/pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
func getNfsVolFromID(id string) (*nfsVolume, error) {
var server, baseDir, subDir string
segments := strings.Split(id, separator)
if len(segments) < 3 {
klog.V(2).Infof("could not split %s into server, baseDir and subDir with separator(%s)", id, separator)
// try with separator "/"
volRegex := regexp.MustCompile("^([^/]+)/(.*)/([^/]+)$")
tokens := volRegex.FindStringSubmatch(id)
if tokens == nil {
return nil, fmt.Errorf("could not split %s into server, baseDir and subDir with separator(%s)", id, "/")
}
server = tokens[1]
baseDir = tokens[2]
subDir = tokens[3]
} else {
server = segments[0]
baseDir = segments[1]
subDir = segments[2]
func (cs *ControllerServer) getNfsVolFromID(id string) (*nfsVolume, error) {
volRegex := regexp.MustCompile("^([^/]+)/(.*)/([^/]+)$")
tokens := volRegex.FindStringSubmatch(id)
if tokens == nil {
return nil, fmt.Errorf("Could not split %q into server, baseDir and subDir", id)
}
return &nfsVolume{
id: id,
server: server,
baseDir: baseDir,
subDir: subDir,
server: tokens[1],
baseDir: tokens[2],
subDir: tokens[3],
}, nil
}
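
The two layouts in the comment above differ only in their separator. A sketch that mirrors the fallback logic of the first getNfsVolFromID shown, assuming the separator constant (defined outside this excerpt) is "#", consistent with the "new volumeID" sample; it uses only imports already present in this file:

// Hypothetical helper: try the "#"-separated layout, then the "/" regex.
func parseVolumeID(id string) (server, baseDir, subDir string, err error) {
	if segments := strings.Split(id, "#"); len(segments) == 3 {
		return segments[0], segments[1], segments[2], nil
	}
	volRegex := regexp.MustCompile("^([^/]+)/(.*)/([^/]+)$")
	tokens := volRegex.FindStringSubmatch(id)
	if tokens == nil {
		return "", "", "", fmt.Errorf("could not split %q into server, baseDir and subDir", id)
	}
	return tokens[1], tokens[2], tokens[3], nil
}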
// isValidVolumeCapabilities checks whether the given VolumeCapability slice is valid
func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) error {
if len(volCaps) == 0 {
return fmt.Errorf("volume capabilities missing in request")
}
for _, c := range volCaps {
if c.GetBlock() != nil {
return fmt.Errorf("block volume capability not supported")
}
}
return nil
}
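
In miniature, what this validator accepts and rejects (the same cases TestIsValidVolumeCapabilities walks through further down): mount capabilities pass, block capabilities and empty slices fail.

mountCaps := []*csi.VolumeCapability{{
	AccessType: &csi.VolumeCapability_Mount{Mount: &csi.VolumeCapability_MountVolume{}},
}}
blockCaps := []*csi.VolumeCapability{{
	AccessType: &csi.VolumeCapability_Block{Block: &csi.VolumeCapability_BlockVolume{}},
}}
fmt.Println(isValidVolumeCapabilities(mountCaps)) // <nil>
fmt.Println(isValidVolumeCapabilities(blockCaps)) // block volume capability not supported
fmt.Println(isValidVolumeCapabilities(nil))       // volume capabilities missing in request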

View File

@ -20,7 +20,6 @@ import (
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
@ -34,14 +33,12 @@ import (
)
const (
testServer = "test-server"
testBaseDir = "test-base-dir"
testBaseDirNested = "test/base/dir"
testCSIVolume = "test-csi"
testVolumeID = "test-server/test-base-dir/test-csi"
newTestVolumeID = "test-server#test-base-dir#test-csi"
testVolumeIDNested = "test-server/test/base/dir/test-csi"
newTestVolumeIDNested = "test-server#test/base/dir#test-csi"
testServer = "test-server"
testBaseDir = "test-base-dir"
testBaseDirNested = "test/base/dir"
testCSIVolume = "test-csi"
testVolumeID = "test-server/test-base-dir/test-csi"
testVolumeIDNested = "test-server/test/base/dir/test-csi"
)
// for Windows support in the future
@ -50,13 +47,12 @@ var (
)
func initTestController(t *testing.T) *ControllerServer {
var perm *uint32
mounter := &mount.FakeMounter{MountPoints: []mount.MountPoint{}}
driver := NewDriver(&DriverOptions{
WorkingMountDir: "/tmp",
MountPermissions: 0777,
})
driver := NewNFSdriver("", "", "", perm)
driver.ns = NewNodeServer(driver, mounter)
cs := NewControllerServer(driver)
cs.workingMountDir = "/tmp"
return cs
}
@ -87,37 +83,6 @@ func TestCreateVolume(t *testing.T) {
}{
{
name: "valid defaults",
req: &csi.CreateVolumeRequest{
Name: testCSIVolume,
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
Parameters: map[string]string{
paramServer: testServer,
paramShare: testBaseDir,
mountPermissionsField: "0750",
},
},
resp: &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: newTestVolumeID,
VolumeContext: map[string]string{
paramServer: testServer,
paramShare: testShare,
mountPermissionsField: "0750",
},
},
},
},
{
name: "valid defaults with newTestVolumeID",
req: &csi.CreateVolumeRequest{
Name: testCSIVolume,
VolumeCapabilities: []*csi.VolumeCapability{
@ -137,7 +102,7 @@ func TestCreateVolume(t *testing.T) {
},
resp: &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: newTestVolumeID,
VolumeId: testVolumeID,
VolumeContext: map[string]string{
paramServer: testServer,
paramShare: testShare,
@ -165,6 +130,24 @@ func TestCreateVolume(t *testing.T) {
},
expectErr: true,
},
{
name: "invalid volume capability",
req: &csi.CreateVolumeRequest{
Name: testCSIVolume,
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
Parameters: map[string]string{
paramServer: testServer,
paramShare: testBaseDir,
},
},
expectErr: true,
},
{
name: "invalid create context",
req: &csi.CreateVolumeRequest{
@ -185,28 +168,6 @@ func TestCreateVolume(t *testing.T) {
},
expectErr: true,
},
{
name: "[Error] invalid mountPermissions",
req: &csi.CreateVolumeRequest{
Name: testCSIVolume,
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
Parameters: map[string]string{
paramServer: testServer,
paramShare: testBaseDir,
mountPermissionsField: "07ab",
},
},
expectErr: true,
},
}
for _, test := range cases {
@ -228,7 +189,7 @@ func TestCreateVolume(t *testing.T) {
t.Errorf("test %q failed: got resp %+v, expected %+v", test.name, resp, test.resp)
}
if !test.expectErr {
info, err := os.Stat(filepath.Join(cs.Driver.workingMountDir, test.req.Name, test.req.Name))
info, err := os.Stat(filepath.Join(cs.workingMountDir, test.req.Name, test.req.Name))
if err != nil {
t.Errorf("test %q failed: couldn't find volume subdirectory: %v", test.name, err)
}
@ -242,47 +203,37 @@ func TestCreateVolume(t *testing.T) {
func TestDeleteVolume(t *testing.T) {
cases := []struct {
desc string
testOnWindows bool
req *csi.DeleteVolumeRequest
resp *csi.DeleteVolumeResponse
expectedErr error
desc string
req *csi.DeleteVolumeRequest
resp *csi.DeleteVolumeResponse
expectedErr error
}{
{
desc: "Volume ID missing",
testOnWindows: true,
req: &csi.DeleteVolumeRequest{},
resp: nil,
expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
desc: "Volume ID missing",
req: &csi.DeleteVolumeRequest{},
resp: nil,
expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
},
{
desc: "Valid request",
testOnWindows: false,
req: &csi.DeleteVolumeRequest{VolumeId: testVolumeID},
resp: &csi.DeleteVolumeResponse{},
expectedErr: nil,
},
{
desc: "Valid request with newTestVolumeID",
testOnWindows: true,
req: &csi.DeleteVolumeRequest{VolumeId: newTestVolumeID},
resp: &csi.DeleteVolumeResponse{},
expectedErr: nil,
desc: "Valid request",
req: &csi.DeleteVolumeRequest{VolumeId: testVolumeID},
resp: &csi.DeleteVolumeResponse{},
expectedErr: nil,
},
}
for _, test := range cases {
test := test //pin
if runtime.GOOS == "windows" && !test.testOnWindows {
continue
}
t.Run(test.desc, func(t *testing.T) {
// Setup
cs := initTestController(t)
_ = os.MkdirAll(filepath.Join(cs.Driver.workingMountDir, testCSIVolume), os.ModePerm)
_, _ = os.Create(filepath.Join(cs.Driver.workingMountDir, testCSIVolume, testCSIVolume))
_ = os.MkdirAll(filepath.Join(cs.workingMountDir, testCSIVolume), os.ModePerm)
_, _ = os.Create(filepath.Join(cs.workingMountDir, testCSIVolume, testCSIVolume))
// Run
resp, err := cs.DeleteVolume(context.TODO(), test.req)
// Verify
if test.expectedErr == nil && err != nil {
t.Errorf("test %q failed: %v", test.desc, err)
}
@ -292,13 +243,75 @@ func TestDeleteVolume(t *testing.T) {
if !reflect.DeepEqual(resp, test.resp) {
t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
}
if _, err := os.Stat(filepath.Join(cs.Driver.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil && !os.IsNotExist(err) {
if _, err := os.Stat(filepath.Join(cs.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil && !os.IsNotExist(err) {
t.Errorf("test %q failed: expected volume subdirectory deleted, it still exists", test.desc)
}
})
}
}
func TestValidateVolumeCapabilities(t *testing.T) {
cases := []struct {
desc string
req *csi.ValidateVolumeCapabilitiesRequest
resp *csi.ValidateVolumeCapabilitiesResponse
expectedErr error
}{
{
desc: "Volume ID missing",
req: &csi.ValidateVolumeCapabilitiesRequest{},
resp: nil,
expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
},
{
desc: "Volume capabilities missing",
req: &csi.ValidateVolumeCapabilitiesRequest{VolumeId: testVolumeID},
resp: nil,
expectedErr: status.Error(codes.InvalidArgument, "Volume capabilities missing in request"),
},
{
desc: "valid request",
req: &csi.ValidateVolumeCapabilitiesRequest{
VolumeId: testVolumeID,
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
},
resp: &csi.ValidateVolumeCapabilitiesResponse{Message: ""},
expectedErr: nil,
},
}
for _, test := range cases {
test := test //pin
t.Run(test.desc, func(t *testing.T) {
// Setup
cs := initTestController(t)
// Run
resp, err := cs.ValidateVolumeCapabilities(context.TODO(), test.req)
// Verify
if test.expectedErr == nil && err != nil {
t.Errorf("test %q failed: %v", test.desc, err)
}
if test.expectedErr != nil && err == nil {
t.Errorf("test %q failed; expected error %v, got success", test.desc, test.expectedErr)
}
if !reflect.DeepEqual(resp, test.resp) {
t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
}
})
}
}
func TestControllerGetCapabilities(t *testing.T) {
cases := []struct {
desc string
@ -357,25 +370,25 @@ func TestControllerGetCapabilities(t *testing.T) {
func TestNfsVolFromId(t *testing.T) {
cases := []struct {
name string
volumeID string
req string
resp *nfsVolume
expectErr bool
}{
{
name: "ID only server",
volumeID: testServer,
req: testServer,
resp: nil,
expectErr: true,
},
{
name: "ID missing subDir",
volumeID: strings.Join([]string{testServer, testBaseDir}, "/"),
req: strings.Join([]string{testServer, testBaseDir}, "/"),
resp: nil,
expectErr: true,
},
{
name: "valid request single baseDir",
volumeID: testVolumeID,
name: "valid request single baseDir",
req: testVolumeID,
resp: &nfsVolume{
id: testVolumeID,
server: testServer,
@ -385,19 +398,8 @@ func TestNfsVolFromId(t *testing.T) {
expectErr: false,
},
{
name: "valid request single baseDir with newTestVolumeID",
volumeID: newTestVolumeID,
resp: &nfsVolume{
id: newTestVolumeID,
server: testServer,
baseDir: testBaseDir,
subDir: testCSIVolume,
},
expectErr: false,
},
{
name: "valid request nested baseDir",
volumeID: testVolumeIDNested,
name: "valid request nested baseDir",
req: testVolumeIDNested,
resp: &nfsVolume{
id: testVolumeIDNested,
server: testServer,
@ -406,24 +408,18 @@ func TestNfsVolFromId(t *testing.T) {
},
expectErr: false,
},
{
name: "valid request nested baseDir with newTestVolumeIDNested",
volumeID: newTestVolumeIDNested,
resp: &nfsVolume{
id: newTestVolumeIDNested,
server: testServer,
baseDir: testBaseDirNested,
subDir: testCSIVolume,
},
expectErr: false,
},
}
for _, test := range cases {
test := test //pin
t.Run(test.name, func(t *testing.T) {
resp, err := getNfsVolFromID(test.volumeID)
// Setup
cs := initTestController(t)
// Run
resp, err := cs.getNfsVolFromID(test.req)
// Verify
if !test.expectErr && err != nil {
t.Errorf("test %q failed: %v", test.name, err)
}
@ -436,46 +432,3 @@ func TestNfsVolFromId(t *testing.T) {
})
}
}
func TestIsValidVolumeCapabilities(t *testing.T) {
mountVolumeCapabilities := []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
},
}
blockVolumeCapabilities := []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
},
}
cases := []struct {
desc string
volCaps []*csi.VolumeCapability
expectErr error
}{
{
volCaps: mountVolumeCapabilities,
expectErr: nil,
},
{
volCaps: blockVolumeCapabilities,
expectErr: fmt.Errorf("block volume capability not supported"),
},
{
volCaps: []*csi.VolumeCapability{},
expectErr: fmt.Errorf("volume capabilities missing in request"),
},
}
for _, test := range cases {
err := isValidVolumeCapabilities(test.volCaps)
if !reflect.DeepEqual(err, test.expectErr) {
t.Errorf("[test: %s] Unexpected error: %v, expected error: %v", test.desc, err, test.expectErr)
}
}
}

View File

@ -17,30 +17,25 @@ limitations under the License.
package nfs
import (
"fmt"
"github.com/container-storage-interface/spec/lib/go/csi"
"k8s.io/klog/v2"
mount "k8s.io/mount-utils"
)
// DriverOptions defines driver parameters specified in driver deployment
type DriverOptions struct {
NodeID string
DriverName string
Endpoint string
MountPermissions uint64
WorkingMountDir string
}
type Driver struct {
name string
nodeID string
version string
endpoint string
mountPermissions uint64
workingMountDir string
name string
nodeID string
version string
endpoint string
perm *uint32
//ids *identityServer
ns *NodeServer
cap map[csi.VolumeCapability_AccessMode_Mode]bool
cscap []*csi.ControllerServiceCapability
nscap []*csi.NodeServiceCapability
volumeLocks *VolumeLocks
@ -54,23 +49,39 @@ const (
// The base directory must be a direct child of the root directory.
// The root directory is omitted from the string, for example:
// "base" instead of "/base"
paramShare = "share"
mountOptionsField = "mountoptions"
mountPermissionsField = "mountpermissions"
paramShare = "share"
)
func NewDriver(options *DriverOptions) *Driver {
klog.V(2).Infof("Driver: %v version: %v", options.DriverName, driverVersion)
var (
version = "3.0.0"
)
func NewNFSdriver(nodeID, driverName, endpoint string, perm *uint32) *Driver {
klog.Infof("Driver: %v version: %v", driverName, version)
n := &Driver{
name: options.DriverName,
version: driverVersion,
nodeID: options.NodeID,
endpoint: options.Endpoint,
mountPermissions: options.MountPermissions,
workingMountDir: options.WorkingMountDir,
name: driverName,
version: version,
nodeID: nodeID,
endpoint: endpoint,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
perm: perm,
}
vcam := []csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
}
n.AddVolumeCapabilityAccessModes(vcam)
// NFS plugin does not support ControllerServiceCapability now.
// If support is added, it should be set to the appropriate
// ControllerServiceCapability RPC types.
n.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
@ -97,7 +108,7 @@ func (n *Driver) Run(testMode bool) {
if err != nil {
klog.Fatalf("%v", err)
}
klog.V(2).Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta)
klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta)
n.ns = NewNodeServer(n, mount.New(""))
s := NewNonBlockingGRPCServer()
@ -111,17 +122,31 @@ func (n *Driver) Run(testMode bool) {
s.Wait()
}
func (n *Driver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
var vca []*csi.VolumeCapability_AccessMode
for _, c := range vc {
klog.Infof("Enabling volume access mode: %v", c.String())
vca = append(vca, &csi.VolumeCapability_AccessMode{Mode: c})
n.cap[c] = true
}
return vca
}
func (n *Driver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) {
var csc []*csi.ControllerServiceCapability
for _, c := range cl {
klog.Infof("Enabling controller service capability: %v", c.String())
csc = append(csc, NewControllerServiceCapability(c))
}
n.cscap = csc
}
func (n *Driver) AddNodeServiceCapabilities(nl []csi.NodeServiceCapability_RPC_Type) {
var nsc []*csi.NodeServiceCapability
for _, n := range nl {
klog.Infof("Enabling node service capability: %v", n.String())
nsc = append(nsc, NewNodeServiceCapability(n))
}
n.nscap = nsc
@ -129,5 +154,6 @@ func (n *Driver) AddNodeServiceCapabilities(nl []csi.NodeServiceCapability_RPC_T
func IsCorruptedDir(dir string) bool {
_, pathErr := mount.PathExists(dir)
fmt.Printf("IsCorruptedDir(%s) returned with error: %v", dir, pathErr)
return pathErr != nil && mount.IsCorruptedMnt(pathErr)
}
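
PathExists surfaces the stat error for a broken mount, and IsCorruptedMnt matches that class of error (ESTALE, ENOTCONN, EIO and similar), so IsCorruptedDir is true only when the directory fails to stat because its mount is corrupted. A small illustration; /mnt/broken is a hypothetical NFS mount whose server has gone away:

fmt.Println(IsCorruptedDir("/mnt/broken")) // true: stat fails with e.g. ESTALE
fmt.Println(IsCorruptedDir("/tmp"))        // false: healthy (or simply absent) path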

View File

@ -32,24 +32,31 @@ const (
func NewEmptyDriver(emptyField string) *Driver {
var d *Driver
var perm *uint32
switch emptyField {
case "version":
d = &Driver{
name: DefaultDriverName,
version: "",
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
perm: perm,
}
case "name":
d = &Driver{
name: "",
version: driverVersion,
version: version,
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
perm: perm,
}
default:
d = &Driver{
name: DefaultDriverName,
version: driverVersion,
version: version,
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
perm: perm,
}
}
d.volumeLocks = NewVolumeLocks()

View File

@ -19,7 +19,6 @@ package nfs
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/container-storage-interface/spec/lib/go/csi"
@ -39,8 +38,7 @@ type NodeServer struct {
// NodePublishVolume mount the volume
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
volCap := req.GetVolumeCapability()
if volCap == nil {
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
}
volumeID := req.GetVolumeId()
@ -51,53 +49,11 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path not provided")
}
mountOptions := volCap.GetMount().GetMountFlags()
if req.GetReadonly() {
mountOptions = append(mountOptions, "ro")
}
var server, baseDir string
mountPermissions := ns.Driver.mountPermissions
performChmodOp := (mountPermissions > 0)
for k, v := range req.GetVolumeContext() {
switch strings.ToLower(k) {
case paramServer:
server = v
case paramShare:
baseDir = v
case mountOptionsField:
if v != "" {
mountOptions = append(mountOptions, v)
}
case mountPermissionsField:
if v != "" {
var err error
var perm uint64
if perm, err = strconv.ParseUint(v, 8, 32); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "invalid mountPermissions %s", v)
}
if perm == 0 {
performChmodOp = false
} else {
mountPermissions = perm
}
}
}
}
if server == "" {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("%v is a required parameter", paramServer))
}
if baseDir == "" {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("%v is a required parameter", paramShare))
}
server = getServerFromSource(server)
source := fmt.Sprintf("%s:%s", server, baseDir)
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(targetPath, os.FileMode(mountPermissions)); err != nil {
if err := os.MkdirAll(targetPath, 0750); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notMnt = true
@ -109,6 +65,15 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return &csi.NodePublishVolumeResponse{}, nil
}
mountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()
if req.GetReadonly() {
mountOptions = append(mountOptions, "ro")
}
s := req.GetVolumeContext()[paramServer]
ep := req.GetVolumeContext()[paramShare]
source := fmt.Sprintf("%s:%s", s, ep)
klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions)
err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions)
if err != nil {
@ -121,14 +86,12 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return nil, status.Error(codes.Internal, err.Error())
}
if performChmodOp {
if err := chmodIfPermissionMismatch(targetPath, os.FileMode(mountPermissions)); err != nil {
if ns.Driver.perm != nil {
if err := os.Chmod(targetPath, os.FileMode(*ns.Driver.perm)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
} else {
klog.V(2).Infof("skip chmod on targetPath(%s) since mountPermissions is set as 0", targetPath)
}
klog.V(2).Infof("volume(%s) mount %s on %s succeeded", volumeID, source, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
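
The branch of NodePublishVolume above that handles mountPermissionsField parses the value as an octal string, treats "0" as "mount but skip the chmod", and rejects anything non-octal. The parse in isolation, as a sketch:

v := "0755" // from the volume context; "0" disables chmod, "07ab" is rejected
perm, err := strconv.ParseUint(v, 8, 32)
if err != nil {
	// surfaces as status.Errorf(codes.InvalidArgument, "invalid mountPermissions %s", v)
}
_ = os.FileMode(perm) // 0755, i.e. rwxr-xr-x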
@ -142,13 +105,23 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
klog.V(2).Infof("NodeUnpublishVolume: unmounting volume %s on %s", volumeID, targetPath)
err := mount.CleanupMountPoint(targetPath, ns.mounter, true /*extensiveMountPointCheck*/)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to unmount target %q: %v", targetPath, err)
if os.IsNotExist(err) {
return nil, status.Error(codes.NotFound, "Targetpath not found")
}
return nil, status.Error(codes.Internal, err.Error())
}
if notMnt {
return nil, status.Error(codes.NotFound, "Volume not mounted")
}
klog.V(2).Infof("NodeUnpublishVolume: CleanupMountPoint %s on volumeID(%s)", targetPath, volumeID)
err = mount.CleanupMountPoint(targetPath, ns.mounter, false)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(2).Infof("NodeUnpublishVolume: unmount volume %s on %s successfully", volumeID, targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
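
mount.CleanupMountPoint from k8s.io/mount-utils folds the separate IsLikelyNotMountPoint / unmount / remove steps into one call: it unmounts the target if it is still mounted and then deletes the directory. Passing extensiveMountPointCheck=true switches the probe to IsNotMountPoint, which is slower but also detects bind mounts:

if err := mount.CleanupMountPoint(targetPath, ns.mounter, true /* extensiveMountPointCheck */); err != nil {
	return nil, status.Errorf(codes.Internal, "failed to unmount target %q: %v", targetPath, err)
}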

View File

@ -19,10 +19,8 @@ package nfs
import (
"context"
"errors"
"fmt"
"os"
"reflect"
"strings"
"testing"
"github.com/container-storage-interface/spec/lib/go/csi"
@ -42,23 +40,6 @@ func TestNodePublishVolume(t *testing.T) {
t.Fatal(err.Error())
}
params := map[string]string{
"server": "server",
"share": "share",
mountPermissionsField: "0755",
}
paramsWithZeroPermissions := map[string]string{
"server": "server",
"share": "share",
mountPermissionsField: "0",
}
invalidParams := map[string]string{
"server": "server",
"share": "share",
mountPermissionsField: "07ab",
}
volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}
alreadyMountedTarget := testutil.GetWorkDirPath("false_is_likely_exist_target", t)
targetTest := testutil.GetWorkDirPath("target_test", t)
@ -89,68 +70,39 @@ func TestNodePublishVolume(t *testing.T) {
},
{
desc: "[Success] Stage target path missing",
req: csi.NodePublishVolumeRequest{
VolumeContext: params,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest},
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest},
expectedErr: nil,
},
{
desc: "[Success] Valid request read only",
req: csi.NodePublishVolumeRequest{
VolumeContext: params,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: nil,
},
{
desc: "[Success] Valid request already mounted",
req: csi.NodePublishVolumeRequest{
VolumeContext: params,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: alreadyMountedTarget,
Readonly: true},
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: alreadyMountedTarget,
Readonly: true},
expectedErr: nil,
},
{
desc: "[Success] Valid request",
req: csi.NodePublishVolumeRequest{
VolumeContext: params,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: nil,
},
{
desc: "[Success] Valid request with 0 mountPermissions",
req: csi.NodePublishVolumeRequest{
VolumeContext: paramsWithZeroPermissions,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: nil,
},
{
desc: "[Error] invalid mountPermissions",
req: csi.NodePublishVolumeRequest{
VolumeContext: invalidParams,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: status.Error(codes.InvalidArgument, "invalid mountPermissions 07ab"),
},
}
// setup
_ = makeDir(alreadyMountedTarget)
_ = makeDir(targetTest)
for _, tc := range tests {
if tc.setup != nil {
@ -203,11 +155,12 @@ func TestNodeUnpublishVolume(t *testing.T) {
{
desc: "[Error] Unmount error mocked by IsLikelyNotMountPoint",
req: csi.NodeUnpublishVolumeRequest{TargetPath: errorTarget, VolumeId: "vol_1"},
expectedErr: fmt.Errorf("fake IsLikelyNotMountPoint: fake error"),
expectedErr: status.Error(codes.Internal, "fake IsLikelyNotMountPoint: fake error"),
},
{
desc: "[Success] Volume not mounted",
req: csi.NodeUnpublishVolumeRequest{TargetPath: targetFile, VolumeId: "vol_1"},
desc: "[Error] Volume not mounted",
req: csi.NodeUnpublishVolumeRequest{TargetPath: targetFile, VolumeId: "vol_1"},
expectedErr: status.Error(codes.NotFound, "Volume not mounted"),
},
}
@ -220,9 +173,7 @@ func TestNodeUnpublishVolume(t *testing.T) {
}
_, err := ns.NodeUnpublishVolume(context.Background(), &tc.req)
if !reflect.DeepEqual(err, tc.expectedErr) {
if err == nil || tc.expectedErr == nil || !strings.Contains(err.Error(), tc.expectedErr.Error()) {
t.Errorf("Desc:%v\nUnexpected error: %v\nExpected: %v", tc.desc, err, tc.expectedErr)
}
t.Errorf("Desc:%v\nUnexpected error: %v\nExpected: %v", tc.desc, err, tc.expectedErr)
}
if tc.cleanup != nil {
tc.cleanup()

View File

@ -18,7 +18,6 @@ package nfs
import (
"fmt"
"os"
"strings"
"sync"
@ -29,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
netutil "k8s.io/utils/net"
)
func NewDefaultIdentityServer(d *Driver) *IdentityServer {
@ -123,40 +121,3 @@ func (vl *VolumeLocks) Release(volumeID string) {
defer vl.mux.Unlock()
vl.locks.Delete(volumeID)
}
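
Release is one half of the per-volume serialization wrapped around the CSI RPCs; the acquire side is outside this hunk. A sketch of the usual guard pattern, where TryAcquire is an assumed name for the acquire method rather than something shown in this diff:

// TryAcquire is assumed here (only Release appears above).
if ok := cs.Driver.volumeLocks.TryAcquire(volumeID); !ok {
	return nil, status.Errorf(codes.Aborted, "an operation with volume %s already exists", volumeID)
}
defer cs.Driver.volumeLocks.Release(volumeID)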
// getMountOptions get mountOptions value from a map
func getMountOptions(context map[string]string) string {
for k, v := range context {
switch strings.ToLower(k) {
case mountOptionsField:
return v
}
}
return ""
}
// chmodIfPermissionMismatch only perform chmod when permission mismatches
func chmodIfPermissionMismatch(targetPath string, mode os.FileMode) error {
info, err := os.Lstat(targetPath)
if err != nil {
return err
}
perm := info.Mode() & os.ModePerm
if perm != mode {
klog.V(2).Infof("chmod targetPath(%s, mode:0%o) with permissions(0%o)", targetPath, info.Mode(), mode)
if err := os.Chmod(targetPath, mode); err != nil {
return err
}
} else {
klog.V(2).Infof("skip chmod on targetPath(%s) since mode is already 0%o)", targetPath, info.Mode())
}
return nil
}
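
On NFS a chmod is a round trip to the server (a SETATTR), so comparing the current mode first keeps repeated publishes idempotent and skips the call when nothing changed. The typical call shape, mirroring its use in NodePublishVolume above:

if err := chmodIfPermissionMismatch(targetPath, os.FileMode(mountPermissions)); err != nil {
	return nil, status.Error(codes.Internal, err.Error())
}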
// getServerFromSource if server is IPv6, return [IPv6]
func getServerFromSource(server string) string {
if netutil.IsIPv6String(server) {
return fmt.Sprintf("[%s]", server)
}
return server
}
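
The bracketing matters because the mount source is assembled as "server:path" (see NodePublishVolume above); without brackets, an IPv6 address's own colons would make that form ambiguous:

server := getServerFromSource("0:0:0:0:0:0:0:1") // "[0:0:0:0:0:0:0:1]"
source := fmt.Sprintf("%s:%s", server, "/share") // "[0:0:0:0:0:0:0:1]:/share"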

View File

@ -18,9 +18,6 @@ package nfs
import (
"fmt"
"os"
"reflect"
"strings"
"testing"
)
@ -121,129 +118,3 @@ func TestGetLogLevel(t *testing.T) {
}
}
}
func TestGetMountOptions(t *testing.T) {
tests := []struct {
desc string
context map[string]string
result string
}{
{
desc: "nil context",
context: nil,
result: "",
},
{
desc: "empty context",
context: map[string]string{},
result: "",
},
{
desc: "valid mountOptions",
context: map[string]string{"mountOptions": "nfsvers=3"},
result: "nfsvers=3",
},
{
desc: "valid mountOptions(lowercase)",
context: map[string]string{"mountoptions": "nfsvers=4"},
result: "nfsvers=4",
},
}
for _, test := range tests {
result := getMountOptions(test.context)
if result != test.result {
t.Errorf("Unexpected result: %s, expected: %s", result, test.result)
}
}
}
func TestChmodIfPermissionMismatch(t *testing.T) {
permissionMatchingPath, _ := getWorkDirPath("permissionMatchingPath")
_ = makeDir(permissionMatchingPath)
defer os.RemoveAll(permissionMatchingPath)
permissionMismatchPath, _ := getWorkDirPath("permissionMismatchPath")
_ = os.MkdirAll(permissionMismatchPath, os.FileMode(0721))
defer os.RemoveAll(permissionMismatchPath)
tests := []struct {
desc string
path string
mode os.FileMode
expectedError error
}{
{
desc: "Invalid path",
path: "invalid-path",
mode: 0755,
expectedError: fmt.Errorf("CreateFile invalid-path: The system cannot find the file specified"),
},
{
desc: "permission matching path",
path: permissionMatchingPath,
mode: 0755,
expectedError: nil,
},
{
desc: "permission mismatch path",
path: permissionMismatchPath,
mode: 0755,
expectedError: nil,
},
}
for _, test := range tests {
err := chmodIfPermissionMismatch(test.path, test.mode)
if !reflect.DeepEqual(err, test.expectedError) {
if err == nil || test.expectedError == nil && !strings.Contains(err.Error(), test.expectedError.Error()) {
t.Errorf("test[%s]: unexpected error: %v, expected error: %v", test.desc, err, test.expectedError)
}
}
}
}
// getWorkDirPath returns the path to the current working directory
func getWorkDirPath(dir string) (string, error) {
path, err := os.Getwd()
if err != nil {
return "", err
}
return fmt.Sprintf("%s%c%s", path, os.PathSeparator, dir), nil
}
func TestGetServerFromSource(t *testing.T) {
tests := []struct {
desc string
server string
result string
}{
{
desc: "ipv4",
server: "10.127.0.1",
result: "10.127.0.1",
},
{
desc: "ipv6",
server: "0:0:0:0:0:0:0:1",
result: "[0:0:0:0:0:0:0:1]",
},
{
desc: "ipv6 with brackets",
server: "[0:0:0:0:0:0:0:2]",
result: "[0:0:0:0:0:0:0:2]",
},
{
desc: "other fqdn",
server: "bing.com",
result: "bing.com",
},
}
for _, test := range tests {
result := getServerFromSource(test.server)
if result != test.result {
t.Errorf("Unexpected result: %s, expected: %s", result, test.result)
}
}
}

Some files were not shown because too many files have changed in this diff.