Compare commits

..

No commits in common. "master" and "v3.1.0" have entirely different histories.

175 changed files with 6925 additions and 4363 deletions

View File

@ -1,66 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master, 'release-**' ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master, 'release-**' ]
schedule:
- cron: '0 */24 * * *'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.18
id: go
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
run: |
make all
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

View File

@ -1,15 +0,0 @@
# GitHub Action to automate the identification of common misspellings in text files.
# https://github.com/codespell-project/actions-codespell
# https://github.com/codespell-project/codespell
name: codespell
on: [push, pull_request]
jobs:
codespell:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: codespell-project/actions-codespell@master
with:
check_filenames: true
skip: ./.git,./.github/workflows/codespell.yml,.git,*.png,*.jpg,*.svg,*.sum,./vendor,go.sum,./release-tools/prow.sh

View File

@ -1,29 +0,0 @@
name: ShellCheck
on:
push:
tags:
- v*
branches:
- master
- release-*
pull_request:
branches:
- master
- release-*
jobs:
shellcheck:
name: Shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@master
env:
SHELLCHECK_OPTS: -e SC2034
with:
severity: warning
check_together: 'yes'
disable_matcher: false
ignore_paths: vendor release-tools hack
format: gcc

View File

@ -12,8 +12,8 @@ jobs:
- name: Run linter
uses: golangci/golangci-lint-action@v2
with:
version: v1.45
args: -E=gofmt,deadcode,unused,varcheck,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,contextcheck --timeout=30m0s
version: v1.31
args: -E=gofmt,deadcode,unused,varcheck,ineffassign,golint,misspell --timeout=30m0s
verify-helm:
name: Verify Helm
runs-on: ubuntu-latest

View File

@ -1,5 +1,3 @@
#! /bin/bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#! /bin/bash
# A Prow job can override these defaults, but this shouldn't be necessary.
# Only these tests make sense for csi-driver-nfs until we can integrate k/k

View File

@ -12,12 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM registry.k8s.io/build-image/debian-base:bullseye-v1.2.0
FROM k8s.gcr.io/build-image/debian-base:bullseye-v1.0.0
# Architecture for bin folder
ARG ARCH
ARG binary=./bin/${ARCH}/nfsplugin
COPY ${binary} /nfsplugin
RUN apt update && apt upgrade -y && apt-mark unhold libcap2 && clean-install ca-certificates mount nfs-common netbase
# Copy nfsplugin from build _output directory
COPY bin/${ARCH}/nfsplugin /nfsplugin
RUN apt update && apt-mark unhold libcap2
RUN clean-install ca-certificates mount nfs-common netbase
# install updated packages to fix CVE issues
RUN clean-install libssl1.1 libgssapi-krb5-2 libk5crypto3 libkrb5-3 libkrb5support0 libgmp10
ENTRYPOINT ["/nfsplugin"]

View File

@ -27,7 +27,7 @@ include release-tools/build.make
GIT_COMMIT = $(shell git rev-parse HEAD)
BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
IMAGE_VERSION ?= v4.1.0
IMAGE_VERSION ?= v3.1.0
LDFLAGS = -X ${PKG}/pkg/nfs.driverVersion=${IMAGE_VERSION} -X ${PKG}/pkg/nfs.gitCommit=${GIT_COMMIT} -X ${PKG}/pkg/nfs.buildDate=${BUILD_DATE}
EXT_LDFLAGS = -s -w -extldflags "-static"
# Use a custom version for E2E tests if we are testing in CI
@ -48,8 +48,8 @@ E2E_HELM_OPTIONS += ${EXTRA_HELM_OPTIONS}
# Output type of docker buildx build
OUTPUT_TYPE ?= docker
ALL_ARCH.linux = arm64 amd64 ppc64le
ALL_OS_ARCH = linux-arm64 linux-arm-v7 linux-amd64 linux-ppc64le
ALL_ARCH.linux = arm64 amd64
ALL_OS_ARCH = linux-arm64 linux-arm-v7 linux-amd64
.EXPORT_ALL_VARIABLES:
@ -76,6 +76,24 @@ local-build-push: nfs
docker build -t $(LOCAL_USER)/nfsplugin:latest .
docker push $(LOCAL_USER)/nfsplugin
.PHONY: local-k8s-install
local-k8s-install:
echo "Installing locally"
kubectl apply -f $(DEPLOY_FOLDER)/rbac-csi-nfs-controller.yaml
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-driverinfo.yaml
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-controller.yaml
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-node.yaml
echo "Successfully installed"
.PHONY: local-k8s-uninstall
local-k8s-uninstall:
echo "Uninstalling driver"
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-controller.yaml --ignore-not-found
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-node.yaml --ignore-not-found
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-driverinfo.yaml --ignore-not-found
kubectl delete -f $(DEPLOY_FOLDER)/rbac-csi-nfs-controller.yaml --ignore-not-found
echo "Uninstalled NFS driver"
.PHONY: nfs
nfs:
CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) go build -a -ldflags "${LDFLAGS} ${EXT_LDFLAGS}" -mod vendor -o bin/${ARCH}/nfsplugin ./cmd/nfsplugin
@ -145,7 +163,6 @@ e2e-bootstrap: install-helm
OUTPUT_TYPE=registry $(MAKE) container push
helm install csi-driver-nfs ./charts/latest/csi-driver-nfs --namespace kube-system --wait --timeout=15m -v=5 --debug \
${E2E_HELM_OPTIONS} \
--set controller.dnsPolicy=ClusterFirstWithHostNet \
--set controller.logLevel=8 \
--set node.logLevel=8

View File

@ -1,18 +1,16 @@
# NFS CSI driver for Kubernetes
![build status](https://github.com/kubernetes-csi/csi-driver-nfs/actions/workflows/linux.yaml/badge.svg)
# CSI NFS driver
[![Coverage Status](https://coveralls.io/repos/github/kubernetes-csi/csi-driver-nfs/badge.svg?branch=master)](https://coveralls.io/github/kubernetes-csi/csi-driver-nfs?branch=master)
### Overview
This is a repository for [NFS](https://en.wikipedia.org/wiki/Network_File_System) [CSI](https://kubernetes-csi.github.io/docs/) driver, csi plugin name: `nfs.csi.k8s.io`. This driver requires existing and already configured NFSv3 or NFSv4 server, it supports dynamic provisioning of Persistent Volumes via Persistent Volume Claims by creating a new sub directory under NFS server.
### Project status: GA
### Project status: Beta
### Container Images & Kubernetes Compatibility:
|driver version | supported k8s version | status |
|----------------|-----------------------|--------|
|master branch | 1.20+ | GA |
|v4.0.0 | 1.10+ | GA |
|master branch | 1.19+ | beta |
|v3.1.0 | 1.19+ | beta |
|v3.0.0 | 1.19+ | beta |
|v2.0.0 | 1.14+ | alpha |
@ -25,6 +23,7 @@ This is a repository for [NFS](https://en.wikipedia.org/wiki/Network_File_System
Please refer to [`nfs.csi.k8s.io` driver parameters](./docs/driver-parameters.md)
### Examples
- [Set up a NFS Server on a Kubernetes cluster](./deploy/example/nfs-provisioner/README.md)
- [Basic usage](./deploy/example/README.md)
- [fsGroupPolicy](./deploy/example/fsgroup)

View File

@ -5,12 +5,13 @@
### Tips
- make controller only run on master node: `--set controller.runOnMaster=true`
- set replica of controller as `2`: `--set controller.replicas=2`
- set replica of controller as `1`: `--set controller.replicas=1`
- enable `fsGroupPolicy` on a k8s 1.20+ cluster (this feature is in beta, check details [here](../deploy/example/fsgroup)): `--set feature.enableFSGroupPolicy=true`
### install a specific version
```console
helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system --version v4.0.0
helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system --version v3.1.0
```
### install driver with customized driver name, deployment name
@ -38,27 +39,25 @@ The following table lists the configurable parameters of the latest NFS CSI Driv
|---------------------------------------------------|------------------------------------------------------------|-------------------------------------------------------------------|
| `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
| `driver.mountPermissions` | mounted folder permissions name | `0777`
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `true` |
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `false` |
| `feature.enableInlineVolume` | enable inline volume | `false` |
| `kubeletDir` | alternative kubelet directory | `/var/lib/kubelet` |
| `image.nfs.repository` | csi-driver-nfs image | `registry.k8s.io/sig-storage/nfsplugin` |
| `image.nfs.repository` | csi-driver-nfs image | `mcr.microsoft.com/k8s/csi/nfs-csi` |
| `image.nfs.tag` | csi-driver-nfs image tag | `latest` |
| `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | `IfNotPresent` |
| `image.csiProvisioner.repository` | csi-provisioner docker image | `registry.k8s.io/sig-storage/csi-provisioner` |
| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v3.1.0` |
| `image.csiProvisioner.repository` | csi-provisioner docker image | `k8s.gcr.io/sig-storage/csi-provisioner` |
| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v2.0.4` |
| `image.csiProvisioner.pullPolicy` | csi-provisioner image pull policy | `IfNotPresent` |
| `image.livenessProbe.repository` | liveness-probe docker image | `registry.k8s.io/sig-storage/livenessprobe` |
| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.6.0` |
| `image.livenessProbe.repository` | liveness-probe docker image | `k8s.gcr.io/sig-storage/livenessprobe` |
| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.5.0` |
| `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | `IfNotPresent` |
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | `registry.k8s.io/sig-storage/csi-node-driver-registrar` |
| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.5.0` |
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | `k8s.gcr.io/sig-storage/csi-node-driver-registrar` |
| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.4.0` |
| `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods) |
| `serviceAccount.create` | whether create service account of csi-nfs-controller | `true` |
| `rbac.create` | whether create rbac of csi-nfs-controller | `true` |
| `controller.replicas` | replica number of csi-nfs-controller | `1` |
| `controller.replicas` | the replicas of csi-nfs-controller | `2` |
| `controller.runOnMaster` | run controller on master node | `false` |
| `controller.dnsPolicy` | dnsPolicy of controller driver, available values: `Default`, `ClusterFirstWithHostNet`, `ClusterFirst` | `Default` |
| `controller.logLevel` | controller driver log level |`5` |
| `controller.workingMountDir` | working directory for provisioner to mount nfs shares temporarily | `/tmp` |
| `controller.tolerations` | controller pod tolerations | |
@ -72,7 +71,6 @@ The following table lists the configurable parameters of the latest NFS CSI Driv
| `controller.resources.nfs.requests.cpu` | csi-driver-nfs cpu requests limits | 10m |
| `controller.resources.nfs.requests.memory` | csi-driver-nfs memory requests limits | 20Mi |
| `node.name` | driver node daemonset name | `csi-nfs-node`
| `node.dnsPolicy` | dnsPolicy of driver node daemonset, available values: `Default`, `ClusterFirstWithHostNet`, `ClusterFirst` |
| `node.maxUnavailable` | `maxUnavailable` value of driver node daemonset | `1`
| `node.logLevel` | node driver log level |`5` |
| `node.livenessProbe.healthPort ` | the health check port for liveness probe |`29653` |

View File

@ -2,12 +2,39 @@ apiVersion: v1
entries:
csi-driver-nfs:
- apiVersion: v1
appVersion: v4.2.0
created: "2022-05-06T12:35:56.6991353Z"
appVersion: v3.1.0
created: "2022-01-15T13:54:34.069016394Z"
description: CSI NFS Driver for Kubernetes
digest: cb537287512ce9f99adaead8cd4904ed7284780bdc44c9b8d6705e66f28bfa5c
digest: 05ddbdcc552781551d3578137996e6f0616132b1a068b03663224cd031e59739
name: csi-driver-nfs
urls:
- https://gitea.devindata.com/devindata-public/csi-driver-nfs/raw/branch/master/charts/v4.2.0/csi-driver-nfs-v4.2.0.tgz
version: v4.2.0
generated: "2022-05-06T12:35:56.693722959Z"
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/latest/csi-driver-nfs-v3.1.0.tgz
version: v3.1.0
- apiVersion: v1
appVersion: v3.1.0
created: "2022-01-15T13:54:34.071572409Z"
description: CSI NFS Driver for Kubernetes
digest: be2757357ed0a4c5c689b1c06de8eaa75da43430f08f04c8fb42fd17fffb0959
name: csi-driver-nfs
urls:
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/v3.1.0/csi-driver-nfs-v3.1.0.tgz
version: v3.1.0
- apiVersion: v1
appVersion: v3.0.0
created: "2022-01-15T13:54:34.070855705Z"
description: CSI NFS Driver for Kubernetes
digest: c19a1780bbdf240ff4628666a5cf70fea9d44fc20966b63796550e83a45ef50a
name: csi-driver-nfs
urls:
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/v3.0.0/csi-driver-nfs-v3.0.0.tgz
version: v3.0.0
- apiVersion: v1
appVersion: v2.0.0
created: "2022-01-15T13:54:34.069693798Z"
description: CSI NFS Driver for Kubernetes
digest: f537a133eaa965f1c053ffac130f82c9b2b624e1f8bd42937c9c48818464eaac
name: csi-driver-nfs
urls:
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/v2.0.0/csi-driver-nfs-v2.0.0.tgz
version: v2.0.0
generated: "2022-01-15T13:54:34.066289278Z"

Binary file not shown.

View File

@ -1,5 +1,5 @@
apiVersion: v1
appVersion: latest
appVersion: v3.1.0
description: CSI NFS Driver for Kubernetes
name: csi-driver-nfs
version: v4.1.0
version: v3.1.0

View File

@ -13,7 +13,4 @@ labels:
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 2 -}}
{{- end }}
{{- end -}}
{{- end -}}

View File

@ -20,7 +20,7 @@ spec:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: {{ .Values.controller.dnsPolicy }}
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ .Values.serviceAccount.controller }}
nodeSelector:
kubernetes.io/os: linux
@ -39,7 +39,6 @@ spec:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
env:
- name: ADDRESS
value: /csi/csi.sock
@ -96,7 +95,7 @@ spec:
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
@ -104,7 +103,7 @@ spec:
volumes:
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,4 +1,3 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:

View File

@ -1,4 +1,5 @@
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
@ -23,8 +24,7 @@ spec:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: csi-nfs-node-sa
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.node.tolerations }}
@ -60,7 +60,7 @@ spec:
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin/csi.sock
value: /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
@ -109,19 +109,19 @@ spec:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
resources: {{- toYaml .Values.node.resources.nfs | nindent 12 }}
volumes:
- name: socket-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin
path: /var/lib/kubelet/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,64 +0,0 @@
{{- if .Values.serviceAccount.create -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-node-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
{{- end -}}
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-provisioner-role
{{ include "nfs.labels" . | indent 2 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-provisioner-binding
{{ include "nfs.labels" . | indent 2 }}
subjects:
- kind: ServiceAccount
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.rbac.name }}-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -1,20 +1,19 @@
customLabels: {}
image:
nfs:
repository: gcr.io/k8s-staging-sig-storage/nfsplugin
tag: canary
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
repository: mcr.microsoft.com/k8s/csi/nfs-csi
tag: v3.1.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v2.2.2
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.7.0
repository: k8s.gcr.io/sig-storage/livenessprobe
tag: v2.5.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.5.1
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.4.0
pullPolicy: IfNotPresent
serviceAccount:
@ -30,20 +29,17 @@ driver:
mountPermissions: 0777
feature:
enableFSGroupPolicy: true
enableFSGroupPolicy: false
enableInlineVolume: false
kubeletDir: /var/lib/kubelet
controller:
name: csi-nfs-controller
replicas: 1
replicas: 2
runOnMaster: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: "/tmp"
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
@ -51,9 +47,6 @@ controller:
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
@ -76,7 +69,6 @@ controller:
node:
name: csi-nfs-node
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
maxUnavailable: 1
logLevel: 5
livenessProbe:

View File

@ -4,15 +4,15 @@ image:
tag: v2.0.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v2.0.4
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
repository: k8s.gcr.io/sig-storage/livenessprobe
tag: v2.1.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.0.1
pullPolicy: IfNotPresent
serviceAccount:

View File

@ -1,18 +1,18 @@
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
repository: mcr.microsoft.com/k8s/csi/nfs-csi
tag: v3.0.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v2.2.2
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
repository: k8s.gcr.io/sig-storage/livenessprobe
tag: v2.5.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.4.0
pullPolicy: IfNotPresent

View File

@ -1,18 +1,18 @@
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
repository: mcr.microsoft.com/k8s/csi/nfs-csi
tag: v3.1.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
repository: k8s.gcr.io/sig-storage/csi-provisioner
tag: v2.2.2
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
repository: k8s.gcr.io/sig-storage/livenessprobe
tag: v2.5.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.4.0
pullPolicy: IfNotPresent

View File

@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,5 +0,0 @@
apiVersion: v1
appVersion: v4.0.0
description: CSI NFS Driver for Kubernetes
name: csi-driver-nfs
version: v4.0.0

View File

@ -1,5 +0,0 @@
The CSI NFS Driver is getting deployed to your cluster.
To check CSI NFS Driver pods status, please run:
kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch

View File

@ -1,19 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart.*/}}
{{- define "nfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* labels for helm resources */}}
{{- define "nfs.labels" -}}
labels:
app.kubernetes.io/instance: "{{ .Release.Name }}"
app.kubernetes.io/managed-by: "{{ .Release.Service }}"
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 2 -}}
{{- end }}
{{- end -}}

View File

@ -1,110 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
app: {{ .Values.controller.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.controller.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: {{ .Values.serviceAccount.controller }}
nodeSelector:
kubernetes.io/os: linux
{{- if .Values.controller.runOnMaster}}
kubernetes.io/role: master
{{- end}}
priorityClassName: system-cluster-critical
{{- with .Values.controller.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
volumeMounts:
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.controller.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
- name: nfs
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
args:
- "--v={{ .Values.controller.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.controller.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.nfs | nindent 12 }}
volumes:
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,15 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: {{ .Values.driver.name }}
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
{{- if .Values.feature.enableInlineVolume}}
- Ephemeral
{{- end}}
{{- if .Values.feature.enableFSGroupPolicy}}
fsGroupPolicy: File
{{- end}}

View File

@ -1,126 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ .Values.node.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: {{ .Values.node.maxUnavailable }}
type: RollingUpdate
selector:
matchLabels:
app: {{ .Values.node.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.node.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: {{ .Values.controller.dnsPolicy }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.node.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.node.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }}
- name: node-driver-registrar
image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }}
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
args :
- "--v={{ .Values.node.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
resources: {{- toYaml .Values.node.resources.nfs | nindent 12 }}
volumes:
- name: socket-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,110 +0,0 @@
customLabels: {}
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
tag: v4.2.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v3.3.0
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.8.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.6.2
pullPolicy: IfNotPresent
serviceAccount:
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-nfs-controller-sa # Name of Service Account to be created or used
rbac:
create: true
name: nfs
driver:
name: nfs.csi.k8s.io
mountPermissions: 0777
feature:
enableFSGroupPolicy: true
enableInlineVolume: false
kubeletDir: /var/lib/kubelet
controller:
name: csi-nfs-controller
replicas: 1
runOnMaster: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: "/tmp"
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
node:
name: csi-nfs-node
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
maxUnavailable: 1
logLevel: 5
livenessProbe:
healthPort: 29653
tolerations:
- operator: "Exists"
resources:
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nodeDriverRegistrar:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"

View File

@ -1,22 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,5 +0,0 @@
apiVersion: v1
appVersion: v4.2.0
description: CSI NFS Driver for Kubernetes
name: csi-driver-nfs
version: v4.2.0

View File

@ -1,5 +0,0 @@
The CSI NFS Driver is getting deployed to your cluster.
To check CSI NFS Driver pods status, please run:
kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/instance={{ .Release.Name }}" --watch

View File

@ -1,19 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/* Expand the name of the chart.*/}}
{{- define "nfs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* labels for helm resources */}}
{{- define "nfs.labels" -}}
labels:
app.kubernetes.io/instance: "{{ .Release.Name }}"
app.kubernetes.io/managed-by: "{{ .Release.Service }}"
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 2 -}}
{{- end }}
{{- end -}}

View File

@ -1,123 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
app: {{ .Values.controller.name }}
strategy:
type: {{ .Values.controller.strategyType }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.controller.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: {{ .Values.serviceAccount.controller }}
{{- with .Values.controller.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
nodeSelector:
kubernetes.io/os: linux
{{- if .Values.controller.runOnMaster}}
node-role.kubernetes.io/master: ""
{{- end}}
{{- if .Values.controller.runOnControlPlane}}
node-role.kubernetes.io/control-plane: ""
{{- end}}
{{- with .Values.controller.nodeSelector }}
{{ toYaml . | indent 8 }}
{{- end }}
priorityClassName: system-cluster-critical
{{- with .Values.controller.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: csi-provisioner
image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
- "--extra-create-metadata=true"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
volumeMounts:
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.controller.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
- name: nfs
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
args:
- "--v={{ .Values.controller.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.controller.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.nfs | nindent 12 }}
volumes:
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,15 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: {{ .Values.driver.name }}
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
{{- if .Values.feature.enableInlineVolume}}
- Ephemeral
{{- end}}
{{- if .Values.feature.enableFSGroupPolicy}}
fsGroupPolicy: File
{{- end}}

View File

@ -1,134 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: {{ .Values.node.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: {{ .Values.node.maxUnavailable }}
type: RollingUpdate
selector:
matchLabels:
app: {{ .Values.node.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: {{ .Values.node.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: {{ .Values.controller.dnsPolicy }}
serviceAccountName: csi-nfs-node-sa
{{- with .Values.node.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
nodeSelector:
kubernetes.io/os: linux
{{- with .Values.node.nodeSelector }}
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.node.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: liveness-probe
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port={{ .Values.node.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.node.resources.livenessProbe | nindent 12 }}
- name: node-driver-registrar
image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
env:
- name: DRIVER_REG_SOCK_PATH
value: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources: {{- toYaml .Values.node.resources.nodeDriverRegistrar | nindent 12 }}
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
args :
- "--v={{ .Values.node.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: {{ .Values.kubeletDir }}/pods
mountPropagation: "Bidirectional"
resources: {{- toYaml .Values.node.resources.nfs | nindent 12 }}
volumes:
- name: socket-dir
hostPath:
path: {{ .Values.kubeletDir }}/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: {{ .Values.kubeletDir }}/pods
type: Directory
- hostPath:
path: {{ .Values.kubeletDir }}/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,64 +0,0 @@
{{- if .Values.serviceAccount.create -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-{{ .Values.rbac.name }}-node-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
{{- end }}
{{ if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-provisioner-role
{{ include "nfs.labels" . | indent 2 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-provisioner-binding
{{ include "nfs.labels" . | indent 2 }}
subjects:
- kind: ServiceAccount
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Values.rbac.name }}-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View File

@ -1,116 +0,0 @@
customLabels: {}
image:
nfs:
repository: registry.k8s.io/sig-storage/nfsplugin
tag: v4.2.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v3.3.0
pullPolicy: IfNotPresent
livenessProbe:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.8.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.6.2
pullPolicy: IfNotPresent
serviceAccount:
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-nfs-controller-sa # Name of Service Account to be created or used
rbac:
create: true
name: nfs
driver:
name: nfs.csi.k8s.io
mountPermissions: 0
feature:
enableFSGroupPolicy: true
enableInlineVolume: false
kubeletDir: /var/lib/kubelet
controller:
name: csi-nfs-controller
replicas: 1
strategyType: Recreate
runOnMaster: false
runOnControlPlane: false
livenessProbe:
healthPort: 29652
logLevel: 5
workingMountDir: "/tmp"
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
affinity: {}
nodeSelector: {}
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
resources:
csiProvisioner:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
node:
name: csi-nfs-node
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
maxUnavailable: 1
logLevel: 5
livenessProbe:
healthPort: 29653
affinity: {}
nodeSelector: {}
tolerations:
- operator: "Exists"
resources:
livenessProbe:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nodeDriverRegistrar:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
nfs:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"

View File

@ -5,7 +5,7 @@ metadata:
name: csi-nfs-controller
namespace: kube-system
spec:
replicas: 1
replicas: 2
selector:
matchLabels:
app: csi-nfs-controller
@ -15,7 +15,7 @@ spec:
app: csi-nfs-controller
spec:
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: csi-nfs-controller-sa
nodeSelector:
kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
@ -27,17 +27,13 @@ spec:
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace=kube-system"
env:
- name: ADDRESS
value: /csi/csi.sock
@ -51,7 +47,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -67,7 +63,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: nfs
image: gcr.io/k8s-staging-sig-storage/nfsplugin:canary
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.1.0
securityContext:
privileged: true
capabilities:

View File

@ -8,4 +8,3 @@ spec:
volumeLifecycleModes:
- Persistent
- Ephemeral
fsGroupPolicy: File

View File

@ -1,4 +1,6 @@
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
@ -18,15 +20,14 @@ spec:
app: csi-nfs-node
spec:
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
serviceAccountName: csi-nfs-node-sa
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -42,7 +43,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
args:
- --v=2
- --csi-address=/csi/csi.sock
@ -79,7 +80,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: gcr.io/k8s-staging-sig-storage/nfsplugin:canary
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.1.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"

View File

@ -1,39 +1,25 @@
# CSI driver example
After the NFS CSI Driver is deployed in your cluster, you can follow this documentation to quickly deploy some examples.
You can use NFS CSI Driver to provision Persistent Volumes statically or dynamically. Please read [Kubernetes Persistent Volumes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for more information about Static and Dynamic provisioning.
Please refer to [driver parameters](../../docs/driver-parameters.md) for more detailed usage.
## Prerequisite
 - [Set up an NFS Server on a Kubernetes cluster](./nfs-provisioner/README.md) as an example
 - [Set up an NFS Server on a Kubernetes cluster](./nfs-provisioner/README.md)
- [Install NFS CSI Driver](../../docs/install-nfs-csi-driver.md)
## Storage Class Usage (Dynamic Provisioning)
- Create a storage class
> change `server`, `share` with your existing NFS server address and share name
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
server: nfs-server.default.svc.cluster.local
share: /
# csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
# csi.storage.k8s.io/provisioner-secret-name: "mount-options"
# csi.storage.k8s.io/provisioner-secret-namespace: "default"
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
- nconnect=8 # only supported on linux kernel version >= 5.3
- nfsvers=4.1
```
 - Run the following commands to create a `StorageClass`, and then a `PersistentVolume` and a `PersistentVolumeClaim` dynamically.
- create PVC
```console
```bash
# create StorageClass
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/storageclass-nfs.yaml
# create PVC
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/pvc-nfs-csi-dynamic.yaml
```
@ -49,7 +35,13 @@ kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nf
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/pvc-nfs-csi-static.yaml
```
## Create a deployment
```console
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/deployment.yaml
```
## Deployment/Statefulset Usage
 - Run the following commands to create a `Deployment` and a `StatefulSet`.
```bash
# create Deployment and Statefulset
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
./hack/verify-examples.sh
```

View File

@ -10,6 +10,7 @@ spec:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
mountOptions:
- hard
- nfsvers=4.1
csi:
driver: nfs.csi.k8s.io

View File

@ -9,9 +9,8 @@ spec:
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-csi
mountOptions:
- nconnect=8 # only supported on linux kernel version >= 5.3
- hard
- nfsvers=4.1
csi:
driver: nfs.csi.k8s.io

View File

@ -10,4 +10,4 @@ spec:
requests:
storage: 10Gi
volumeName: pv-nfs
storageClassName: nfs-csi
storageClassName: ""

View File

@ -8,10 +8,10 @@ parameters:
server: nfs-server.default.svc.cluster.local
share: /
# csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
# csi.storage.k8s.io/provisioner-secret-name: "mount-options"
# csi.storage.k8s.io/provisioner-secret-namespace: "default"
csi.storage.k8s.io/provisioner-secret-name: "mount-options"
csi.storage.k8s.io/provisioner-secret-namespace: "default"
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
- nconnect=8 # only supported on linux kernel version >= 5.3
- hard
- nfsvers=4.1

View File

@ -34,7 +34,7 @@ if [ $ver != "master" ]; then
fi
echo "Installing NFS CSI driver, version: $ver ..."
kubectl apply -f $repo/rbac-csi-nfs.yaml
kubectl apply -f $repo/rbac-csi-nfs-controller.yaml
kubectl apply -f $repo/csi-nfs-driverinfo.yaml
kubectl apply -f $repo/csi-nfs-controller.yaml
kubectl apply -f $repo/csi-nfs-node.yaml

View File

@ -1,57 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nfs-controller-sa
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nfs-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-nfs-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: nfs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io

View File

@ -37,5 +37,5 @@ echo "Uninstalling NFS driver, version: $ver ..."
kubectl delete -f $repo/csi-nfs-controller.yaml --ignore-not-found
kubectl delete -f $repo/csi-nfs-node.yaml --ignore-not-found
kubectl delete -f $repo/csi-nfs-driverinfo.yaml --ignore-not-found
kubectl delete -f $repo/rbac-csi-nfs.yaml --ignore-not-found
kubectl delete -f $repo/rbac-csi-nfs-controller.yaml --ignore-not-found
echo 'Uninstalled NFS driver successfully.'

View File

@ -29,7 +29,7 @@ spec:
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v2.2.2
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
@ -48,7 +48,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -65,7 +65,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: nfs
image: registry.k8s.io/sig-storage/nfsplugin:v3.0.0
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.0.0
securityContext:
privileged: true
capabilities:

View File

@ -27,7 +27,7 @@ spec:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -44,7 +44,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.4.0
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
args:
- --v=2
- --csi-address=/csi/csi.sock
@ -82,7 +82,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/nfsplugin:v3.0.0
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.0.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"

View File

@ -29,7 +29,7 @@ spec:
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v2.2.2
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
@ -47,7 +47,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -63,7 +63,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: nfs
image: registry.k8s.io/sig-storage/nfsplugin:v3.1.0
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.1.0
securityContext:
privileged: true
capabilities:

View File

@ -27,7 +27,7 @@ spec:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.5.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@ -43,7 +43,7 @@ spec:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.4.0
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
args:
- --v=2
- --csi-address=/csi/csi.sock
@ -80,7 +80,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/nfsplugin:v3.1.0
image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.1.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"

View File

@ -1,118 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-nfs-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: csi-nfs-controller
template:
metadata:
labels:
app: csi-nfs-controller
spec:
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
serviceAccountName: csi-nfs-controller-sa
nodeSelector:
kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
args:
- "-v=2"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
- "--leader-election-namespace=kube-system"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29652
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: nfs
image: registry.k8s.io/sig-storage/nfsplugin:v4.0.0
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: IfNotPresent
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29652
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
volumeMounts:
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- name: socket-dir
emptyDir: {}

View File

@ -1,11 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: nfs.csi.k8s.io
spec:
attachRequired: false
volumeLifecycleModes:
- Persistent
- Ephemeral
fsGroupPolicy: File

View File

@ -1,130 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-nfs-node
namespace: kube-system
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: csi-nfs-node
template:
metadata:
labels:
app: csi-nfs-node
spec:
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: "Exists"
containers:
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29653
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.0
args:
- --v=2
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
env:
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: nfs
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: registry.k8s.io/sig-storage/nfsplugin:v4.0.0
args:
- "-v=5"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29653
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
resources:
limits:
memory: 300Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-nfsplugin
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir

View File

@ -1,6 +1,6 @@
## CSI driver debug tips
### case#1: volume create/delete failed
### Case#1: volume create/delete failed
- locate csi driver pod
```console
$ kubectl get pod -o wide -n kube-system | grep csi-nfs-controller
@ -14,8 +14,8 @@ $ kubectl logs csi-nfs-controller-56bfddd689-dh5tk -c nfs -n kube-system > csi-n
```
> note: there could be multiple controller pods, if there are no helpful logs, try to get logs from other controller pods
### case#2: volume mount/unmount failed
- locate csi driver pod that does the actual volume mount/unmount
### Case#2: volume mount/unmount failed
- locate csi driver pod and figure out which pod does the actual volume mount/unmount
```console
$ kubectl get pod -o wide -n kube-system | grep csi-nfs-node

View File

@ -37,7 +37,7 @@ $ make build
#### Start CSI driver locally
```console
$ cd $GOPATH/src/github.com/kubernetes-csi/csi-driver-nfs
$ ./bin/nfsplugin --endpoint unix:///tmp/csi.sock --nodeid CSINode -v=5 &
$ ./_output/nfsplugin --endpoint tcp://127.0.0.1:10000 --nodeid CSINode -v=5 &
```
#### 0. Set environment variables

View File

@ -8,7 +8,6 @@ Name | Meaning | Example Value | Mandatory | Default value
--- | --- | --- | --- | ---
server | NFS Server address | domain name `nfs-server.default.svc.cluster.local` <br>or IP address `127.0.0.1` | Yes |
share | NFS share path | `/` | Yes |
mountPermissions | mounted folder permissions. The default is `0777`, if set as `0`, driver will not perform `chmod` after mount | | No |
### PV/PVC usage (static provisioning)
> [`PersistentVolume` example](../deploy/example/pv-nfs-csi.yaml)
@ -17,7 +16,7 @@ Name | Meaning | Example Value | Mandatory | Default value
--- | --- | --- | --- | ---
volumeAttributes.server | NFS Server address | domain name `nfs-server.default.svc.cluster.local` <br>or IP address `127.0.0.1` | Yes |
volumeAttributes.share | NFS share path | `/` | Yes |
volumeAttributes.mountPermissions | mounted folder permissions. The default is `0777` | | No |
### Tips
#### provide `mountOptions` for `DeleteVolume`
@ -42,5 +41,6 @@ parameters:
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
- nfsvers=4.1
- hard
- nfsvers=3
```

View File

@ -1,14 +1,14 @@
# Install NFS CSI driver master version on a kubernetes cluster
If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
If you have already installed Helm, you can also use it to install NFS CSI driver. Please see [Installation with Helm](../charts/README.md).
## Install with kubectl
- Option#1. remote install
- remote install
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/install-driver.sh | bash -s master --
```
- Option#2. local install
- local install
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
@ -26,20 +26,12 @@ example output:
```console
NAME READY STATUS RESTARTS AGE IP NODE
csi-nfs-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
csi-nfs-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0
```
### clean up NFS CSI driver
- Option#1. remote uninstall
- clean up NFS CSI driver
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/uninstall-driver.sh | bash -s master --
```
- Option#2. local uninstall
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
git checkout master
./deploy/uninstall-driver.sh master local
```
```

View File

@ -1,45 +0,0 @@
# Install NFS CSI driver v4.0.0 version on a kubernetes cluster
If you have already installed Helm, you can also use it to install this driver. Please check [Installation with Helm](../charts/README.md).
## Install with kubectl
- Option#1. remote install
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/v4.0.0/deploy/install-driver.sh | bash -s v4.0.0 --
```
- Option#2. local install
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
./deploy/install-driver.sh v4.0.0 local
```
- check pods status:
```console
kubectl -n kube-system get pod -o wide -l app=csi-nfs-controller
kubectl -n kube-system get pod -o wide -l app=csi-nfs-node
```
example output:
```console
NAME READY STATUS RESTARTS AGE IP NODE
csi-nfs-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
csi-nfs-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
csi-nfs-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0
```
### clean up NFS CSI driver
- Option#1. remote uninstall
```console
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/v4.0.0/deploy/uninstall-driver.sh | bash -s v4.0.0 --
```
- Option#2. local uninstall
```console
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
cd csi-driver-nfs
git checkout v4.0.0
./deploy/uninstall-driver.sh v4.0.0 local
```

View File

@ -1,6 +1,5 @@
## Install NFS CSI driver on a Kubernetes cluster
- [install CSI driver master version](./install-csi-driver-master.md)
- [install CSI driver v4.0.0 version](./install-csi-driver-v4.0.0.md)
- [install CSI driver v3.1.0 version](./install-csi-driver-v3.1.0.md)
- [install CSI driver v3.0.0 version](./install-csi-driver-v3.0.0.md)

70
go.mod
View File

@ -6,51 +6,49 @@ require (
github.com/container-storage-interface/spec v1.5.0
github.com/golang/protobuf v1.5.2
github.com/kubernetes-csi/csi-lib-utils v0.9.0
github.com/kubernetes-csi/external-snapshotter/v2 v2.0.0-20200617021606-4800ca72d403
github.com/onsi/ginkgo v1.14.0
github.com/onsi/gomega v1.10.1
github.com/pborman/uuid v1.2.0
github.com/prometheus/client_golang v1.11.1 // indirect
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63
golang.org/x/net v0.0.0-20210825183410-e898025ed96a
google.golang.org/grpc v1.40.0
k8s.io/api v0.23.3
k8s.io/apimachinery v0.23.3
k8s.io/client-go v0.23.3
k8s.io/api v0.23.0
k8s.io/apimachinery v0.23.0
k8s.io/client-go v0.23.0
k8s.io/klog/v2 v2.30.0
k8s.io/kubernetes v1.23.3
k8s.io/mount-utils v0.23.3
k8s.io/utils v0.0.0-20211116205334-6203023598ed
k8s.io/kubernetes v1.23.0
k8s.io/mount-utils v0.23.0
sigs.k8s.io/yaml v1.2.0
)
replace (
golang.org/x/text => golang.org/x/text v0.3.7
k8s.io/api => k8s.io/api v0.23.3
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3
k8s.io/apimachinery => k8s.io/apimachinery v0.23.3
k8s.io/apiserver => k8s.io/apiserver v0.23.3
k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3
k8s.io/client-go => k8s.io/client-go v0.23.3
k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3
k8s.io/code-generator => k8s.io/code-generator v0.23.3
k8s.io/component-base => k8s.io/component-base v0.23.3
k8s.io/component-helpers => k8s.io/component-helpers v0.23.3
k8s.io/controller-manager => k8s.io/controller-manager v0.23.3
k8s.io/cri-api => k8s.io/cri-api v0.23.3
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3
k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3
k8s.io/kubectl => k8s.io/kubectl v0.23.3
k8s.io/kubelet => k8s.io/kubelet v0.23.3
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3
k8s.io/metrics => k8s.io/metrics v0.23.3
k8s.io/mount-utils => k8s.io/mount-utils v0.23.3
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.3
k8s.io/sample-controller => k8s.io/sample-controller v0.23.3
k8s.io/api => k8s.io/api v0.23.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.0
k8s.io/apimachinery => k8s.io/apimachinery v0.23.0
k8s.io/apiserver => k8s.io/apiserver v0.23.0
k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.0
k8s.io/client-go => k8s.io/client-go v0.23.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.0
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.0
k8s.io/code-generator => k8s.io/code-generator v0.23.0
k8s.io/component-base => k8s.io/component-base v0.23.0
k8s.io/component-helpers => k8s.io/component-helpers v0.23.0
k8s.io/controller-manager => k8s.io/controller-manager v0.23.0
k8s.io/cri-api => k8s.io/cri-api v0.23.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.0
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.0
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.0
k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.0
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.0
k8s.io/kubectl => k8s.io/kubectl v0.23.0
k8s.io/kubelet => k8s.io/kubelet v0.23.0
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.0
k8s.io/metrics => k8s.io/metrics v0.23.0
k8s.io/mount-utils => k8s.io/mount-utils v0.23.0
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.0
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.0
k8s.io/sample-controller => k8s.io/sample-controller v0.23.0
)

450
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -20,8 +20,10 @@ readonly PKG_ROOT="$(git rev-parse --show-toplevel)"
${PKG_ROOT}/hack/verify-gofmt.sh
${PKG_ROOT}/hack/verify-govet.sh
${PKG_ROOT}/hack/verify-golint.sh
${PKG_ROOT}/hack/verify-yamllint.sh
${PKG_ROOT}/hack/verify-boilerplate.sh
${PKG_ROOT}/hack/verify-spelling.sh
${PKG_ROOT}/hack/verify-helm-chart-files.sh
${PKG_ROOT}/hack/verify-helm-chart.sh
${PKG_ROOT}/hack/verify-gomod.sh

View File

@ -21,7 +21,6 @@ import (
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/container-storage-interface/spec/lib/go/csi"
@ -75,37 +74,12 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
if len(name) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume name must be provided")
}
if err := isValidVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
if err := cs.validateVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
mountPermissions := cs.Driver.mountPermissions
reqCapacity := req.GetCapacityRange().GetRequiredBytes()
parameters := req.GetParameters()
if parameters == nil {
parameters = make(map[string]string)
}
// validate parameters (case-insensitive)
for k, v := range parameters {
switch strings.ToLower(k) {
case paramServer:
// no op
case paramShare:
// no op
case mountPermissionsField:
if v != "" {
var err error
if mountPermissions, err = strconv.ParseUint(v, 8, 32); err != nil {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("invalid mountPermissions %s in storage class", v))
}
}
default:
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("invalid parameter %q in storage class", k))
}
}
nfsVol, err := cs.newNFSVolume(name, reqCapacity, parameters)
nfsVol, err := cs.newNFSVolume(name, reqCapacity, req.GetParameters())
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
@ -115,7 +89,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
volCap = req.GetVolumeCapabilities()[0]
}
// Mount nfs base share so we can create a subdirectory
if err = cs.internalMount(ctx, nfsVol, parameters, volCap); err != nil {
if err = cs.internalMount(ctx, nfsVol, volCap); err != nil {
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
}
defer func() {
@ -124,7 +98,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
}
}()
fileMode := os.FileMode(mountPermissions)
fileMode := os.FileMode(cs.Driver.mountPermissions)
// Create subdirectory under base-dir
internalVolumePath := cs.getInternalVolumePath(nfsVol)
if err = os.Mkdir(internalVolumePath, fileMode); err != nil && !os.IsExist(err) {
@ -134,16 +108,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
if err = os.Chmod(internalVolumePath, fileMode); err != nil {
klog.Warningf("failed to chmod subdirectory: %v", err.Error())
}
parameters[paramServer] = nfsVol.server
parameters[paramShare] = cs.getVolumeSharePath(nfsVol)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: nfsVol.id,
CapacityBytes: 0, // by setting it to zero, Provisioner will use PVC requested size as PV size
VolumeContext: parameters,
},
}, nil
return &csi.CreateVolumeResponse{Volume: cs.nfsVolToCSI(nfsVol)}, nil
}
// DeleteVolume delete a volume
@ -173,7 +138,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
}
// mount nfs base share so we can delete the subdirectory
if err = cs.internalMount(ctx, nfsVol, nil, volCap); err != nil {
if err = cs.internalMount(ctx, nfsVol, volCap); err != nil {
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
}
defer func() {
@ -209,8 +174,8 @@ func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := isValidVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
return &csi.ValidateVolumeCapabilitiesResponse{
@ -253,8 +218,43 @@ func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi
return nil, status.Error(codes.Unimplemented, "")
}
func (cs *ControllerServer) validateVolumeCapabilities(caps []*csi.VolumeCapability) error {
if len(caps) == 0 {
return fmt.Errorf("volume capabilities must be provided")
}
for _, c := range caps {
if err := cs.validateVolumeCapability(c); err != nil {
return err
}
}
return nil
}
func (cs *ControllerServer) validateVolumeCapability(c *csi.VolumeCapability) error {
if c == nil {
return fmt.Errorf("volume capability must be provided")
}
// Validate access mode
accessMode := c.GetAccessMode()
if accessMode == nil {
return fmt.Errorf("volume capability access mode not set")
}
if !cs.Driver.cap[accessMode.Mode] {
return fmt.Errorf("driver does not support access mode: %v", accessMode.Mode.String())
}
// Validate access type
accessType := c.GetAccessType()
if accessType == nil {
return fmt.Errorf("volume capability access type not set")
}
return nil
}
// Mount nfs server at base-dir
func (cs *ControllerServer) internalMount(ctx context.Context, vol *nfsVolume, volumeContext map[string]string, volCap *csi.VolumeCapability) error {
func (cs *ControllerServer) internalMount(ctx context.Context, vol *nfsVolume, volCap *csi.VolumeCapability) error {
sharePath := filepath.Join(string(filepath.Separator) + vol.baseDir)
targetPath := cs.getInternalMountPath(vol)
@ -266,16 +266,13 @@ func (cs *ControllerServer) internalMount(ctx context.Context, vol *nfsVolume, v
}
}
if volumeContext == nil {
volumeContext = make(map[string]string)
}
volumeContext[paramServer] = vol.server
volumeContext[paramShare] = sharePath
klog.V(2).Infof("internally mounting %v:%v at %v", vol.server, sharePath, targetPath)
_, err := cs.Driver.ns.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
TargetPath: targetPath,
VolumeContext: volumeContext,
TargetPath: targetPath,
VolumeContext: map[string]string{
paramServer: vol.server,
paramShare: sharePath,
},
VolumeCapability: volCap,
VolumeId: vol.id,
})
@ -306,6 +303,8 @@ func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[str
server = v
case paramShare:
baseDir = v
default:
return nil, fmt.Errorf("invalid parameter %q", k)
}
}
@ -348,6 +347,18 @@ func (cs *ControllerServer) getVolumeSharePath(vol *nfsVolume) string {
return filepath.Join(string(filepath.Separator), vol.baseDir, vol.subDir)
}
// Convert into nfsVolume into a csi.Volume
func (cs *ControllerServer) nfsVolToCSI(vol *nfsVolume) *csi.Volume {
return &csi.Volume{
CapacityBytes: 0, // by setting it to zero, Provisioner will use PVC requested size as PV size
VolumeId: vol.id,
VolumeContext: map[string]string{
paramServer: vol.server,
paramShare: cs.getVolumeSharePath(vol),
},
}
}
// Given a nfsVolume, return a CSI volume id
func (cs *ControllerServer) getVolumeIDFromNfsVol(vol *nfsVolume) string {
idElements := make([]string, totalIDElements)
@ -388,16 +399,3 @@ func getNfsVolFromID(id string) (*nfsVolume, error) {
subDir: subDir,
}, nil
}
// isValidVolumeCapabilities validates the given VolumeCapability array is valid
func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) error {
if len(volCaps) == 0 {
return fmt.Errorf("volume capabilities missing in request")
}
for _, c := range volCaps {
if c.GetBlock() != nil {
return fmt.Errorf("block volume capability not supported")
}
}
return nil
}

View File

@ -100,18 +100,16 @@ func TestCreateVolume(t *testing.T) {
},
},
Parameters: map[string]string{
paramServer: testServer,
paramShare: testBaseDir,
mountPermissionsField: "0750",
paramServer: testServer,
paramShare: testBaseDir,
},
},
resp: &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: newTestVolumeID,
VolumeContext: map[string]string{
paramServer: testServer,
paramShare: testShare,
mountPermissionsField: "0750",
paramServer: testServer,
paramShare: testShare,
},
},
},
@ -165,6 +163,24 @@ func TestCreateVolume(t *testing.T) {
},
expectErr: true,
},
{
name: "invalid volume capability",
req: &csi.CreateVolumeRequest{
Name: testCSIVolume,
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
Parameters: map[string]string{
paramServer: testServer,
paramShare: testBaseDir,
},
},
expectErr: true,
},
{
name: "invalid create context",
req: &csi.CreateVolumeRequest{
@ -185,28 +201,6 @@ func TestCreateVolume(t *testing.T) {
},
expectErr: true,
},
{
name: "[Error] invalid mountPermissions",
req: &csi.CreateVolumeRequest{
Name: testCSIVolume,
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
},
Parameters: map[string]string{
paramServer: testServer,
paramShare: testBaseDir,
mountPermissionsField: "07ab",
},
},
expectErr: true,
},
}
for _, test := range cases {
@ -299,6 +293,78 @@ func TestDeleteVolume(t *testing.T) {
}
}
func TestValidateVolumeCapabilities(t *testing.T) {
capabilities := []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
}
cases := []struct {
desc string
req *csi.ValidateVolumeCapabilitiesRequest
resp *csi.ValidateVolumeCapabilitiesResponse
expectedErr error
}{
{
desc: "Volume ID missing",
req: &csi.ValidateVolumeCapabilitiesRequest{},
resp: nil,
expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
},
{
desc: "Volume capabilities missing",
req: &csi.ValidateVolumeCapabilitiesRequest{VolumeId: testVolumeID},
resp: nil,
expectedErr: status.Error(codes.InvalidArgument, "Volume capabilities missing in request"),
},
{
desc: "valid request",
req: &csi.ValidateVolumeCapabilitiesRequest{
VolumeId: testVolumeID,
VolumeCapabilities: capabilities,
},
resp: &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{VolumeCapabilities: capabilities},
},
expectedErr: nil,
},
{
desc: "valid request with newTestVolumeID",
req: &csi.ValidateVolumeCapabilitiesRequest{
VolumeId: newTestVolumeID,
VolumeCapabilities: capabilities,
},
resp: &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{VolumeCapabilities: capabilities},
},
expectedErr: nil,
},
}
for _, test := range cases {
test := test //pin
t.Run(test.desc, func(t *testing.T) {
cs := initTestController(t)
resp, err := cs.ValidateVolumeCapabilities(context.TODO(), test.req)
if test.expectedErr == nil && err != nil {
t.Errorf("test %q failed: %v", test.desc, err)
}
if test.expectedErr != nil && err == nil {
t.Errorf("test %q failed; expected error %v, got success", test.desc, test.expectedErr)
}
if !reflect.DeepEqual(resp, test.resp) {
t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
}
})
}
}
func TestControllerGetCapabilities(t *testing.T) {
cases := []struct {
desc string
@ -436,46 +502,3 @@ func TestNfsVolFromId(t *testing.T) {
})
}
}
func TestIsValidVolumeCapabilities(t *testing.T) {
mountVolumeCapabilities := []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
},
}
blockVolumeCapabilities := []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Block{
Block: &csi.VolumeCapability_BlockVolume{},
},
},
}
cases := []struct {
desc string
volCaps []*csi.VolumeCapability
expectErr error
}{
{
volCaps: mountVolumeCapabilities,
expectErr: nil,
},
{
volCaps: blockVolumeCapabilities,
expectErr: fmt.Errorf("block volume capability not supported"),
},
{
volCaps: []*csi.VolumeCapability{},
expectErr: fmt.Errorf("volume capabilities missing in request"),
},
}
for _, test := range cases {
err := isValidVolumeCapabilities(test.volCaps)
if !reflect.DeepEqual(err, test.expectErr) {
t.Errorf("[test: %s] Unexpected error: %v, expected error: %v", test.desc, err, test.expectErr)
}
}
}

View File

@ -41,6 +41,7 @@ type Driver struct {
//ids *identityServer
ns *NodeServer
cap map[csi.VolumeCapability_AccessMode_Mode]bool
cscap []*csi.ControllerServiceCapability
nscap []*csi.NodeServiceCapability
volumeLocks *VolumeLocks
@ -54,9 +55,8 @@ const (
// The base directory must be a direct child of the root directory.
// The root directory is omitted from the string, for example:
// "base" instead of "/base"
paramShare = "share"
mountOptionsField = "mountoptions"
mountPermissionsField = "mountpermissions"
paramShare = "share"
mountOptionsField = "mountoptions"
)
func NewDriver(options *DriverOptions) *Driver {
@ -69,8 +69,20 @@ func NewDriver(options *DriverOptions) *Driver {
endpoint: options.Endpoint,
mountPermissions: options.MountPermissions,
workingMountDir: options.WorkingMountDir,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
}
vcam := []csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
}
n.AddVolumeCapabilityAccessModes(vcam)
n.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
@ -111,6 +123,15 @@ func (n *Driver) Run(testMode bool) {
s.Wait()
}
func (n *Driver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
var vca []*csi.VolumeCapability_AccessMode
for _, c := range vc {
vca = append(vca, &csi.VolumeCapability_AccessMode{Mode: c})
n.cap[c] = true
}
return vca
}
func (n *Driver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) {
var csc []*csi.ControllerServiceCapability
for _, c := range cl {

View File

@ -38,18 +38,21 @@ func NewEmptyDriver(emptyField string) *Driver {
name: DefaultDriverName,
version: "",
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
}
case "name":
d = &Driver{
name: "",
version: driverVersion,
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
}
default:
d = &Driver{
name: DefaultDriverName,
version: driverVersion,
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
}
}
d.volumeLocks = NewVolumeLocks()

View File

@ -19,7 +19,6 @@ package nfs
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/container-storage-interface/spec/lib/go/csi"
@ -57,8 +56,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
}
var server, baseDir string
mountPermissions := ns.Driver.mountPermissions
performChmodOp := (mountPermissions > 0)
for k, v := range req.GetVolumeContext() {
switch strings.ToLower(k) {
case paramServer:
@ -69,19 +66,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
if v != "" {
mountOptions = append(mountOptions, v)
}
case mountPermissionsField:
if v != "" {
var err error
var perm uint64
if perm, err = strconv.ParseUint(v, 8, 32); err != nil {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("invalid mountPermissions %s", v))
}
if perm == 0 {
performChmodOp = false
} else {
mountPermissions = perm
}
}
}
}
@ -91,13 +75,12 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
if baseDir == "" {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("%v is a required parameter", paramShare))
}
server = getServerFromSource(server)
source := fmt.Sprintf("%s:%s", server, baseDir)
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(targetPath, os.FileMode(mountPermissions)); err != nil {
if err := os.MkdirAll(targetPath, os.FileMode(ns.Driver.mountPermissions)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
notMnt = true
@ -121,14 +104,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return nil, status.Error(codes.Internal, err.Error())
}
if performChmodOp {
if err := chmodIfPermissionMismatch(targetPath, os.FileMode(mountPermissions)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
} else {
klog.V(2).Infof("skip chmod on targetPath(%s) since mountPermissions is set as 0", targetPath)
klog.V(2).Infof("volumeID(%v): mount targetPath(%s) with permissions(0%o)", volumeID, targetPath, ns.Driver.mountPermissions)
if err := os.Chmod(targetPath, os.FileMode(ns.Driver.mountPermissions)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(2).Infof("volume(%s) mount %s on %s succeeded", volumeID, source, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
@ -142,13 +121,23 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
if len(targetPath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
klog.V(2).Infof("NodeUnpublishVolume: unmounting volume %s on %s", volumeID, targetPath)
err := mount.CleanupMountPoint(targetPath, ns.mounter, true /*extensiveMountPointCheck*/)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to unmount target %q: %v", targetPath, err)
if os.IsNotExist(err) {
return nil, status.Error(codes.NotFound, "Targetpath not found")
}
return nil, status.Error(codes.Internal, err.Error())
}
if notMnt {
return nil, status.Error(codes.NotFound, "Volume not mounted")
}
klog.V(2).Infof("NodeUnpublishVolume: CleanupMountPoint %s on volumeID(%s)", targetPath, volumeID)
err = mount.CleanupMountPoint(targetPath, ns.mounter, false)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
klog.V(2).Infof("NodeUnpublishVolume: unmount volume %s on %s successfully", volumeID, targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}

View File

@ -19,10 +19,8 @@ package nfs
import (
"context"
"errors"
"fmt"
"os"
"reflect"
"strings"
"testing"
"github.com/container-storage-interface/spec/lib/go/csi"
@ -43,20 +41,8 @@ func TestNodePublishVolume(t *testing.T) {
}
params := map[string]string{
"server": "server",
"share": "share",
mountPermissionsField: "0755",
}
paramsWithZeroPermissions := map[string]string{
"server": "server",
"share": "share",
mountPermissionsField: "0",
}
invalidParams := map[string]string{
"server": "server",
"share": "share",
mountPermissionsField: "07ab",
"server": "server",
"share": "share",
}
volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}
@ -126,26 +112,6 @@ func TestNodePublishVolume(t *testing.T) {
Readonly: true},
expectedErr: nil,
},
{
desc: "[Success] Valid request with 0 mountPermissions",
req: csi.NodePublishVolumeRequest{
VolumeContext: paramsWithZeroPermissions,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: nil,
},
{
desc: "[Error] invalid mountPermissions",
req: csi.NodePublishVolumeRequest{
VolumeContext: invalidParams,
VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: status.Error(codes.InvalidArgument, "invalid mountPermissions 07ab"),
},
}
// setup
@ -203,11 +169,12 @@ func TestNodeUnpublishVolume(t *testing.T) {
{
desc: "[Error] Unmount error mocked by IsLikelyNotMountPoint",
req: csi.NodeUnpublishVolumeRequest{TargetPath: errorTarget, VolumeId: "vol_1"},
expectedErr: fmt.Errorf("fake IsLikelyNotMountPoint: fake error"),
expectedErr: status.Error(codes.Internal, "fake IsLikelyNotMountPoint: fake error"),
},
{
desc: "[Success] Volume not mounted",
req: csi.NodeUnpublishVolumeRequest{TargetPath: targetFile, VolumeId: "vol_1"},
desc: "[Error] Volume not mounted",
req: csi.NodeUnpublishVolumeRequest{TargetPath: targetFile, VolumeId: "vol_1"},
expectedErr: status.Error(codes.NotFound, "Volume not mounted"),
},
}
@ -220,9 +187,7 @@ func TestNodeUnpublishVolume(t *testing.T) {
}
_, err := ns.NodeUnpublishVolume(context.Background(), &tc.req)
if !reflect.DeepEqual(err, tc.expectedErr) {
if err == nil || tc.expectedErr == nil || !strings.Contains(err.Error(), tc.expectedErr.Error()) {
t.Errorf("Desc:%v\nUnexpected error: %v\nExpected: %v", tc.desc, err, tc.expectedErr)
}
t.Errorf("Desc:%v\nUnexpected error: %v\nExpected: %v", tc.desc, err, tc.expectedErr)
}
if tc.cleanup != nil {
tc.cleanup()

View File

@ -18,7 +18,6 @@ package nfs
import (
"fmt"
"os"
"strings"
"sync"
@ -29,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
netutil "k8s.io/utils/net"
)
func NewDefaultIdentityServer(d *Driver) *IdentityServer {
@ -134,29 +132,3 @@ func getMountOptions(context map[string]string) string {
}
return ""
}
// chmodIfPermissionMismatch only perform chmod when permission mismatches
func chmodIfPermissionMismatch(targetPath string, mode os.FileMode) error {
info, err := os.Lstat(targetPath)
if err != nil {
return err
}
perm := info.Mode() & os.ModePerm
if perm != mode {
klog.V(2).Infof("chmod targetPath(%s, mode:0%o) with permissions(0%o)", targetPath, info.Mode(), mode)
if err := os.Chmod(targetPath, mode); err != nil {
return err
}
} else {
klog.V(2).Infof("skip chmod on targetPath(%s) since mode is already 0%o)", targetPath, info.Mode())
}
return nil
}
// getServerFromSource if server is IPv6, return [IPv6]
func getServerFromSource(server string) string {
if netutil.IsIPv6String(server) {
return fmt.Sprintf("[%s]", server)
}
return server
}

View File

@ -18,9 +18,6 @@ package nfs
import (
"fmt"
"os"
"reflect"
"strings"
"testing"
)
@ -157,93 +154,3 @@ func TestGetMountOptions(t *testing.T) {
}
}
}
func TestChmodIfPermissionMismatch(t *testing.T) {
permissionMatchingPath, _ := getWorkDirPath("permissionMatchingPath")
_ = makeDir(permissionMatchingPath)
defer os.RemoveAll(permissionMatchingPath)
permissionMismatchPath, _ := getWorkDirPath("permissionMismatchPath")
_ = os.MkdirAll(permissionMismatchPath, os.FileMode(0721))
defer os.RemoveAll(permissionMismatchPath)
tests := []struct {
desc string
path string
mode os.FileMode
expectedError error
}{
{
desc: "Invalid path",
path: "invalid-path",
mode: 0755,
expectedError: fmt.Errorf("CreateFile invalid-path: The system cannot find the file specified"),
},
{
desc: "permission matching path",
path: permissionMatchingPath,
mode: 0755,
expectedError: nil,
},
{
desc: "permission mismatch path",
path: permissionMismatchPath,
mode: 0755,
expectedError: nil,
},
}
for _, test := range tests {
err := chmodIfPermissionMismatch(test.path, test.mode)
if !reflect.DeepEqual(err, test.expectedError) {
if err == nil || test.expectedError == nil && !strings.Contains(err.Error(), test.expectedError.Error()) {
t.Errorf("test[%s]: unexpected error: %v, expected error: %v", test.desc, err, test.expectedError)
}
}
}
}
// getWorkDirPath returns the path to the current working directory
func getWorkDirPath(dir string) (string, error) {
path, err := os.Getwd()
if err != nil {
return "", err
}
return fmt.Sprintf("%s%c%s", path, os.PathSeparator, dir), nil
}
func TestGetServerFromSource(t *testing.T) {
tests := []struct {
desc string
server string
result string
}{
{
desc: "ipv4",
server: "10.127.0.1",
result: "10.127.0.1",
},
{
desc: "ipv6",
server: "0:0:0:0:0:0:0:1",
result: "[0:0:0:0:0:0:0:1]",
},
{
desc: "ipv6 with brackets",
server: "[0:0:0:0:0:0:0:2]",
result: "[0:0:0:0:0:0:0:2]",
},
{
desc: "other fqdn",
server: "bing.com",
result: "bing.com",
},
}
for _, test := range tests {
result := getServerFromSource(test.server)
if result != test.result {
t.Errorf("Unexpected result: %s, expected: %s", result, test.result)
}
}
}

View File

@ -31,7 +31,15 @@ aliases:
# This documents who previously contributed to Kubernetes-CSI
# as approver.
emeritus_approvers:
emeritus_approver:
- lpabon
- sbezverk
- vladimirvivien
# This documents who previously contributed to Kubernetes-CSI
# as reviewer.
emeritus_reviewer:
- lpabon
- saad-ali
- sbezverk
- vladimirvivien

View File

@ -46,13 +46,10 @@ naming convention `<hostpath-deployment-version>-on-<kubernetes-version>`.
## Release Process
1. Identify all issues and ongoing PRs that should go into the release, and
drive them to resolution.
1. Download the latest version of the
[K8s release notes generator](https://github.com/kubernetes/release/tree/HEAD/cmd/release-notes)
1. Create a
[Github personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token)
with `repo:public_repo` access
1. Download v2.8+ [K8s release notes
generator](https://github.com/kubernetes/release/tree/HEAD/cmd/release-notes)
1. Generate release notes for the release. Replace arguments with the relevant
information.
information.
* Clean up old cached information (also needed if you are generating release
notes for multiple repos)
```bash
@ -60,24 +57,15 @@ naming convention `<hostpath-deployment-version>-on-<kubernetes-version>`.
```
* For new minor releases on master:
```bash
GITHUB_TOKEN=<token> release-notes \
--discover=mergebase-to-latest \
--org=kubernetes-csi \
--repo=external-provisioner \
--required-author="" \
--markdown-links \
--output out.md
GITHUB_TOKEN=<token> release-notes --discover=mergebase-to-latest
--github-org=kubernetes-csi --github-repo=external-provisioner
--required-author="" --output out.md
```
* For new patch releases on a release branch:
```bash
GITHUB_TOKEN=<token> release-notes \
--discover=patch-to-latest \
--branch=release-1.1 \
--org=kubernetes-csi \
--repo=external-provisioner \
--required-author="" \
--markdown-links \
--output out.md
GITHUB_TOKEN=<token> release-notes --discover=patch-to-latest --branch=release-1.1
--github-org=kubernetes-csi --github-repo=external-provisioner
--required-author="" --output out.md
```
1. Compare the generated output to the new commits for the release to check if
any notable change missed a release note.
@ -112,29 +100,6 @@ naming convention `<hostpath-deployment-version>-on-<kubernetes-version>`.
and [k/k
in-tree](https://github.com/kubernetes/kubernetes/tree/HEAD/test/e2e/testing-manifests/storage-csi/hostpath/hostpath)
### Troubleshooting
#### Image build jobs
The following jobs are triggered after tagging to produce the corresponding
image(s):
https://k8s-testgrid.appspot.com/sig-storage-image-build
Clicking on a failed build job opens that job in https://prow.k8s.io. Next to
the job title is a rerun icon (circle with arrow). Clicking it opens a popup
with a "rerun" button that maintainers with enough permissions can use. If in
doubt, ask someone on #sig-release to rerun the job.
Another way to rerun a job is to search for it in https://prow.k8s.io and click
the rerun icon in the resulting job list:
https://prow.k8s.io/?job=canary-csi-test-push-images
#### Verify images
Canary and staged images can be viewed at https://console.cloud.google.com/gcr/images/k8s-staging-sig-storage
Promoted images can be viewed at https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/sig-storage
## Adding support for a new Kubernetes release
1. Add the new release to `k8s_versions` in

View File

@ -78,7 +78,7 @@ version_to_git () {
# the list of windows versions was matched from:
# - https://hub.docker.com/_/microsoft-windows-nanoserver
# - https://hub.docker.com/_/microsoft-windows-servercore
configvar CSI_PROW_BUILD_PLATFORMS "linux amd64 amd64; linux ppc64le ppc64le -ppc64le; linux s390x s390x -s390x; linux arm arm -arm; linux arm64 arm64 -arm64; linux arm arm/v7 -armv7; windows amd64 amd64 .exe nanoserver:1809 servercore:ltsc2019; windows amd64 amd64 .exe nanoserver:20H2 servercore:20H2; windows amd64 amd64 .exe nanoserver:ltsc2022 servercore:ltsc2022" "Go target platforms (= GOOS + GOARCH) and file suffix of the resulting binaries"
configvar CSI_PROW_BUILD_PLATFORMS "linux amd64 amd64; linux ppc64le ppc64le -ppc64le; linux s390x s390x -s390x; linux arm arm -arm; linux arm64 arm64 -arm64; linux arm arm/v7 -armv7; windows amd64 amd64 .exe nanoserver:1809 servercore:ltsc2019; windows amd64 amd64 .exe nanoserver:1909 servercore:1909; windows amd64 amd64 .exe nanoserver:2004 servercore:2004; windows amd64 amd64 .exe nanoserver:20H2 servercore:20H2; windows amd64 amd64 .exe nanoserver:ltsc2022 servercore:ltsc2022" "Go target platforms (= GOOS + GOARCH) and file suffix of the resulting binaries"
# If we have a vendor directory, then use it. We must be careful to only
# use this for "make" invocations inside the project's repo itself because
@ -86,7 +86,7 @@ configvar CSI_PROW_BUILD_PLATFORMS "linux amd64 amd64; linux ppc64le ppc64le -pp
# which is disabled with GOFLAGS=-mod=vendor).
configvar GOFLAGS_VENDOR "$( [ -d vendor ] && echo '-mod=vendor' )" "Go flags for using the vendor directory"
configvar CSI_PROW_GO_VERSION_BUILD "1.18" "Go version for building the component" # depends on component's source code
configvar CSI_PROW_GO_VERSION_BUILD "1.17.3" "Go version for building the component" # depends on component's source code
configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e
configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below
configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below
@ -441,7 +441,10 @@ install_ginkgo () {
if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then
return
fi
run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" env GOBIN="${CSI_PROW_BIN}" go install "github.com/onsi/ginkgo/ginkgo@${CSI_PROW_GINKGO_VERSION}" || die "building ginkgo failed"
git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${CSI_PROW_GINKGO_VERSION}" --depth=1 &&
# We have to get dependencies and hence can't call just "go build".
run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" &&
mv "$GOPATH/bin/ginkgo" "${CSI_PROW_BIN}"
}
# Ensure that we have the desired version of dep.
@ -734,7 +737,7 @@ install_csi_driver () {
fi
}
# Installs all necessary snapshotter CRDs
# Installs all nessesary snapshotter CRDs
install_snapshot_crds() {
# Wait until volumesnapshot CRDs are in place.
CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/client/config/crd"

View File

@ -41,7 +41,7 @@ if [[ -z "$(command -v misspell)" ]]; then
# perform go get in a temp dir as we are not tracking this version in a go module
# if we do the go get in the repo, it will create / update a go.mod and go.sum
cd "${TMP_DIR}"
GO111MODULE=on GOBIN="${TMP_DIR}" go install "github.com/client9/misspell/cmd/misspell@${TOOL_VERSION}"
GO111MODULE=on GOBIN="${TMP_DIR}" go get "github.com/client9/misspell/cmd/misspell@${TOOL_VERSION}"
export PATH="${TMP_DIR}:${PATH}"
fi

View File

@ -17,14 +17,21 @@ limitations under the License.
package driver
import (
"github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
VolumeSnapshotClassKind = "VolumeSnapshotClass"
SnapshotAPIVersion = "snapshot.storage.k8s.io/v1beta1"
)
type PVTestDriver interface {
DynamicPVTestDriver
PreProvisionedVolumeTestDriver
VolumeSnapshotTestDriver
}
// DynamicPVTestDriver represents an interface for a CSI driver that supports DynamicPV
@ -41,6 +48,10 @@ type PreProvisionedVolumeTestDriver interface {
GetPreProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass
}
type VolumeSnapshotTestDriver interface {
GetVolumeSnapshotClass(namespace string) *v1beta1.VolumeSnapshotClass
}
func getStorageClass(
generateName string,
provisioner string,
@ -72,3 +83,18 @@ func getStorageClass(
AllowVolumeExpansion: &allowVolumeExpansion,
}
}
func getVolumeSnapshotClass(generateName string, provisioner string) *v1beta1.VolumeSnapshotClass {
return &v1beta1.VolumeSnapshotClass{
TypeMeta: metav1.TypeMeta{
Kind: VolumeSnapshotClassKind,
APIVersion: SnapshotAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
},
Driver: provisioner,
DeletionPolicy: v1beta1.VolumeSnapshotContentDelete,
}
}

View File

@ -22,6 +22,7 @@ import (
"strings"
"github.com/kubernetes-csi/csi-driver-nfs/pkg/nfs"
"github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
@ -68,6 +69,12 @@ func (d *NFSDriver) GetPreProvisionStorageClass(parameters map[string]string, mo
return getStorageClass(generateName, provisioner, parameters, mountOptions, reclaimPolicy, bindingMode, nil)
}
func (d *NFSDriver) GetVolumeSnapshotClass(namespace string) *v1beta1.VolumeSnapshotClass {
provisioner := d.driverName
generateName := fmt.Sprintf("%s-%s-dynamic-sc-", namespace, normalizeProvisioner(provisioner))
return getVolumeSnapshotClass(generateName, provisioner)
}
func (d *NFSDriver) GetPersistentVolume(volumeID string, fsType string, size string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, namespace string, attrib map[string]string, nodeStageSecretRef string) *v1.PersistentVolume {
provisioner := d.driverName
generateName := fmt.Sprintf("%s-%s-preprovsioned-pv-", namespace, normalizeProvisioner(provisioner))

View File

@ -23,7 +23,11 @@ import (
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/testsuites"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
clientset "k8s.io/client-go/kubernetes"
restclientset "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
)
@ -47,6 +51,12 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
cs = f.ClientSet
ns = f.Namespace
var err error
_, err = restClient(testsuites.SnapshotAPIGroup, testsuites.APIVersionv1beta1)
if err != nil {
ginkgo.Fail(fmt.Sprintf("could not get rest clientset: %v", err))
}
})
testDriver = driver.InitNFSDriver()
@ -70,29 +80,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: defaultStorageClassParameters,
}
test.Run(cs, ns)
})
ginkgo.It("should create a volume on demand with zero mountPermissions [nfs.csi.k8s.io]", func() {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
},
},
},
}
test := testsuites.DynamicallyProvisionedCmdVolumeTest{
CSIDriver: testDriver,
Pods: pods,
StorageClassParameters: storageClassParametersWithZeroMountPermisssions,
}
test.Run(cs, ns)
})
@ -291,9 +279,21 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
Server: nfsServerAddress,
Share: nfsShare,
MountOptions: "nconnect=8,nfsvers=4.1,sec=sys",
MountOptions: "nfsvers=4.1,sec=sys",
ReadOnly: false,
}
test.Run(cs, ns)
})
})
func restClient(group string, version string) (restclientset.Interface, error) {
config, err := framework.LoadConfig()
if err != nil {
ginkgo.Fail(fmt.Sprintf("could not load config: %v", err))
}
gv := schema.GroupVersion{Group: group, Version: version}
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: serializer.NewCodecFactory(runtime.NewScheme())}
return restclientset.RESTClientFor(config)
}

View File

@ -50,14 +50,6 @@ var (
"share": nfsShare,
"csi.storage.k8s.io/provisioner-secret-name": "mount-options",
"csi.storage.k8s.io/provisioner-secret-namespace": "default",
"mountPermissions": "0755",
}
storageClassParametersWithZeroMountPermisssions = map[string]string{
"server": nfsServerAddress,
"share": nfsShare,
"csi.storage.k8s.io/provisioner-secret-name": "mount-options",
"csi.storage.k8s.io/provisioner-secret-namespace": "default",
"mountPermissions": "0",
}
controllerServer *nfs.ControllerServer
)

View File

@ -44,7 +44,7 @@ func (t *DynamicallyProvisionedReclaimPolicyTest) Run(client clientset.Interface
if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain {
tpvc.WaitForPersistentVolumePhase(v1.VolumeReleased)
tpvc.DeleteBoundPersistentVolume()
// The controller server cannot resolve the nfs server hosting inside the testing k8s cluster, skipping the cleanup step.
// The controler server cannot resolve the nfs server hosting inside the testing k8s cluster, skipping the cleanup step.
// tpvc.DeleteBackingVolume(&t.ControllerServer)
}
}

View File

@ -32,8 +32,14 @@ const (
)
const (
VolumePVCKind = "PersistentVolumeClaim"
APIVersionv1beta1 = "v1beta1"
VolumeSnapshotKind = "VolumeSnapshot"
VolumePVCKind = "PersistentVolumeClaim"
APIVersionv1beta1 = "v1beta1"
SnapshotAPIVersion = "snapshot.storage.k8s.io/" + APIVersionv1beta1
)
var (
SnapshotAPIGroup = "snapshot.storage.k8s.io"
)
type PodDetails struct {

View File

@ -67,7 +67,7 @@ var podFailedCondition = func(pod *v1.Pod) (bool, error) {
ginkgo.By("Saw pod failure")
return true, nil
case v1.PodSucceeded:
return true, fmt.Errorf("pod %q succeeded with reason: %q, message: %q", pod.Name, pod.Status.Reason, pod.Status.Message)
return true, fmt.Errorf("pod %q successed with reason: %q, message: %q", pod.Name, pod.Status.Reason, pod.Status.Message)
default:
return false, nil
}

View File

@ -26,10 +26,11 @@ install_ginkgo () {
setup_e2e_binaries() {
# download k8s external e2e binary
curl -sL https://storage.googleapis.com/kubernetes-release/release/v1.24.0/kubernetes-test-linux-amd64.tar.gz --output e2e-tests.tar.gz
curl -sL https://storage.googleapis.com/kubernetes-release/release/v1.23.0/kubernetes-test-linux-amd64.tar.gz --output e2e-tests.tar.gz
tar -xvf e2e-tests.tar.gz && rm e2e-tests.tar.gz
export EXTRA_HELM_OPTIONS="--set driver.name=$DRIVER.csi.k8s.io --set controller.name=csi-$DRIVER-controller --set node.name=csi-$DRIVER-node --set feature.enableInlineVolume=true"
# enable fsGroupPolicy (only available from k8s 1.20)
export EXTRA_HELM_OPTIONS="--set feature.enableFSGroupPolicy=true --set driver.name=$DRIVER.csi.k8s.io --set controller.name=csi-$DRIVER-controller --set node.name=csi-$DRIVER-node --set image.csiProvisioner.tag=v3.0.0 --set driver.mountPermissions=0777"
# test on alternative driver name
sed -i "s/nfs.csi.k8s.io/$DRIVER.csi.k8s.io/g" deploy/example/storageclass-nfs.yaml
@ -50,6 +51,6 @@ setup_e2e_binaries
trap print_logs EXIT
ginkgo -p --progress --v -focus="External.Storage.*$DRIVER.csi.k8s.io" \
-skip='\[Disruptive\]|new pod with same fsgroup skips ownership changes to the volume contents|should provision storage with any volume data source' kubernetes/test/bin/e2e.test -- \
-skip='\[Disruptive\]|\[Slow\]' kubernetes/test/bin/e2e.test -- \
-storage.testdriver=$PROJECT_ROOT/test/external-e2e/testdriver.yaml \
--kubeconfig=$KUBECONFIG

View File

@ -4,16 +4,10 @@
StorageClass:
FromFile: /tmp/csi/storageclass.yaml
DriverInfo:
Name: test.csi.k8s.io
SupportedFsType: {"nfs"}
Name: nfs.csi.k8s.io
Capabilities:
persistence: true
exec: true
multipods: true
RWX: true
fsGroup: true
InlineVolumes:
- Attributes:
server: nfs-server.default.svc.cluster.local
share: /
Shared: true

View File

@ -32,14 +32,14 @@ function provision_nfs_server {
echo 'Installing NFS server on localhost'
apt-get update -y
apt-get install -y nfs-common
docker run -d --name nfs --privileged -p 2049:2049 -v "$(pwd)"/nfsshare:/nfsshare -e SHARED_DIRECTORY=/nfsshare itsthenetwork/nfs-server-alpine:latest
docker run -d --name nfs --privileged -p 2049:2049 -v $(pwd)/nfsshare:/nfsshare -e SHARED_DIRECTORY=/nfsshare itsthenetwork/nfs-server-alpine:latest
}
provision_nfs_server
readonly CSC_BIN="$GOBIN/csc"
readonly cap="1,mount,"
volname="citest-$(date +%s)"
readonly volname="citest-$(date +%s)"
readonly volsize="2147483648"
readonly endpoint="unix:///tmp/csi.sock"
readonly target_path="/tmp/targetpath"
@ -70,7 +70,6 @@ echo "publish volume test:"
"$CSC_BIN" node publish --endpoint "$endpoint" --cap "$cap" --vol-context "$params" --target-path "$target_path" "$volumeid"
sleep 2
declare staging_target_path
echo "node stats test:"
csc node stats --endpoint "$endpoint" "$volumeid:$target_path:$staging_target_path"
sleep 2

View File

@ -42,7 +42,7 @@ function provision_nfs_server {
echo 'Installing NFS server on localhost'
apt-get update -y
apt-get install -y nfs-common
docker run -d --name nfs --privileged -p 2049:2049 -v "$(pwd)"/nfsshare:/nfsshare -e SHARED_DIRECTORY=/nfsshare itsthenetwork/nfs-server-alpine:latest
docker run -d --name nfs --privileged -p 2049:2049 -v $(pwd)/nfsshare:/nfsshare -e SHARED_DIRECTORY=/nfsshare itsthenetwork/nfs-server-alpine:latest
}
provision_nfs_server

View File

@ -1,5 +1,3 @@
#!/bin/bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -e
echo "check the driver pods if restarts ..."
@ -28,4 +28,4 @@ do
fi
done
echo "no driver pods have restarted"
echo "======================================================================================"
echo "======================================================================================"

View File

@ -1,5 +1,3 @@
#!/bin/bash
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -e
NS=kube-system

12
vendor/github.com/imdario/mergo/.deepsource.toml generated vendored Normal file
View File

@ -0,0 +1,12 @@
version = 1
test_patterns = [
"*_test.go"
]
[[analyzers]]
name = "go"
enabled = true
[analyzers.meta]
import_path = "github.com/imdario/mergo"

View File

@ -4,4 +4,6 @@ install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- go test -race -v ./...
after_script:
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN

Some files were not shown because too many files have changed in this diff Show More