Merge remote-tracking branch 'upstream/master' into upstream-merge
* pkg/nfs/nodeserver.go
Significant manual merging due to conflicts with downstream
commit 59fe400d: https://github.com/openshift/csi-driver-nfs/pull/31
This commit is contained in:
commit
d32503ae8d
|
|
@ -0,0 +1 @@
|
|||
release-tools/cloudbuild.sh
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
---
|
||||
name: Bug Report
|
||||
about: Create a report to help us improve this project
|
||||
|
||||
---
|
||||
|
||||
<!-- Please use this template while reporting a bug and provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. Thanks!
|
||||
-->
|
||||
|
||||
|
||||
**What happened**:
|
||||
|
||||
**What you expected to happen**:
|
||||
|
||||
**How to reproduce it**:
|
||||
|
||||
**Anything else we need to know?**:
|
||||
|
||||
**Environment**:
|
||||
- CSI Driver version:
|
||||
- Kubernetes version (use `kubectl version`):
|
||||
- OS (e.g. from /etc/os-release):
|
||||
- Kernel (e.g. `uname -a`):
|
||||
- Install tools:
|
||||
- Others:
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
name: Enhancement Request
|
||||
about: Suggest an idea for this project
|
||||
|
||||
---
|
||||
|
||||
|
||||
**Is your feature request related to a problem?/Why is this needed**
|
||||
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
|
||||
|
||||
**Describe the solution you'd like in detail**
|
||||
<!-- A clear and concise description of what you want to happen. -->
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
|
||||
|
||||
**Additional context**
|
||||
<!-- Add any other context or screenshots about the feature request here. -->
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
name: Support Request
|
||||
about: Ask questions about this project
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
STOP -- PLEASE READ!
|
||||
|
||||
GitHub is not the right place for support requests.
|
||||
|
||||
If you're looking for help, post your question on the [Kubernetes Slack ](http://slack.k8s.io/) Sig-Storage Channel.
|
||||
|
||||
If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
|
||||
-->
|
||||
|
|
@ -36,5 +36,5 @@ If yes, a release note is required:
|
|||
Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required".
|
||||
-->
|
||||
```release-note
|
||||
|
||||
none
|
||||
```
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
name: Darwin
|
||||
on:
|
||||
pull_request: {}
|
||||
push: {}
|
||||
jobs:
|
||||
build:
|
||||
name: Unit Tests
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ^1.16
|
||||
id: go
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
- name: Run unit tests
|
||||
run: go test -v -race ./pkg/...
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
name: Linux Unit tests
|
||||
on:
|
||||
pull_request: {}
|
||||
push: {}
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ^1.16
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Build Test
|
||||
run: |
|
||||
export PATH=$PATH:$HOME/.local/bin
|
||||
make verify
|
||||
go test -covermode=count -coverprofile=profile.cov ./pkg/...
|
||||
export DOCKER_CLI_EXPERIMENTAL=enabled && make container
|
||||
|
||||
- name: Send coverage
|
||||
env:
|
||||
COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
GO111MODULE=off go get github.com/mattn/goveralls
|
||||
$(go env GOPATH)/bin/goveralls -coverprofile=profile.cov -service=github
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
name: Static Checks
|
||||
on:
|
||||
pull_request: {}
|
||||
push: {}
|
||||
jobs:
|
||||
go_lint:
|
||||
name: Go Lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@master
|
||||
- name: Run linter
|
||||
uses: golangci/golangci-lint-action@v2
|
||||
with:
|
||||
version: v1.29
|
||||
args: -E=gofmt,golint,misspell --timeout=30m0s
|
||||
verify-helm:
|
||||
name: Verify Helm
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@master
|
||||
- name: Verify Helm
|
||||
run: |
|
||||
sudo snap install yq
|
||||
sudo hack/verify-helm-chart.sh
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
name: Windows Tests
|
||||
on:
|
||||
pull_request: {}
|
||||
push: {}
|
||||
jobs:
|
||||
build:
|
||||
strategy:
|
||||
matrix:
|
||||
go-versions: [1.16.x]
|
||||
platform: [windows-latest]
|
||||
runs-on: ${{ matrix.platform }}
|
||||
steps:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v1
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Run Windows Unit Tests
|
||||
run: |
|
||||
go test -v -race ./pkg/...
|
||||
|
|
@ -1 +1,72 @@
|
|||
/bin
|
||||
# OSX leaves these everywhere on SMB shares
|
||||
._*
|
||||
|
||||
# OSX trash
|
||||
.DS_Store
|
||||
|
||||
# Eclipse files
|
||||
.classpath
|
||||
.project
|
||||
.settings/**
|
||||
|
||||
# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
|
||||
.idea/
|
||||
*.iml
|
||||
|
||||
# Vscode files
|
||||
.vscode
|
||||
|
||||
# This is where the result of the go build goes
|
||||
/output*/
|
||||
/_output*/
|
||||
/_output
|
||||
/bin
|
||||
|
||||
# Emacs save files
|
||||
*~
|
||||
\#*\#
|
||||
.\#*
|
||||
|
||||
# Vim-related files
|
||||
[._]*.s[a-w][a-z]
|
||||
[._]s[a-w][a-z]
|
||||
*.un~
|
||||
Session.vim
|
||||
.netrwhist
|
||||
|
||||
# cscope-related files
|
||||
cscope.*
|
||||
|
||||
# Go test binaries
|
||||
*.test
|
||||
|
||||
# JUnit test output from ginkgo e2e tests
|
||||
/junit*.xml
|
||||
|
||||
# Mercurial files
|
||||
**/.hg
|
||||
**/.hg*
|
||||
|
||||
# Vagrant
|
||||
.vagrant
|
||||
|
||||
.tags*
|
||||
|
||||
# Test artifacts produced by Jenkins jobs
|
||||
/_artifacts/
|
||||
|
||||
# Go dependencies installed on Jenkins
|
||||
/_gopath/
|
||||
|
||||
# direnv .envrc files
|
||||
.envrc
|
||||
|
||||
# This file used by some vendor repos (e.g. github.com/go-openapi/...) to store secret variables and should not be ignored
|
||||
!\.drone\.sec
|
||||
|
||||
# Godeps or dep workspace
|
||||
/Godeps/_workspace
|
||||
|
||||
/bazel-*
|
||||
*.pyc
|
||||
profile.cov
|
||||
|
|
|
|||
17
.prow.sh
17
.prow.sh
|
|
@ -1,3 +1,17 @@
|
|||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
#! /bin/bash
|
||||
|
||||
# A Prow job can override these defaults, but this shouldn't be necessary.
|
||||
|
|
@ -8,4 +22,7 @@
|
|||
|
||||
. release-tools/prow.sh
|
||||
|
||||
./release-tools/verify-boilerplate.sh "$(pwd)"
|
||||
./release-tools/verify-spelling.sh "$(pwd)"
|
||||
|
||||
main
|
||||
|
|
|
|||
22
Dockerfile
22
Dockerfile
|
|
@ -1,8 +1,26 @@
|
|||
FROM centos:latest
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
ARG ARCH=amd64
|
||||
|
||||
FROM k8s.gcr.io/build-image/debian-base-${ARCH}:v2.1.3
|
||||
|
||||
# Copy nfsplugin from build _output directory
|
||||
COPY bin/nfsplugin /nfsplugin
|
||||
|
||||
RUN yum -y install nfs-utils epel-release jq && yum clean all
|
||||
# this is a workaround to install nfs-common & nfs-kernel-server and don't quit with error
|
||||
# https://github.com/kubernetes-sigs/blob-csi-driver/issues/214#issuecomment-781602430
|
||||
RUN apt update && apt install ca-certificates mount nfs-common nfs-kernel-server -y || true
|
||||
|
||||
ENTRYPOINT ["/nfsplugin"]
|
||||
|
|
|
|||
111
Makefile
111
Makefile
|
|
@ -13,6 +13,115 @@
|
|||
# limitations under the License.
|
||||
|
||||
CMDS=nfsplugin
|
||||
all: build
|
||||
DEPLOY_FOLDER = ./deploy
|
||||
CMDS=nfsplugin
|
||||
PKG = github.com/kubernetes-csi/csi-driver-nfs
|
||||
GINKGO_FLAGS = -ginkgo.v
|
||||
GO111MODULE = on
|
||||
GOPATH ?= $(shell go env GOPATH)
|
||||
GOBIN ?= $(GOPATH)/bin
|
||||
DOCKER_CLI_EXPERIMENTAL = enabled
|
||||
export GOPATH GOBIN GO111MODULE DOCKER_CLI_EXPERIMENTAL
|
||||
|
||||
include release-tools/build.make
|
||||
|
||||
GIT_COMMIT = $(shell git rev-parse HEAD)
|
||||
BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
IMAGE_VERSION = v3.0.0
|
||||
LDFLAGS = -X ${PKG}/pkg/nfs.driverVersion=${IMAGE_VERSION} -X ${PKG}/pkg/nfs.gitCommit=${GIT_COMMIT} -X ${PKG}/pkg/nfs.buildDate=${BUILD_DATE}
|
||||
EXT_LDFLAGS = -s -w -extldflags "-static"
|
||||
# Use a custom version for E2E tests if we are testing in CI
|
||||
ifdef CI
|
||||
ifndef PUBLISH
|
||||
override IMAGE_VERSION := e2e-$(GIT_COMMIT)
|
||||
endif
|
||||
endif
|
||||
IMAGENAME ?= nfsplugin
|
||||
REGISTRY ?= andyzhangx
|
||||
REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g")
|
||||
IMAGE_TAG = $(REGISTRY)/$(IMAGENAME):$(IMAGE_VERSION)
|
||||
IMAGE_TAG_LATEST = $(REGISTRY)/$(IMAGENAME):latest
|
||||
|
||||
all: nfs
|
||||
|
||||
.PHONY: verify
|
||||
verify: unit-test
|
||||
hack/verify-all.sh
|
||||
|
||||
.PHONY: unit-test
|
||||
unit-test:
|
||||
go test -covermode=count -coverprofile=profile.cov ./pkg/... -v
|
||||
|
||||
.PHONY: sanity-test
|
||||
sanity-test: nfs
|
||||
./test/sanity/run-test.sh
|
||||
|
||||
.PHONY: integration-test
|
||||
integration-test: nfs
|
||||
./test/integration/run-test.sh
|
||||
|
||||
.PHONY: local-build-push
|
||||
local-build-push: nfs
|
||||
docker build -t $(LOCAL_USER)/nfsplugin:latest .
|
||||
docker push $(LOCAL_USER)/nfsplugin
|
||||
|
||||
.PHONY: local-k8s-install
|
||||
local-k8s-install:
|
||||
echo "Instlling locally"
|
||||
kubectl apply -f $(DEPLOY_FOLDER)/rbac-csi-nfs-controller.yaml
|
||||
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-driverinfo.yaml
|
||||
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-controller.yaml
|
||||
kubectl apply -f $(DEPLOY_FOLDER)/csi-nfs-node.yaml
|
||||
echo "Successfully installed"
|
||||
|
||||
.PHONY: local-k8s-uninstall
|
||||
local-k8s-uninstall:
|
||||
echo "Uninstalling driver"
|
||||
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-controller.yaml --ignore-not-found
|
||||
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-node.yaml --ignore-not-found
|
||||
kubectl delete -f $(DEPLOY_FOLDER)/csi-nfs-driverinfo.yaml --ignore-not-found
|
||||
kubectl delete -f $(DEPLOY_FOLDER)/rbac-csi-nfs-controller.yaml --ignore-not-found
|
||||
echo "Uninstalled NFS driver"
|
||||
|
||||
.PHONY: nfs
|
||||
nfs:
|
||||
CGO_ENABLED=0 GOOS=linux go build -a -ldflags "${LDFLAGS} ${EXT_LDFLAGS}" -mod vendor -o bin/nfsplugin ./cmd/nfsplugin
|
||||
|
||||
.PHONY: container
|
||||
container: nfs
|
||||
docker build --no-cache -t $(IMAGE_TAG) .
|
||||
|
||||
.PHONY: push
|
||||
push:
|
||||
docker push $(IMAGE_TAG)
|
||||
|
||||
.PHONY: push-latest
|
||||
push-latest:
|
||||
docker tag $(IMAGE_TAG) $(IMAGE_TAG_LATEST)
|
||||
docker push $(IMAGE_TAG_LATEST)
|
||||
|
||||
.PHONY: install-nfs-server
|
||||
install-nfs-server:
|
||||
kubectl apply -f ./deploy/example/nfs-provisioner/nfs-server.yaml
|
||||
|
||||
.PHONY: install-helm
|
||||
install-helm:
|
||||
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
|
||||
|
||||
.PHONY: e2e-bootstrap
|
||||
e2e-bootstrap: install-helm
|
||||
docker pull $(IMAGE_TAG) || make container push
|
||||
helm install csi-driver-nfs ./charts/latest/csi-driver-nfs --namespace kube-system --wait --timeout=15m -v=5 --debug \
|
||||
--set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) \
|
||||
--set image.nfs.tag=$(IMAGE_VERSION) \
|
||||
--set image.nfs.pullPolicy=Always
|
||||
--set controller.logLevel=8
|
||||
--set node.logLevel=8
|
||||
|
||||
.PHONY: e2e-teardown
|
||||
e2e-teardown:
|
||||
helm delete csi-driver-nfs --namespace kube-system
|
||||
|
||||
.PHONY: e2e-test
|
||||
e2e-test:
|
||||
go test -v -timeout=0 ./test/e2e ${GINKGO_FLAGS}
|
||||
|
|
|
|||
76
README.md
76
README.md
|
|
@ -1,6 +1,7 @@
|
|||
# CSI NFS driver
|
||||
[](https://coveralls.io/github/kubernetes-csi/csi-driver-nfs?branch=master)
|
||||
|
||||
## Overview
|
||||
### Overview
|
||||
|
||||
This is a repository for [NFS](https://en.wikipedia.org/wiki/Network_File_System) [CSI](https://kubernetes-csi.github.io/docs/) Driver.
|
||||
Currently it implements bare minimum of the [CSI spec](https://github.com/container-storage-interface/spec) and is in the alpha state
|
||||
|
|
@ -10,76 +11,34 @@ of the development.
|
|||
|
||||
| **nfs.csi.k8s.io** | K8s version compatibility | CSI versions compatibility | Dynamic Provisioning | Resize | Snapshots | Raw Block | AccessModes | Status |
|
||||
|--------------------|---------------------------|----------------------------|----------------------|--------|-----------|-----------|--------------------------|------------------------------------------------------------------------------|
|
||||
|master | 1.14 + | v1.0 + | no | no | no | no | Read/Write Multiple Pods | Alpha |
|
||||
|master | 1.16 + | v1.0 + | yes | no | no | no | Read/Write Multiple Pods | Alpha |
|
||||
|v2.0.0 | 1.14 + | v1.0 + | no | no | no | no | Read/Write Multiple Pods | Alpha |
|
||||
|v1.0.0 | 1.9 - 1.15 | v1.0 | no | no | no | no | Read/Write Multiple Pods | [deprecated](https://github.com/kubernetes-csi/drivers/tree/master/pkg/nfs) |
|
||||
|
||||
## Requirements
|
||||
### Requirements
|
||||
|
||||
The CSI NFS driver requires Kubernetes cluster of version 1.14 or newer and
|
||||
preexisting NFS server, whether it is deployed on cluster or provisioned
|
||||
independently. The plugin itself provides only a communication layer between
|
||||
resources in the cluser and the NFS server.
|
||||
|
||||
## Example
|
||||
### Install driver on a Kubernetes cluster
|
||||
- install by [kubectl](./docs/install-csi-driver.md)
|
||||
- install by [helm charts](./charts)
|
||||
|
||||
There are multiple ways to create a kubernetes cluster, the NFS CSI plugin
|
||||
should work invariantly of your cluster setup. Very simple way of getting
|
||||
a local environment for testing can be achieved using for example
|
||||
[kind](https://github.com/kubernetes-sigs/kind).
|
||||
### Driver parameters
|
||||
Please refer to [`nfs.csi.k8s.io` driver parameters](./docs/driver-parameters.md)
|
||||
|
||||
There are also multiple different NFS servers you can use for testing of
|
||||
the plugin, the major versions of the protocol v2, v3 and v4 should be supported
|
||||
by the current implementation.
|
||||
### Examples
|
||||
- [Set up a NFS Server on a Kubernetes cluster](./deploy/example/nfs-provisioner/README.md)
|
||||
- [Basic usage](./deploy/example/README.md)
|
||||
|
||||
The example assumes you have your cluster created (e.g. `kind create cluster`)
|
||||
and working NFS server (e.g. https://github.com/rootfs/nfs-ganesha-docker)
|
||||
### Troubleshooting
|
||||
- [CSI driver troubleshooting guide](./docs/csi-debug.md)
|
||||
|
||||
#### Deploy
|
||||
## Kubernetes Development
|
||||
Please refer to [development guide](./docs/csi-dev.md)
|
||||
|
||||
Deploy the NFS plugin along with the `CSIDriver` info.
|
||||
```
|
||||
kubectl -f deploy/kubernetes create
|
||||
```
|
||||
|
||||
#### Example Nginx application
|
||||
|
||||
The [/examples/kubernetes/nginx.yaml](/examples/kubernetes/nginx.yaml) contains a `PersistentVolume`,
|
||||
`PersistentVolumeClaim` and an nginx `Pod` mounting the NFS volume under `/var/www`.
|
||||
|
||||
You will need to update the NFS Server IP and the share information under
|
||||
`volumeAttributes` inside `PersistentVolume` in `nginx.yaml` file to match your
|
||||
NFS server public end point and configuration. You can also provide additional
|
||||
`mountOptions`, such as protocol version, in the `PersistentVolume` `spec`
|
||||
relevant for your NFS Server.
|
||||
|
||||
```
|
||||
kubectl -f examples/kubernetes/nginx.yaml create
|
||||
```
|
||||
|
||||
## Running Kubernetes End To End tests on an NFS Driver
|
||||
|
||||
First, stand up a local cluster `ALLOW_PRIVILEGED=1 hack/local-up-cluster.sh` (from your Kubernetes repo)
|
||||
For Fedora/RHEL clusters, the following might be required:
|
||||
```
|
||||
sudo chown -R $USER:$USER /var/run/kubernetes/
|
||||
sudo chown -R $USER:$USER /var/lib/kubelet
|
||||
sudo chcon -R -t svirt_sandbox_file_t /var/lib/kubelet
|
||||
```
|
||||
If you are plannig to test using your own private image, you could either install your nfs driver using your own set of YAML files, or edit the existing YAML files to use that private image.
|
||||
|
||||
When using the [existing set of YAML files](https://github.com/kubernetes-csi/csi-driver-nfs/tree/master/deploy/kubernetes), you would edit the [csi-attacher-nfsplugin.yaml](https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/deploy/kubernetes/csi-attacher-nfsplugin.yaml#L46) and [csi-nodeplugin-nfsplugin.yaml](https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/deploy/kubernetes/csi-nodeplugin-nfsplugin.yaml#L45) files to include your private image instead of the default one. After editing these files, skip to step 3 of the following steps.
|
||||
|
||||
If you already have a driver installed, skip to step 4 of the following steps.
|
||||
|
||||
1) Build the nfs driver by running `make`
|
||||
2) Create NFS Driver Image, where the image tag would be whatever that is required by your YAML deployment files `docker build -t quay.io/k8scsi/nfsplugin:v2.0.0 .`
|
||||
3) Install the Driver: `kubectl create -f deploy/kubernetes`
|
||||
4) Build E2E test binary: `make build-tests`
|
||||
5) Run E2E Tests using the following command: `./bin/tests --ginkgo.v --ginkgo.progress --kubeconfig=/var/run/kubernetes/admin.kubeconfig`
|
||||
|
||||
|
||||
## Community, discussion, contribution, and support
|
||||
### Community, discussion, contribution, and support
|
||||
|
||||
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
|
||||
|
||||
|
|
@ -88,7 +47,6 @@ You can reach the maintainers of this project at:
|
|||
- [Slack channel](https://kubernetes.slack.com/messages/sig-storage)
|
||||
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage)
|
||||
|
||||
|
||||
### Code of conduct
|
||||
|
||||
Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
|
||||
|
|
|
|||
|
|
@ -0,0 +1,56 @@
|
|||
# Install CSI driver with Helm 3
|
||||
|
||||
## Prerequisites
|
||||
- [install Helm](https://helm.sh/docs/intro/quickstart/#install-helm)
|
||||
|
||||
## install latest version
|
||||
```console
|
||||
helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
|
||||
helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system
|
||||
```
|
||||
|
||||
### install a specific version
|
||||
```console
|
||||
helm repo add csi-driver-nfs https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
|
||||
helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system --version v3.0.0
|
||||
```
|
||||
|
||||
### search for all available chart versions
|
||||
```console
|
||||
helm search repo -l csi-driver-nfs
|
||||
```
|
||||
|
||||
## uninstall CSI driver
|
||||
```console
|
||||
helm uninstall csi-driver-nfs -n kube-system
|
||||
```
|
||||
|
||||
## latest chart configuration
|
||||
|
||||
The following table lists the configurable parameters of the latest NFS CSI Driver chart and default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|---------------------------------------------------|------------------------------------------------------------|-------------------------------------------------------------------|
|
||||
| `image.nfs.repository` | csi-driver-nfs docker image | gcr.io/k8s-staging-sig-storage/nfsplugin |
|
||||
| `image.nfs.tag` | csi-driver-nfs docker image tag | amd64-linux-canary |
|
||||
| `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | IfNotPresent |
|
||||
| `image.csiProvisioner.repository` | csi-provisioner docker image | k8s.gcr.io/sig-storage/csi-provisioner |
|
||||
| `image.csiProvisioner.tag` | csi-provisioner docker image tag | v2.0.4 |
|
||||
| `image.csiProvisioner.pullPolicy` | csi-provisioner image pull policy | IfNotPresent |
|
||||
| `image.livenessProbe.repository` | liveness-probe docker image | k8s.gcr.io/sig-storage/livenessprobe |
|
||||
| `image.livenessProbe.tag` | liveness-probe docker image tag | v2.1.0 |
|
||||
| `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | IfNotPresent |
|
||||
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | k8s.gcr.io/sig-storage/csi-node-driver-registrar |
|
||||
| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | v2.0.1 |
|
||||
| `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | IfNotPresent |
|
||||
| `imagePullSecrets` | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods) |
|
||||
| `serviceAccount.create` | whether create service account of csi-nfs-controller | true |
|
||||
| `rbac.create` | whether create rbac of csi-nfs-controller | true |
|
||||
| `controller.replicas` | the replicas of csi-nfs-controller | 2 |
|
||||
| `controller.runOnMaster` | run controller on master node | false |
|
||||
| `controller.logLevel` | controller driver log level |`5` |
|
||||
| `node.logLevel` | node driver log level |`5` |
|
||||
|
||||
## troubleshooting
|
||||
- Add `--wait -v=5 --debug` in `helm install` command to get detailed error
|
||||
- Use `kubectl describe` to acquire more info
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
apiVersion: v1
|
||||
entries:
|
||||
csi-driver-nfs:
|
||||
- apiVersion: v1
|
||||
appVersion: latest
|
||||
created: 2021-02-05T13:28:16.728034918Z
|
||||
description: CSI NFS Driver for Kubernetes
|
||||
digest: aa12e668649d4f9fc979389618992c1195847feff048dab0f2413ef02fbcc837
|
||||
name: csi-driver-nfs
|
||||
urls:
|
||||
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/latest/csi-driver-nfs-v3.0.0.tgz
|
||||
version: v3.0.0
|
||||
- apiVersion: v1
|
||||
appVersion: v2.0.0
|
||||
created: 2021-02-05T13:28:16.728270519Z
|
||||
description: CSI NFS Driver for Kubernetes
|
||||
digest: f537a133eaa965f1c053ffac130f82c9b2b624e1f8bd42937c9c48818464eaac
|
||||
name: csi-driver-nfs
|
||||
urls:
|
||||
- https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts/v2.0.0/csi-driver-nfs-v2.0.0.tgz
|
||||
version: v2.0.0
|
||||
generated: 2021-02-05T13:28:16.727677516Z
|
||||
Binary file not shown.
|
|
@ -0,0 +1,22 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: v1
|
||||
appVersion: latest
|
||||
description: CSI NFS Driver for Kubernetes
|
||||
name: csi-driver-nfs
|
||||
version: v3.0.0
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
The CSI NFS Driver is getting deployed to your cluster.
|
||||
|
||||
To check CSI NFS Driver pods status, please run:
|
||||
|
||||
kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
|
||||
{{/* Expand the name of the chart.*/}}
|
||||
{{- define "nfs.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/* labels for helm resources */}}
|
||||
{{- define "nfs.labels" -}}
|
||||
labels:
|
||||
app.kubernetes.io/instance: "{{ .Release.Name }}"
|
||||
app.kubernetes.io/managed-by: "{{ .Release.Service }}"
|
||||
app.kubernetes.io/name: "{{ template "nfs.name" . }}"
|
||||
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
|
||||
{{- end -}}
|
||||
|
|
@ -0,0 +1,129 @@
|
|||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-nfs-controller
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
spec:
|
||||
replicas: {{ .Values.controller.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: csi-nfs-controller
|
||||
template:
|
||||
metadata:
|
||||
{{ include "nfs.labels" . | indent 6 }}
|
||||
app: csi-nfs-controller
|
||||
spec:
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{ toYaml .Values.imagePullSecrets | indent 8 }}
|
||||
{{- end }}
|
||||
hostNetwork: true # controller also needs to mount nfs to create dir
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: csi-nfs-controller-sa
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
{{- if .Values.controller.runOnMaster}}
|
||||
kubernetes.io/role: master
|
||||
{{- end}}
|
||||
priorityClassName: system-cluster-critical
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Equal"
|
||||
value: "true"
|
||||
effect: "NoSchedule"
|
||||
- key: "node-role.kubernetes.io/controlplane"
|
||||
operator: "Equal"
|
||||
value: "true"
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
|
||||
args:
|
||||
- "-v=2"
|
||||
- "--csi-address=$(ADDRESS)"
|
||||
- "--leader-election"
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /csi/csi.sock
|
||||
imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
|
||||
volumeMounts:
|
||||
- mountPath: /csi
|
||||
name: socket-dir
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 400Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: liveness-probe
|
||||
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
|
||||
args:
|
||||
- --csi-address=/csi/csi.sock
|
||||
- --probe-timeout=3s
|
||||
- --health-port=29652
|
||||
- --v=2
|
||||
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: nfs
|
||||
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
|
||||
args:
|
||||
- "--v={{ .Values.controller.logLevel }}"
|
||||
- "--nodeid=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///csi/csi.sock
|
||||
ports:
|
||||
- containerPort: 29652
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 30
|
||||
volumeMounts:
|
||||
- name: pods-mount-dir
|
||||
mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: "Bidirectional"
|
||||
- mountPath: /csi
|
||||
name: socket-dir
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
volumes:
|
||||
- name: pods-mount-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
type: Directory
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: storage.k8s.io/v1beta1
|
||||
kind: CSIDriver
|
||||
metadata:
|
||||
name: nfs.csi.k8s.io
|
||||
spec:
|
||||
attachRequired: false
|
||||
volumeLifecycleModes:
|
||||
- Persistent
|
||||
podInfoOnMount: true
|
||||
|
|
@ -0,0 +1,115 @@
|
|||
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
|
||||
# that are necessary to run CSI nodeplugin for nfs
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-nfs-node
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: csi-nfs-node
|
||||
template:
|
||||
metadata:
|
||||
{{ include "nfs.labels" . | indent 6 }}
|
||||
app: csi-nfs-node
|
||||
spec:
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{ toYaml .Values.imagePullSecrets | indent 8 }}
|
||||
{{- end }}
|
||||
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
containers:
|
||||
- name: liveness-probe
|
||||
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
|
||||
args:
|
||||
- --csi-address=/csi/csi.sock
|
||||
- --probe-timeout=3s
|
||||
- --health-port=29653
|
||||
- --v=2
|
||||
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: node-driver-registrar
|
||||
image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command: ["/bin/sh", "-c", "rm -rf /registration/csi-nfsplugin /registration/csi-nfsplugin-reg.sock"]
|
||||
args:
|
||||
- --v=2
|
||||
- --csi-address=/csi/csi.sock
|
||||
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
|
||||
env:
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
- name: registration-dir
|
||||
mountPath: /registration
|
||||
- name: nfs
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
|
||||
args :
|
||||
- "--v={{ .Values.node.logLevel }}"
|
||||
- "--nodeid=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///csi/csi.sock
|
||||
ports:
|
||||
- containerPort: 29653
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 30
|
||||
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
- name: pods-mount-dir
|
||||
mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: "Bidirectional"
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi-nfsplugin
|
||||
type: DirectoryOrCreate
|
||||
- name: pods-mount-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
type: Directory
|
||||
- hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: Directory
|
||||
name: registration-dir
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
{{- if .Values.serviceAccount.create -}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-nfs-controller-sa
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
---
|
||||
{{- end -}}
|
||||
|
||||
{{- if .Values.rbac.create -}}
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-external-provisioner-role
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csinodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-csi-provisioner-binding
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-nfs-controller-sa
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: nfs-external-provisioner-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end -}}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
image:
|
||||
nfs:
|
||||
repository: mcr.microsoft.com/k8s/csi/nfs-csi
|
||||
tag: latest
|
||||
pullPolicy: IfNotPresent
|
||||
csiProvisioner:
|
||||
repository: k8s.gcr.io/sig-storage/csi-provisioner
|
||||
tag: v2.1.0
|
||||
pullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
repository: k8s.gcr.io/sig-storage/livenessprobe
|
||||
tag: v2.1.0
|
||||
pullPolicy: IfNotPresent
|
||||
nodeDriverRegistrar:
|
||||
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
|
||||
tag: v2.1.0
|
||||
pullPolicy: IfNotPresent
|
||||
serviceAccount:
|
||||
create: true
|
||||
rbac:
|
||||
create: true
|
||||
controller:
|
||||
replicas: 2
|
||||
runOnMaster: false
|
||||
logLevel: 5
|
||||
|
||||
node:
|
||||
logLevel: 5
|
||||
|
||||
## Reference to one or more secrets to be used when pulling images
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
##
|
||||
imagePullSecrets: []
|
||||
# - name: "image-pull-secret"
|
||||
Binary file not shown.
|
|
@ -0,0 +1,22 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: v1
|
||||
appVersion: v2.0.0
|
||||
description: CSI NFS Driver for Kubernetes
|
||||
name: csi-driver-nfs
|
||||
version: v2.0.0
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
The CSI NFS Driver is getting deployed to your cluster.
|
||||
|
||||
To check CSI NFS Driver pods status, please run:
|
||||
|
||||
kubectl --namespace={{ .Release.Namespace }} get pods --selector="release={{ .Release.Name }}" --watch
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
|
||||
{{/* labels for helm resources */}}
|
||||
{{- define "nfs.labels" -}}
|
||||
labels:
|
||||
heritage: "{{ .Release.Service }}"
|
||||
release: "{{ .Release.Name }}"
|
||||
revision: "{{ .Release.Revision }}"
|
||||
chart: "{{ .Chart.Name }}"
|
||||
chartVersion: "{{ .Chart.Version }}"
|
||||
{{- end -}}
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-nfs-controller
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
spec:
|
||||
replicas: {{ .Values.controller.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: csi-nfs-controller
|
||||
template:
|
||||
metadata:
|
||||
{{ include "nfs.labels" . | indent 6 }}
|
||||
app: csi-nfs-controller
|
||||
spec:
|
||||
serviceAccountName: csi-nfs-controller-sa
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
priorityClassName: system-cluster-critical
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Equal"
|
||||
value: "true"
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
|
||||
args:
|
||||
- "-v=5"
|
||||
- "--csi-address=$(ADDRESS)"
|
||||
- "--leader-election"
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /csi/csi.sock
|
||||
imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
|
||||
volumeMounts:
|
||||
- mountPath: /csi
|
||||
name: socket-dir
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: liveness-probe
|
||||
image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
|
||||
args:
|
||||
- --csi-address=/csi/csi.sock
|
||||
- --probe-timeout=3s
|
||||
- --health-port=29642
|
||||
- --v=5
|
||||
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: nfs
|
||||
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
|
||||
args:
|
||||
- "-v=5"
|
||||
- "--nodeid=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///csi/csi.sock
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /plugin
|
||||
- name: pods-mount-dir
|
||||
mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: "Bidirectional"
|
||||
- mountPath: /csi
|
||||
name: socket-dir
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
volumes:
|
||||
- name: plugin-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi-nfsplugin
|
||||
type: DirectoryOrCreate
|
||||
- name: pods-mount-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
type: Directory
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
|
||||
# that are necessary to run CSI nodeplugin for nfs
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-nfs-node
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: csi-nfs-node
|
||||
template:
|
||||
metadata:
|
||||
{{ include "nfs.labels" . | indent 6 }}
|
||||
app: csi-nfs-node
|
||||
spec:
|
||||
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
containers:
|
||||
- name: node-driver-registrar
|
||||
image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command: ["/bin/sh", "-c", "rm -rf /registration/csi-nfsplugin /registration/csi-nfsplugin-reg.sock"]
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=/plugin/csi.sock
|
||||
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
|
||||
env:
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /plugin
|
||||
- name: registration-dir
|
||||
mountPath: /registration
|
||||
- name: nfs
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: "{{ .Values.image.nfs.repository }}:{{ .Values.image.nfs.tag }}"
|
||||
args :
|
||||
- "-v=5"
|
||||
- "--nodeid=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix://plugin/csi.sock
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /plugin
|
||||
- name: pods-mount-dir
|
||||
mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: "Bidirectional"
|
||||
volumes:
|
||||
- name: plugin-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi-nfsplugin
|
||||
type: DirectoryOrCreate
|
||||
- name: pods-mount-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
type: Directory
|
||||
- hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: Directory
|
||||
name: registration-dir
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
{{- if .Values.serviceAccount.create -}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-nfs-controller-sa
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
---
|
||||
{{- end -}}
|
||||
|
||||
{{- if .Values.rbac.create -}}
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-external-provisioner-role
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csinodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-csi-provisioner-binding
|
||||
{{ include "nfs.labels" . | indent 2 }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-nfs-controller-sa
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: nfs-external-provisioner-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end -}}
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
image:
|
||||
nfs:
|
||||
repository: quay.io/k8scsi/nfsplugin
|
||||
tag: v2.0.0
|
||||
pullPolicy: IfNotPresent
|
||||
csiProvisioner:
|
||||
repository: k8s.gcr.io/sig-storage/csi-provisioner
|
||||
tag: v2.0.4
|
||||
pullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
repository: k8s.gcr.io/sig-storage/livenessprobe
|
||||
tag: v2.1.0
|
||||
pullPolicy: IfNotPresent
|
||||
nodeDriverRegistrar:
|
||||
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
|
||||
tag: v2.0.1
|
||||
pullPolicy: IfNotPresent
|
||||
serviceAccount:
|
||||
create: true
|
||||
rbac:
|
||||
create: true
|
||||
controller:
|
||||
replicas: 2
|
||||
|
|
@ -0,0 +1 @@
|
|||
release-tools/cloudbuild.yaml
|
||||
|
|
@ -22,65 +22,45 @@ import (
|
|||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/kubernetes-csi/csi-driver-nfs/pkg/nfs"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
endpoint string
|
||||
nodeID string
|
||||
perm string
|
||||
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
|
||||
nodeID = flag.String("nodeid", "", "node id")
|
||||
perm = flag.String("mount-permissions", "", "mounted folder permissions")
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.Set("logtostderr", "true")
|
||||
_ = flag.Set("logtostderr", "true")
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
flag.CommandLine.Parse([]string{})
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "NFS",
|
||||
Short: "CSI based NFS driver",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
handle()
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().AddGoFlagSet(flag.CommandLine)
|
||||
|
||||
cmd.PersistentFlags().StringVar(&nodeID, "nodeid", "", "node id")
|
||||
cmd.MarkPersistentFlagRequired("nodeid")
|
||||
|
||||
cmd.PersistentFlags().StringVar(&endpoint, "endpoint", "", "CSI endpoint")
|
||||
cmd.MarkPersistentFlagRequired("endpoint")
|
||||
|
||||
cmd.PersistentFlags().StringVar(&perm, "mount-permissions", "", "mounted folder permissions")
|
||||
|
||||
cmd.ParseFlags(os.Args[1:])
|
||||
if err := cmd.Execute(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s", err.Error())
|
||||
os.Exit(1)
|
||||
klog.InitFlags(nil)
|
||||
flag.Parse()
|
||||
if *nodeID == "" {
|
||||
klog.Warning("nodeid is empty")
|
||||
}
|
||||
|
||||
handle()
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func handle() {
|
||||
// Converting string permission representation to *uint32
|
||||
var parsedPerm *uint32
|
||||
if perm != "" {
|
||||
permu64, err := strconv.ParseUint(perm, 8, 32)
|
||||
if perm != nil && *perm != "" {
|
||||
permu64, err := strconv.ParseUint(*perm, 8, 32)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Incorrect mount-permissions value: %q", perm)
|
||||
fmt.Fprintf(os.Stderr, "incorrect mount-permissions value: %q", *perm)
|
||||
os.Exit(1)
|
||||
}
|
||||
permu32 := uint32(permu64)
|
||||
parsedPerm = &permu32
|
||||
}
|
||||
|
||||
d := nfs.NewNFSdriver(nodeID, endpoint, parsedPerm)
|
||||
d.Run()
|
||||
d := nfs.NewNFSdriver(*nodeID, *endpoint, parsedPerm)
|
||||
d.Run(false)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,38 +0,0 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
_ "github.com/kubernetes-csi/csi-driver-nfs/test"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func init() {
|
||||
framework.HandleFlags()
|
||||
framework.AfterReadingAllFlags(&framework.TestContext)
|
||||
}
|
||||
|
||||
func Test(t *testing.T) {
|
||||
flag.Parse()
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "CSI Suite")
|
||||
}
|
||||
|
||||
func main() {
|
||||
Test(&testing.T{})
|
||||
}
|
||||
|
|
@ -0,0 +1,119 @@
|
|||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-nfs-controller
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: csi-nfs-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: csi-nfs-controller
|
||||
spec:
|
||||
hostNetwork: true # controller also needs to mount nfs to create dir
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: csi-nfs-controller-sa
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
|
||||
priorityClassName: system-cluster-critical
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Equal"
|
||||
value: "true"
|
||||
effect: "NoSchedule"
|
||||
- key: "node-role.kubernetes.io/controlplane"
|
||||
operator: "Equal"
|
||||
value: "true"
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0
|
||||
args:
|
||||
- "-v=2"
|
||||
- "--csi-address=$(ADDRESS)"
|
||||
- "--leader-election"
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /csi/csi.sock
|
||||
volumeMounts:
|
||||
- mountPath: /csi
|
||||
name: socket-dir
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 400Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: liveness-probe
|
||||
image: k8s.gcr.io/sig-storage/livenessprobe:v2.1.0
|
||||
args:
|
||||
- --csi-address=/csi/csi.sock
|
||||
- --probe-timeout=3s
|
||||
- --health-port=29652
|
||||
- --v=2
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: nfs
|
||||
image: mcr.microsoft.com/k8s/csi/nfs-csi:latest
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- "-v=5"
|
||||
- "--nodeid=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///csi/csi.sock
|
||||
ports:
|
||||
- containerPort: 29652
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 30
|
||||
volumeMounts:
|
||||
- name: pods-mount-dir
|
||||
mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: "Bidirectional"
|
||||
- mountPath: /csi
|
||||
name: socket-dir
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
volumes:
|
||||
- name: pods-mount-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
type: Directory
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
apiVersion: storage.k8s.io/v1beta1
|
||||
kind: CSIDriver
|
||||
metadata:
|
||||
name: nfs.csi.k8s.io
|
||||
spec:
|
||||
attachRequired: false
|
||||
volumeLifecycleModes:
|
||||
- Persistent
|
||||
podInfoOnMount: true
|
||||
|
|
@ -0,0 +1,109 @@
|
|||
---
|
||||
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
|
||||
# that are necessary to run CSI nodeplugin for nfs
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-nfs-node
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: csi-nfs-node
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: csi-nfs-node
|
||||
spec:
|
||||
hostNetwork: true # original nfs connection would be broken without hostNetwork setting
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
containers:
|
||||
- name: liveness-probe
|
||||
image: k8s.gcr.io/sig-storage/livenessprobe:v2.1.0
|
||||
args:
|
||||
- --csi-address=/csi/csi.sock
|
||||
- --probe-timeout=3s
|
||||
- --health-port=29653
|
||||
- --v=2
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- name: node-driver-registrar
|
||||
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command: ["/bin/sh", "-c", "rm -rf /registration/csi-nfsplugin /registration/csi-nfsplugin-reg.sock"]
|
||||
args:
|
||||
- --v=2
|
||||
- --csi-address=/csi/csi.sock
|
||||
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
|
||||
env:
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
- name: registration-dir
|
||||
mountPath: /registration
|
||||
- name: nfs
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: mcr.microsoft.com/k8s/csi/nfs-csi:latest
|
||||
args:
|
||||
- "-v=5"
|
||||
- "--nodeid=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///csi/csi.sock
|
||||
ports:
|
||||
- containerPort: 29653
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 30
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /csi
|
||||
- name: pods-mount-dir
|
||||
mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: "Bidirectional"
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi-nfsplugin
|
||||
type: DirectoryOrCreate
|
||||
- name: pods-mount-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
type: Directory
|
||||
- hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: Directory
|
||||
name: registration-dir
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
# CSI driver example
|
||||
|
||||
After the NFS CSI Driver is deployed in your cluster, you can follow this documentation to quickly deploy some examples.
|
||||
|
||||
You can use NFS CSI Driver to provision Persistent Volumes statically or dynamically. Please read [Kubernetes Persistent Volumes documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for more information about Static and Dynamic provisioning.
|
||||
|
||||
Please refer to [driver parameters](../../docs/driver-parameters.md) for more detailed usage.
|
||||
|
||||
## Prerequisite
|
||||
|
||||
- [Set up a NFS Server on a Kubernetes cluster](./nfs-provisioner/README.md)
|
||||
- [Install NFS CSI Driver](../../docs/install-csi-driver.md)
|
||||
|
||||
## Storage Class Usage (Dynamic Provisioning)
|
||||
|
||||
- Follow the folling command to create a `StorageClass`, and then `PersistentVolume` and `PersistentVolumeClaim` dynamically.
|
||||
|
||||
```bash
|
||||
# create StorageClass
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/storageclass-nfs.yaml
|
||||
|
||||
# create PVC
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/pvc-nfs-csi-dynamic.yaml
|
||||
```
|
||||
|
||||
## PV/PVC Usage (Static Provisioning)
|
||||
|
||||
- Follow the folling command to create `PersistentVolume` and `PersistentVolumeClaim` statically.
|
||||
|
||||
```bash
|
||||
# create PV
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/pv-nfs-csi.yaml
|
||||
|
||||
# create PVC
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/pvc-nfs-csi-static.yaml
|
||||
```
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: pvc-deployment-nfs
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany # In this example, multiple Pods consume the same PVC.
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
storageClassName: nfs-csi
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: deployment-nfs
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: deployment-nfs
|
||||
template:
|
||||
metadata:
|
||||
name: deployment-nfs
|
||||
labels:
|
||||
name: deployment-nfs
|
||||
spec:
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
containers:
|
||||
- name: deployment-nfs
|
||||
image: mcr.microsoft.com/oss/nginx/nginx:1.19.5
|
||||
command:
|
||||
- "/bin/bash"
|
||||
- "-c"
|
||||
- set -euo pipefail; while true; do echo $(hostname) $(date) >> /mnt/nfs/outfile; sleep 1; done
|
||||
volumeMounts:
|
||||
- name: nfs
|
||||
mountPath: "/mnt/nfs"
|
||||
volumes:
|
||||
- name: nfs
|
||||
persistentVolumeClaim:
|
||||
claimName: pvc-deployment-nfs
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
# Set up a NFS Server on a Kubernetes cluster
|
||||
|
||||
After the NFS CSI Driver is deployed in your cluster, you can follow this documentation to quickly deploy some example applications. You can use NFS CSI Driver to provision Persistent Volumes statically or dynamically. Please read Kubernetes Persistent Volumes for more information about Static and Dynamic provisioning.
|
||||
|
||||
There are multiple different NFS servers you can use for testing of
|
||||
the plugin, the major versions of the protocol v2, v3 and v4 should be supported
|
||||
by the current implementation. This page will show you how to set up a NFS Server deployment on a Kubernetes cluster.
|
||||
|
||||
- To create a NFS provisioner on your Kubernetes cluster, run the following command.
|
||||
|
||||
```bash
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/nfs-provisioner/nfs-server.yaml
|
||||
```
|
||||
|
||||
- During the deployment, a new service `nfs-server` will be created which exposes the NFS server endpoint `nfs-server.default.svc.cluster.local` and the share path `/`. You can specify `PersistentVolume` or `StorageClass` using these information.
|
||||
|
||||
- Deploy the NFS CSI driver, please refer to [install NFS CSI driver](../../../docs/install-csi-driver.md).
|
||||
|
||||
- To check if the NFS server is working, we can statically create a PersistentVolume and a PersistentVolumeClaim, and mount it onto a sample pod:
|
||||
|
||||
```bash
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/nfs-provisioner/nginx-pod.yaml
|
||||
```
|
||||
|
||||
- Verify if the NFS server is functional, you can check the mount point from the example pod.
|
||||
|
||||
```bash
|
||||
kubectl exec nginx-nfs-example -- bash -c "findmnt /var/www -o TARGET,SOURCE,FSTYPE"
|
||||
```
|
||||
|
||||
- The output should look like the following:
|
||||
|
||||
```bash
|
||||
TARGET SOURCE FSTYPE
|
||||
/var/www nfs-server.default.svc.cluster.local:/ nfs4
|
||||
```
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: nfs-server
|
||||
labels:
|
||||
app: nfs-server
|
||||
spec:
|
||||
type: ClusterIP # use "LoadBalancer" to get a public ip
|
||||
selector:
|
||||
app: nfs-server
|
||||
ports:
|
||||
- name: tcp-2049
|
||||
port: 2049
|
||||
protocol: TCP
|
||||
- name: udp-111
|
||||
port: 111
|
||||
protocol: UDP
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: nfs-server
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nfs-server
|
||||
template:
|
||||
metadata:
|
||||
name: nfs-server
|
||||
labels:
|
||||
app: nfs-server
|
||||
spec:
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
containers:
|
||||
- name: nfs-server
|
||||
image: itsthenetwork/nfs-server-alpine:latest
|
||||
env:
|
||||
- name: SHARED_DIRECTORY
|
||||
value: "/exports"
|
||||
volumeMounts:
|
||||
- mountPath: /exports
|
||||
name: nfs-vol
|
||||
securityContext:
|
||||
privileged: true
|
||||
ports:
|
||||
- name: tcp-2049
|
||||
containerPort: 2049
|
||||
protocol: TCP
|
||||
- name: udp-111
|
||||
containerPort: 111
|
||||
protocol: UDP
|
||||
volumes:
|
||||
- name: nfs-vol
|
||||
hostPath:
|
||||
path: /nfs-vol # modify this to specify another path to store nfs share data
|
||||
type: DirectoryOrCreate
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: pv-nginx
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
mountOptions:
|
||||
- hard
|
||||
- nfsvers=4.1
|
||||
csi:
|
||||
driver: nfs.csi.k8s.io
|
||||
readOnly: false
|
||||
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
|
||||
volumeAttributes:
|
||||
server: nfs-server.default.svc.cluster.local
|
||||
share: /
|
||||
---
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: pvc-nginx
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
volumeName: pv-nginx
|
||||
storageClassName: ""
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-nfs-example
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: /var/www
|
||||
name: pvc-nginx
|
||||
volumes:
|
||||
- name: pvc-nginx
|
||||
persistentVolumeClaim:
|
||||
claimName: pvc-nginx
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: pv-nfs
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
mountOptions:
|
||||
- hard
|
||||
- nfsvers=4.1
|
||||
csi:
|
||||
driver: nfs.csi.k8s.io
|
||||
readOnly: false
|
||||
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
|
||||
volumeAttributes:
|
||||
server: nfs-server.default.svc.cluster.local
|
||||
share: /
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: pvc-nfs-dynamic
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
storageClassName: nfs-csi
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
---
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: pvc-nfs-static
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
volumeName: pv-nfs
|
||||
storageClassName: ""
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: statefulset-nfs
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
serviceName: statefulset-nfs
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
containers:
|
||||
- name: statefulset-nfs
|
||||
image: mcr.microsoft.com/oss/nginx/nginx:1.19.5
|
||||
command:
|
||||
- "/bin/bash"
|
||||
- "-c"
|
||||
- set -euo pipefail; while true; do echo $(date) >> /mnt/nfs/outfile; sleep 1; done
|
||||
volumeMounts:
|
||||
- name: persistent-storage
|
||||
mountPath: /mnt/nfs
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: persistent-storage
|
||||
annotations:
|
||||
volume.beta.kubernetes.io/storage-class: nfs-csi
|
||||
spec:
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: nfs-csi
|
||||
provisioner: nfs.csi.k8s.io
|
||||
parameters:
|
||||
server: nfs-server.default.svc.cluster.local
|
||||
share: /
|
||||
reclaimPolicy: Retain
|
||||
volumeBindingMode: Immediate
|
||||
mountOptions:
|
||||
- hard
|
||||
- nfsvers=4.1
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
ver="master"
|
||||
if [[ "$#" -gt 0 ]]; then
|
||||
ver="$1"
|
||||
fi
|
||||
|
||||
repo="https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/$ver/deploy"
|
||||
if [[ "$#" -gt 1 ]]; then
|
||||
if [[ "$2" == *"local"* ]]; then
|
||||
echo "use local deploy"
|
||||
repo="./deploy"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $ver != "master" ]; then
|
||||
repo="$repo/$ver"
|
||||
fi
|
||||
|
||||
echo "Installing NFS CSI driver, version: $ver ..."
|
||||
kubectl apply -f $repo/rbac-csi-nfs-controller.yaml
|
||||
kubectl apply -f $repo/csi-nfs-driverinfo.yaml
|
||||
kubectl apply -f $repo/csi-nfs-controller.yaml
|
||||
kubectl apply -f $repo/csi-nfs-node.yaml
|
||||
echo 'NFS CSI driver installed successfully.'
|
||||
|
|
@ -1,75 +0,0 @@
|
|||
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
|
||||
# that are necessary to run CSI nodeplugin for nfs
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-nodeplugin-nfsplugin
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: csi-nodeplugin-nfsplugin
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: csi-nodeplugin-nfsplugin
|
||||
spec:
|
||||
serviceAccount: csi-nodeplugin
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: node-driver-registrar
|
||||
image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command: ["/bin/sh", "-c", "rm -rf /registration/csi-nfsplugin /registration/csi-nfsplugin-reg.sock"]
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=/plugin/csi.sock
|
||||
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
|
||||
env:
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /plugin
|
||||
- name: registration-dir
|
||||
mountPath: /registration
|
||||
- name: nfs
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: quay.io/k8scsi/nfsplugin:v2.0.0
|
||||
args :
|
||||
- "--nodeid=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix://plugin/csi.sock
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /plugin
|
||||
- name: pods-mount-dir
|
||||
mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: "Bidirectional"
|
||||
volumes:
|
||||
- name: plugin-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi-nfsplugin
|
||||
type: DirectoryOrCreate
|
||||
- name: pods-mount-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/pods
|
||||
type: Directory
|
||||
- hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: Directory
|
||||
name: registration-dir
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
# This YAML defines all API objects to create RBAC roles for CSI node plugin
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-nodeplugin
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: csi-nodeplugin
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: csi-nodeplugin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-nodeplugin
|
||||
namespace: default
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: csi-nodeplugin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-nfs-controller-sa
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-external-provisioner-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csinodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
---
|
||||
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: nfs-csi-provisioner-binding
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-nfs-controller-sa
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: nfs-external-provisioner-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
ver="master"
|
||||
if [[ "$#" -gt 0 ]]; then
|
||||
ver="$1"
|
||||
fi
|
||||
|
||||
repo="https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/$ver/deploy"
|
||||
if [[ "$#" -gt 1 ]]; then
|
||||
if [[ "$2" == *"local"* ]]; then
|
||||
echo "use local deploy"
|
||||
repo="./deploy"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $ver != "master" ]; then
|
||||
repo="$repo/$ver"
|
||||
fi
|
||||
|
||||
echo "Uninstalling NFS driver, version: $ver ..."
|
||||
kubectl delete -f $repo/csi-nfs-controller.yaml --ignore-not-found
|
||||
kubectl delete -f $repo/csi-nfs-node.yaml --ignore-not-found
|
||||
kubectl delete -f $repo/csi-nfs-driverinfo.yaml --ignore-not-found
|
||||
kubectl delete -f $repo/rbac-csi-nfs-controller.yaml --ignore-not-found
|
||||
echo 'Uninstalled NFS driver successfully.'
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
## CSI driver debug tips
|
||||
|
||||
### Case#1: volume create/delete failed
|
||||
- locate csi driver pod
|
||||
```console
|
||||
$ kubectl get pod -o wide -n kube-system | grep csi-nfs-controller
|
||||
NAME READY STATUS RESTARTS AGE IP NODE
|
||||
csi-nfs-controller-56bfddd689-dh5tk 5/5 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
|
||||
csi-nfs-controller-56bfddd689-sl4ll 5/5 Running 0 35s 10.240.0.23 k8s-agentpool-22533604-1
|
||||
```
|
||||
- get csi driver logs
|
||||
```console
|
||||
$ kubectl logs csi-nfs-controller-56bfddd689-dh5tk -c nfs -n kube-system > csi-nfs-controller.log
|
||||
```
|
||||
> note: there could be multiple controller pods, if there are no helpful logs, try to get logs from other controller pods
|
||||
|
||||
### Case#2: volume mount/unmount failed
|
||||
- locate csi driver pod and figure out which pod does tha actual volume mount/unmount
|
||||
|
||||
```console
|
||||
$ kubectl get pod -o wide -n kube-system | grep csi-nfs-node
|
||||
NAME READY STATUS RESTARTS AGE IP NODE
|
||||
csi-nfs-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1
|
||||
csi-nfs-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0
|
||||
```
|
||||
|
||||
- get csi driver logs
|
||||
```console
|
||||
$ kubectl logs csi-nfs-node-cvgbs -c nfs -n kube-system > csi-nfs-node.log
|
||||
```
|
||||
|
||||
### troubleshooting connection failure on agent node
|
||||
```console
|
||||
mkdir /tmp/test
|
||||
mount -v -t nfs -o ... nfs-server:/path /tmp/test
|
||||
```
|
||||
|
|
@ -0,0 +1,119 @@
|
|||
# NFS CSI driver development guide
|
||||
|
||||
## How to build this project
|
||||
- Clone repo
|
||||
```console
|
||||
$ mkdir -p $GOPATH/src/sigs.k8s.io/
|
||||
$ git clone https://github.com/kubernetes-csi/csi-driver-nfs $GOPATH/src/github.com/kubernetes-csi/csi-driver-nfs
|
||||
```
|
||||
|
||||
- Build CSI driver
|
||||
```console
|
||||
$ cd $GOPATH/src/github.com/kubernetes-csi/csi-driver-nfs
|
||||
$ make
|
||||
```
|
||||
|
||||
- Run verification test before submitting code
|
||||
```console
|
||||
$ make verify
|
||||
```
|
||||
|
||||
- If there is config file changed under `charts` directory, run following command to update chart file
|
||||
```console
|
||||
helm package charts/latest/csi-driver-nfs -d charts/latest/
|
||||
```
|
||||
|
||||
## How to test CSI driver in local environment
|
||||
|
||||
Install `csc` tool according to https://github.com/rexray/gocsi/tree/master/csc
|
||||
```console
|
||||
$ mkdir -p $GOPATH/src/github.com
|
||||
$ cd $GOPATH/src/github.com
|
||||
$ git clone https://github.com/rexray/gocsi.git
|
||||
$ cd rexray/gocsi/csc
|
||||
$ make build
|
||||
```
|
||||
|
||||
#### Start CSI driver locally
|
||||
```console
|
||||
$ cd $GOPATH/src/github.com/kubernetes-csi/csi-driver-nfs
|
||||
$ ./_output/nfsplugin --endpoint tcp://127.0.0.1:10000 --nodeid CSINode -v=5 &
|
||||
```
|
||||
|
||||
#### 0. Set environment variables
|
||||
```console
|
||||
$ cap="1,mount,"
|
||||
$ volname="test-$(date +%s)"
|
||||
$ volsize="2147483648"
|
||||
$ endpoint="unix:///tmp/csi.sock"
|
||||
$ target_path="/tmp/targetpath"
|
||||
$ params="server=127.0.0.1,share=/"
|
||||
```
|
||||
|
||||
#### 1. Get plugin info
|
||||
```console
|
||||
$ csc identity plugin-info --endpoint "$endpoint"
|
||||
"nfs.csi.k8s.io" "v2.0.0"
|
||||
```
|
||||
|
||||
#### 2. Create a new nfs volume
|
||||
```console
|
||||
$ value="$(csc controller new --endpoint "$endpoint" --cap "$cap" "$volname" --req-bytes "$volsize" --params "$params")"
|
||||
$ sleep 15
|
||||
$ volumeid="$(echo "$value" | awk '{print $1}' | sed 's/"//g')"
|
||||
$ echo "Got volume id: $volumeid"
|
||||
```
|
||||
|
||||
#### 3. Publish a nfs volume
|
||||
```
|
||||
$ csc node publish --endpoint "$endpoint" --cap "$cap" --vol-context "$params" --target-path "$target_path" "$volumeid"
|
||||
```
|
||||
|
||||
#### 4. Unpublish a nfs volume
|
||||
```console
|
||||
$ csc node unpublish --endpoint "$endpoint" --target-path "$target_path" "$volumeid"
|
||||
```
|
||||
|
||||
#### 6. Validate volume capabilities
|
||||
```console
|
||||
$ csc controller validate-volume-capabilities --endpoint "$endpoint" --cap "$cap" "$volumeid"
|
||||
```
|
||||
|
||||
#### 7. Delete the nfs volume
|
||||
```console
|
||||
$ csc controller del --endpoint "$endpoint" "$volumeid" --timeout 10m
|
||||
```
|
||||
|
||||
#### 8. Get NodeID
|
||||
```console
|
||||
$ csc node get-info --endpoint "$endpoint"
|
||||
CSINode
|
||||
```
|
||||
|
||||
## How to test CSI driver in a Kubernetes cluster
|
||||
- Set environment variable
|
||||
```console
|
||||
export REGISTRY=<dockerhub-alias>
|
||||
export IMAGE_VERSION=latest
|
||||
```
|
||||
|
||||
- Build continer image and push image to dockerhub
|
||||
```console
|
||||
# run `docker login` first
|
||||
# build docker image
|
||||
make container
|
||||
# push the docker image
|
||||
make push
|
||||
```
|
||||
|
||||
- Deploy a Kubernetes cluster and make sure `kubectl get nodes` works on your dev box.
|
||||
|
||||
- Run E2E test on the Kubernetes cluster.
|
||||
|
||||
```console
|
||||
# install NFS CSI Driver on the Kubernetes cluster
|
||||
make e2e-bootstrap
|
||||
|
||||
# run the E2E test
|
||||
make e2e-test
|
||||
```
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
## Driver Parameters
|
||||
> This plugin driver itself only provides a communication layer between resources in the cluser and the NFS server, you need to bring your own NFS server before using this driver.
|
||||
|
||||
### Storage Class Usage (Dynamic Provisioning)
|
||||
> [`StorageClass` example](../deploy/example/storageclass-nfs.yaml)
|
||||
|
||||
Name | Meaning | Example Value | Mandatory | Default value
|
||||
--- | --- | --- | --- | ---
|
||||
server | NFS Server endpoint | Domain name `nfs-server.default.svc.cluster.local` <br>Or IP address `127.0.0.1` | Yes |
|
||||
share | NFS share path | `/` | Yes |
|
||||
|
||||
### PV/PVC Usage (Static Provisioning)
|
||||
> [`PersistentVolume` example](../deploy/example/pv-nfs-csi.yaml)
|
||||
|
||||
Name | Meaning | Example Value | Mandatory | Default value
|
||||
--- | --- | --- | --- | ---
|
||||
volumeAttributes.server | NFS Server endpoint | Domain name `nfs-server.default.svc.cluster.local` <br>Or IP address `127.0.0.1` | Yes |
|
||||
volumeAttributes.share | NFS share path | `/` | Yes |
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
# Install NFS CSI driver master version on a kubernetes cluster
|
||||
|
||||
If you have already installed Helm, you can also use it to install NFS CSI driver. Please see [Installation with Helm](../charts/README.md).
|
||||
|
||||
## Install with kubectl
|
||||
- remote install
|
||||
```console
|
||||
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/install-driver.sh | bash -s master --
|
||||
```
|
||||
|
||||
- local install
|
||||
```console
|
||||
git clone https://github.com/kubernetes-csi/csi-driver-nfs.git
|
||||
cd csi-driver-nfs
|
||||
./deploy/install-driver.sh master local
|
||||
```
|
||||
|
||||
- check pods status:
|
||||
```console
|
||||
kubectl -n kube-system get pod -o wide -l app=csi-nfs-controller
|
||||
kubectl -n kube-system get pod -o wide -l app=csi-nfs-node
|
||||
```
|
||||
|
||||
example output:
|
||||
|
||||
```console
|
||||
NAME READY STATUS RESTARTS AGE IP NODE
|
||||
csi-nfs-controller-56bfddd689-dh5tk 4/4 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
|
||||
csi-nfs-controller-56bfddd689-8pgr4 4/4 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
|
||||
csi-nfs-node-cvgbs 3/3 Running 0 35s 10.240.0.35 k8s-agentpool-22533604-1
|
||||
csi-nfs-node-dr4s4 3/3 Running 0 35s 10.240.0.4 k8s-agentpool-22533604-0
|
||||
```
|
||||
|
||||
- clean up NFS CSI driver
|
||||
```console
|
||||
curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/uninstall-driver.sh | bash -s master --
|
||||
```
|
||||
|
|
@ -1,53 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: data-nfsplugin
|
||||
labels:
|
||||
name: data-nfsplugin
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
capacity:
|
||||
storage: 100Gi
|
||||
csi:
|
||||
driver: nfs.csi.k8s.io
|
||||
volumeHandle: data-id
|
||||
volumeAttributes:
|
||||
server: 127.0.0.1
|
||||
share: /export
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: data-nfsplugin
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: 100Gi
|
||||
selector:
|
||||
matchExpressions:
|
||||
- key: name
|
||||
operator: In
|
||||
values: ["data-nfsplugin"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: maersk/nginx
|
||||
imagePullPolicy: Always
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: /var/www
|
||||
name: data-nfsplugin
|
||||
volumes:
|
||||
- name: data-nfsplugin
|
||||
persistentVolumeClaim:
|
||||
claimName: data-nfsplugin
|
||||
69
go.mod
69
go.mod
|
|
@ -1,27 +1,52 @@
|
|||
module github.com/kubernetes-csi/csi-driver-nfs
|
||||
|
||||
go 1.13
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/container-storage-interface/spec v1.1.0
|
||||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
||||
github.com/fatih/camelcase v1.0.0 // indirect
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.2.0
|
||||
github.com/onsi/ginkgo v1.8.0
|
||||
github.com/onsi/gomega v1.5.0
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
|
||||
github.com/spf13/cobra v0.0.3
|
||||
golang.org/x/net v0.0.0-20190415100556-4a65cf94b679
|
||||
google.golang.org/grpc v1.20.0
|
||||
k8s.io/api v0.0.0-20190415132514-c2f1300cac21
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed // indirect
|
||||
k8s.io/apimachinery v0.0.0-20190415132420-07d458fe0356
|
||||
k8s.io/cli-runtime v0.0.0-20190415133733-52015cbe156a // indirect
|
||||
k8s.io/cluster-bootstrap v0.0.0-20190415134033-d885a12fbbe4 // indirect
|
||||
k8s.io/csi-translation-lib v0.0.0-20190415134207-82f1dfd98d10 // indirect
|
||||
k8s.io/kube-aggregator v0.0.0-20190415133304-80ce4e5a0cbc // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c // indirect
|
||||
k8s.io/kubernetes v1.14.1
|
||||
k8s.io/utils v0.0.0-20200124190032-861946025e34
|
||||
github.com/container-storage-interface/spec v1.3.0
|
||||
github.com/golang/protobuf v1.4.3
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.9.0
|
||||
github.com/kubernetes-csi/external-snapshotter/v2 v2.0.0-20200617021606-4800ca72d403
|
||||
github.com/onsi/ginkgo v1.11.0
|
||||
github.com/onsi/gomega v1.7.1
|
||||
github.com/pborman/uuid v1.2.0
|
||||
github.com/stretchr/testify v1.6.1
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
|
||||
google.golang.org/grpc v1.29.0
|
||||
k8s.io/api v0.21.0
|
||||
k8s.io/apimachinery v0.21.0
|
||||
k8s.io/client-go v0.21.0
|
||||
k8s.io/klog/v2 v2.8.0
|
||||
k8s.io/kubernetes v1.21.0
|
||||
k8s.io/mount-utils v0.0.0
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
)
|
||||
|
||||
replace (
|
||||
k8s.io/api => k8s.io/api v0.21.0
|
||||
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.0
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.21.0
|
||||
k8s.io/apiserver => k8s.io/apiserver v0.21.0
|
||||
k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.0
|
||||
k8s.io/client-go => k8s.io/client-go v0.21.0
|
||||
k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.0
|
||||
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.0
|
||||
k8s.io/code-generator => k8s.io/code-generator v0.21.0
|
||||
k8s.io/component-base => k8s.io/component-base v0.21.0
|
||||
k8s.io/component-helpers => k8s.io/component-helpers v0.21.0
|
||||
k8s.io/controller-manager => k8s.io/controller-manager v0.21.0
|
||||
k8s.io/cri-api => k8s.io/cri-api v0.21.0
|
||||
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.0
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.0
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.0
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.0
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.0
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.21.0
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.21.0
|
||||
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.0
|
||||
k8s.io/metrics => k8s.io/metrics v0.21.0
|
||||
k8s.io/mount-utils => k8s.io/mount-utils v0.21.0
|
||||
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.0
|
||||
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0
|
||||
k8s.io/sample-controller => k8s.io/sample-controller v0.21.0
|
||||
)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
/*
|
||||
Copyright YEAR The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
|
@ -0,0 +1,202 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import difflib
|
||||
import glob
|
||||
import json
|
||||
import mmap
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from datetime import date
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"filenames",
|
||||
help="list of files to check, all files if unspecified",
|
||||
nargs='*')
|
||||
|
||||
# Rootdir defaults to the directory **above** the repo-infra dir.
|
||||
rootdir = os.path.dirname(__file__) + "./../../../"
|
||||
rootdir = os.path.abspath(rootdir)
|
||||
parser.add_argument(
|
||||
"--rootdir", default=rootdir, help="root directory to examine")
|
||||
|
||||
default_boilerplate_dir = os.path.join(rootdir, "csi-driver-nfs/hack/boilerplate")
|
||||
|
||||
parser.add_argument(
|
||||
"--boilerplate-dir", default=default_boilerplate_dir)
|
||||
|
||||
parser.add_argument(
|
||||
"-v", "--verbose",
|
||||
help="give verbose output regarding why a file does not pass",
|
||||
action="store_true")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
|
||||
|
||||
def get_refs():
|
||||
refs = {}
|
||||
|
||||
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
|
||||
extension = os.path.basename(path).split(".")[1]
|
||||
|
||||
ref_file = open(path, 'r')
|
||||
ref = ref_file.read().splitlines()
|
||||
ref_file.close()
|
||||
refs[extension] = ref
|
||||
|
||||
return refs
|
||||
|
||||
def file_passes(filename, refs, regexs):
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except Exception as exc:
|
||||
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
|
||||
return False
|
||||
|
||||
data = f.read()
|
||||
f.close()
|
||||
|
||||
basename = os.path.basename(filename)
|
||||
extension = file_extension(filename)
|
||||
if extension != "":
|
||||
ref = refs[extension]
|
||||
else:
|
||||
ref = refs[basename]
|
||||
|
||||
# remove build tags from the top of Go files
|
||||
if extension == "go":
|
||||
p = regexs["go_build_constraints"]
|
||||
(data, found) = p.subn("", data, 1)
|
||||
|
||||
# remove shebang from the top of shell files
|
||||
if extension == "sh" or extension == "py":
|
||||
p = regexs["shebang"]
|
||||
(data, found) = p.subn("", data, 1)
|
||||
|
||||
data = data.splitlines()
|
||||
|
||||
# if our test file is smaller than the reference it surely fails!
|
||||
if len(ref) > len(data):
|
||||
print('File %s smaller than reference (%d < %d)' %
|
||||
(filename, len(data), len(ref)),
|
||||
file=verbose_out)
|
||||
return False
|
||||
|
||||
# trim our file to the same number of lines as the reference file
|
||||
data = data[:len(ref)]
|
||||
|
||||
p = regexs["year"]
|
||||
for d in data:
|
||||
if p.search(d):
|
||||
print('File %s is missing the year' % filename, file=verbose_out)
|
||||
return False
|
||||
|
||||
# Replace all occurrences of the regex "CURRENT_YEAR|...|2016|2015|2014" with "YEAR"
|
||||
p = regexs["date"]
|
||||
for i, d in enumerate(data):
|
||||
(data[i], found) = p.subn('YEAR', d)
|
||||
if found != 0:
|
||||
break
|
||||
|
||||
# if we don't match the reference at this point, fail
|
||||
if ref != data:
|
||||
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
|
||||
if args.verbose:
|
||||
print(file=verbose_out)
|
||||
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
|
||||
print(line, file=verbose_out)
|
||||
print(file=verbose_out)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def file_extension(filename):
|
||||
return os.path.splitext(filename)[1].split(".")[-1].lower()
|
||||
|
||||
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git',
|
||||
'cluster/env.sh', 'vendor', 'test/e2e/generated/bindata.go',
|
||||
'repo-infra/verify/boilerplate/test', '.glide']
|
||||
|
||||
def normalize_files(files):
|
||||
newfiles = []
|
||||
for pathname in files:
|
||||
if any(x in pathname for x in skipped_dirs):
|
||||
continue
|
||||
newfiles.append(pathname)
|
||||
return newfiles
|
||||
|
||||
def get_files(extensions):
|
||||
files = []
|
||||
if len(args.filenames) > 0:
|
||||
files = args.filenames
|
||||
else:
|
||||
for root, dirs, walkfiles in os.walk(args.rootdir):
|
||||
# don't visit certain dirs. This is just a performance improvement
|
||||
# as we would prune these later in normalize_files(). But doing it
|
||||
# cuts down the amount of filesystem walking we do and cuts down
|
||||
# the size of the file list
|
||||
for d in skipped_dirs:
|
||||
if d in dirs:
|
||||
dirs.remove(d)
|
||||
|
||||
for name in walkfiles:
|
||||
pathname = os.path.join(root, name)
|
||||
files.append(pathname)
|
||||
|
||||
files = normalize_files(files)
|
||||
|
||||
outfiles = []
|
||||
for pathname in files:
|
||||
basename = os.path.basename(pathname)
|
||||
extension = file_extension(pathname)
|
||||
if extension in extensions or basename in extensions:
|
||||
outfiles.append(pathname)
|
||||
return outfiles
|
||||
|
||||
def get_regexs():
|
||||
regexs = {}
|
||||
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
|
||||
regexs["year"] = re.compile( 'YEAR' )
|
||||
# dates can be 2014, 2015, 2016, ..., CURRENT_YEAR, company holder names can be anything
|
||||
years = range(2014, date.today().year + 1)
|
||||
regexs["date"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), years)) )
|
||||
# strip // +build \n\n build constraints
|
||||
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
|
||||
# strip #!.* from shell scripts
|
||||
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
|
||||
return regexs
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
regexs = get_regexs()
|
||||
refs = get_refs()
|
||||
filenames = get_files(refs.keys())
|
||||
|
||||
for filename in filenames:
|
||||
if not file_passes(filename, refs, regexs):
|
||||
print(filename, file=sys.stdout)
|
||||
|
||||
return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright YEAR The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ "$#" -lt 1 ]]; then
|
||||
echo "please provide a registry name"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export REGISTRY_NAME="$1"
|
||||
export REGISTRY=$REGISTRY_NAME.azurecr.io
|
||||
export IMAGENAME=public/k8s/csi/nfs-csi
|
||||
export CI=1
|
||||
export PUBLISH=1
|
||||
az acr login --name $REGISTRY_NAME
|
||||
make container push push-latest
|
||||
|
||||
echo "sleep 60s ..."
|
||||
sleep 60
|
||||
image="mcr.microsoft.com/k8s/csi/nfs-csi:latest"
|
||||
docker pull $image
|
||||
docker inspect $image | grep Created
|
||||
|
|
@ -0,0 +1,108 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
# Explicitly opt into go modules, even though we're inside a GOPATH directory
|
||||
export GO111MODULE=on
|
||||
# Explicitly clear GOPATH, to ensure nothing this script calls makes use of that path info
|
||||
export GOPATH=
|
||||
# Explicitly clear GOFLAGS, since GOFLAGS=-mod=vendor breaks dependency resolution while rebuilding vendor
|
||||
export GOFLAGS=
|
||||
|
||||
cd "$(git rev-parse --show-toplevel)"
|
||||
trap 'echo "FAILED" >&2' ERR
|
||||
TMP_DIR="${TMP_DIR:-$(mktemp -d /tmp/update-vendor.XXXX)}"
|
||||
|
||||
prune-vendor() {
|
||||
find vendor -type f \
|
||||
-not -iname "*.c" \
|
||||
-not -iname "*.go" \
|
||||
-not -iname "*.h" \
|
||||
-not -iname "*.proto" \
|
||||
-not -iname "*.s" \
|
||||
-not -iname "AUTHORS*" \
|
||||
-not -iname "CONTRIBUTORS*" \
|
||||
-not -iname "COPYING*" \
|
||||
-not -iname "LICENSE*" \
|
||||
-not -iname "NOTICE*" \
|
||||
-delete
|
||||
}
|
||||
|
||||
# ensure_require_replace_directives_for_all_dependencies:
|
||||
# - ensures all existing 'require' directives have an associated 'replace' directive pinning a version
|
||||
# - adds explicit 'require' directives for all transitive dependencies
|
||||
# - adds explicit 'replace' directives for all require directives (existing 'replace' directives take precedence)
|
||||
function ensure_require_replace_directives_for_all_dependencies() {
|
||||
local local_tmp_dir
|
||||
local_tmp_dir=$(mktemp -d "${TMP_DIR}/pin_replace.XXXX")
|
||||
|
||||
# collect 'require' directives that actually specify a version
|
||||
local require_filter='(.Version != null) and (.Version != "v0.0.0") and (.Version != "v0.0.0-00010101000000-000000000000")'
|
||||
# collect 'replace' directives that unconditionally pin versions (old=new@version)
|
||||
local replace_filter='(.Old.Version == null) and (.New.Version != null)'
|
||||
|
||||
# Capture local require/replace directives before running any go commands that can modify the go.mod file
|
||||
local require_json="${local_tmp_dir}/require.json"
|
||||
local replace_json="${local_tmp_dir}/replace.json"
|
||||
go mod edit -json | jq -r ".Require // [] | sort | .[] | select(${require_filter})" > "${require_json}"
|
||||
go mod edit -json | jq -r ".Replace // [] | sort | .[] | select(${replace_filter})" > "${replace_json}"
|
||||
|
||||
# 1. Ensure require directives have a corresponding replace directive pinning a version
|
||||
cat "${require_json}" | jq -r '"-replace \(.Path)=\(.Path)@\(.Version)"' | xargs -L 100 go mod edit -fmt
|
||||
cat "${replace_json}" | jq -r '"-replace \(.Old.Path)=\(.New.Path)@\(.New.Version)"'| xargs -L 100 go mod edit -fmt
|
||||
|
||||
# 2. Add explicit require directives for indirect dependencies
|
||||
go list -m -json all | jq -r 'select(.Main != true) | select(.Indirect == true) | "-require \(.Path)@\(.Version)"' | xargs -L 100 go mod edit -fmt
|
||||
|
||||
# 3. Add explicit replace directives pinning dependencies that aren't pinned yet
|
||||
go list -m -json all | jq -r 'select(.Main != true) | select(.Replace == null) | "-replace \(.Path)=\(.Path)@\(.Version)"' | xargs -L 100 go mod edit -fmt
|
||||
}
|
||||
|
||||
function group_replace_directives() {
|
||||
local local_tmp_dir
|
||||
local_tmp_dir=$(mktemp -d "${TMP_DIR}/group_replace.XXXX")
|
||||
local go_mod_replace="${local_tmp_dir}/go.mod.replace.tmp"
|
||||
local go_mod_noreplace="${local_tmp_dir}/go.mod.noreplace.tmp"
|
||||
# separate replace and non-replace directives
|
||||
cat go.mod | awk "
|
||||
# print lines between 'replace (' ... ')' lines
|
||||
/^replace [(]/ { inreplace=1; next }
|
||||
inreplace && /^[)]/ { inreplace=0; next }
|
||||
inreplace { print > \"${go_mod_replace}\"; next }
|
||||
|
||||
# print ungrouped replace directives with the replace directive trimmed
|
||||
/^replace [^(]/ { sub(/^replace /,\"\"); print > \"${go_mod_replace}\"; next }
|
||||
|
||||
# otherwise print to the noreplace file
|
||||
{ print > \"${go_mod_noreplace}\" }
|
||||
"
|
||||
cat "${go_mod_noreplace}" > go.mod
|
||||
echo "replace (" >> go.mod
|
||||
cat "${go_mod_replace}" >> go.mod
|
||||
echo ")" >> go.mod
|
||||
|
||||
go mod edit -fmt
|
||||
}
|
||||
|
||||
ensure_require_replace_directives_for_all_dependencies
|
||||
go mod tidy
|
||||
ensure_require_replace_directives_for_all_dependencies
|
||||
group_replace_directives
|
||||
go mod vendor
|
||||
#prune-vendor
|
||||
echo SUCCESS
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
find . -name "*.go" | grep -v "\/vendor\/" | xargs gofmt -s -w
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
VERSION=${1#"v"}
|
||||
if [ -z "$VERSION" ]; then
|
||||
echo "Must specify version!"
|
||||
exit 1
|
||||
fi
|
||||
MODS=($(
|
||||
curl -sS https://raw.githubusercontent.com/kubernetes/kubernetes/v${VERSION}/go.mod |
|
||||
sed -n 's|.*k8s.io/\(.*\) => ./staging/src/k8s.io/.*|k8s.io/\1|p'
|
||||
))
|
||||
for MOD in "${MODS[@]}"; do
|
||||
echo $MOD
|
||||
V=$(
|
||||
go mod download -json "${MOD}@kubernetes-${VERSION}" |
|
||||
sed -n 's|.*"Version": "\(.*\)".*|\1|p'
|
||||
)
|
||||
echo ${V}
|
||||
go mod edit "-replace=${MOD}=${MOD}@${V}"
|
||||
done
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
readonly PKG_ROOT="$(git rev-parse --show-toplevel)"
|
||||
|
||||
${PKG_ROOT}/hack/verify-gofmt.sh
|
||||
${PKG_ROOT}/hack/verify-govet.sh
|
||||
${PKG_ROOT}/hack/verify-golint.sh
|
||||
${PKG_ROOT}/hack/verify-yamllint.sh
|
||||
${PKG_ROOT}/hack/verify-boilerplate.sh
|
||||
${PKG_ROOT}/hack/verify-spelling.sh
|
||||
${PKG_ROOT}/hack/verify-helm-chart-files.sh
|
||||
${PKG_ROOT}/hack/verify-helm-chart.sh
|
||||
${PKG_ROOT}/hack/verify-gomod.sh
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2014 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
echo "Verifying boilerplate"
|
||||
|
||||
if [[ -z "$(command -v python)" ]]; then
|
||||
echo "Cannot find python. Make link to python3..."
|
||||
update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
||||
fi
|
||||
|
||||
REPO_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
boilerDir="${REPO_ROOT}/hack/boilerplate"
|
||||
boiler="${boilerDir}/boilerplate.py"
|
||||
|
||||
files_need_boilerplate=($(${boiler} --rootdir=${REPO_ROOT} --verbose))
|
||||
|
||||
# Run boilerplate.py unit tests
|
||||
unitTestOut="$(mktemp)"
|
||||
trap cleanup EXIT
|
||||
cleanup() {
|
||||
rm "${unitTestOut}"
|
||||
}
|
||||
|
||||
# Run boilerplate check
|
||||
if [[ ${#files_need_boilerplate[@]} -gt 0 ]]; then
|
||||
for file in "${files_need_boilerplate[@]}"; do
|
||||
echo "Boilerplate header is wrong for: ${file}"
|
||||
done
|
||||
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Done"
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
echo "begin to create deployment examples ..."
|
||||
|
||||
kubectl apply -f ./deploy/example/storageclass-nfs.yaml
|
||||
kubectl apply -f ./deploy/example/deployment.yaml
|
||||
kubectl apply -f ./deploy/example/statefulset.yaml
|
||||
|
||||
echo "sleep 60s ..."
|
||||
sleep 60
|
||||
|
||||
echo "begin to check pod status ..."
|
||||
kubectl get pods -o wide
|
||||
|
||||
kubectl get pods --field-selector status.phase=Running | grep deployment-nfs
|
||||
kubectl get pods --field-selector status.phase=Running | grep statefulset-nfs-0
|
||||
|
||||
echo "deployment examples running completed."
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
echo "Verifying gofmt"
|
||||
|
||||
readonly diff=$(find . -name "*.go" | grep -v "\/vendor\/" | xargs gofmt -s -d 2>&1)
|
||||
if [[ -n "${diff}" ]]; then
|
||||
echo "${diff}"
|
||||
echo
|
||||
echo "Please run hack/update-gofmt.sh to fix the issue(s)"
|
||||
exit 1
|
||||
fi
|
||||
echo "No issue found"
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
if [[ -z "$(command -v golangci-lint)" ]]; then
|
||||
echo "Cannot find golangci-lint. Installing golangci-lint..."
|
||||
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.31.0
|
||||
export PATH=$PATH:$(go env GOPATH)/bin
|
||||
fi
|
||||
|
||||
echo "Verifying golint"
|
||||
|
||||
golangci-lint run --no-config --enable=golint --disable=typecheck --deadline=10m
|
||||
|
||||
echo "Congratulations! Lint check completed for all Go source files."
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
echo "Verifying gomod"
|
||||
export GO111MODULE=on
|
||||
echo "go mod tidy"
|
||||
go mod tidy
|
||||
echo "go mod vendor"
|
||||
go mod vendor
|
||||
diff=`git diff`
|
||||
if [[ -n "${diff}" ]]; then
|
||||
echo "${diff}"
|
||||
echo
|
||||
echo "error"
|
||||
exit 1
|
||||
fi
|
||||
echo "Done"
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
echo "Verifying govet"
|
||||
|
||||
go vet $(go list ./... | grep -v vendor)
|
||||
|
||||
echo "Done"
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
echo "begin to verify chart tgz files ..."
|
||||
git config core.filemode false
|
||||
|
||||
# verify whether chart config has changed
|
||||
diff=`git diff`
|
||||
if [[ -n "${diff}" ]]; then
|
||||
echo "${diff}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for dir in charts/*
|
||||
do
|
||||
if [ -d $dir ]; then
|
||||
if [ -f $dir/*.tgz ]; then
|
||||
echo "verify $dir ..."
|
||||
tar -xvf $dir/*.tgz -C $dir/
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
diff=`git diff`
|
||||
if [[ -n "${diff}" ]]; then
|
||||
echo
|
||||
echo
|
||||
echo "${diff}"
|
||||
echo
|
||||
echo "latest chart config has changed, pls run \"helm package charts/latest/csi-driver-nfs -d charts/latest/\" to update tgz file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "chart tgz files verified."
|
||||
|
||||
echo "verify helm chart index ..."
|
||||
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
|
||||
helm repo add csi-driver-smb https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts
|
||||
helm search repo -l csi-driver-nfs
|
||||
echo "helm chart index verified."
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
readonly PKG_ROOT="$(git rev-parse --show-toplevel)"
|
||||
|
||||
function get_image_from_helm_chart() {
|
||||
local -r image_name="${1}"
|
||||
image_repository="$(cat ${PKG_ROOT}/charts/latest/csi-driver-nfs/values.yaml | yq -r .image.${image_name}.repository)"
|
||||
image_tag="$(cat ${PKG_ROOT}/charts/latest/csi-driver-nfs/values.yaml | yq -r .image.${image_name}.tag)"
|
||||
echo "${image_repository}:${image_tag}"
|
||||
}
|
||||
|
||||
function validate_image() {
|
||||
local -r expected_image="${1}"
|
||||
local -r image="${2}"
|
||||
|
||||
if [[ "${expected_image}" != "${image}" ]]; then
|
||||
echo "Expected ${expected_image}, but got ${image} in helm chart"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
echo "Comparing image version between helm chart and manifests in deploy folder"
|
||||
|
||||
if [[ -z "$(command -v pip)" ]]; then
|
||||
echo "Cannot find pip. Installing pip3..."
|
||||
apt install python3-pip -y
|
||||
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1
|
||||
fi
|
||||
|
||||
if [[ -z "$(command -v jq)" ]]; then
|
||||
echo "Cannot find jq. Installing yq..."
|
||||
apt install jq -y
|
||||
fi
|
||||
|
||||
# jq-equivalent for yaml
|
||||
pip install yq
|
||||
|
||||
# Extract images from csi-nfs-controller.yaml
|
||||
expected_csi_provisioner_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[0].image | head -n 1)"
|
||||
expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
|
||||
expected_nfs_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
|
||||
|
||||
csi_provisioner_image="$(get_image_from_helm_chart "csiProvisioner")"
|
||||
validate_image "${expected_csi_provisioner_image}" "${csi_provisioner_image}"
|
||||
|
||||
liveness_probe_image="$(get_image_from_helm_chart "livenessProbe")"
|
||||
validate_image "${expected_liveness_probe_image}" "${liveness_probe_image}"
|
||||
|
||||
nfs_image="$(get_image_from_helm_chart "nfs")"
|
||||
validate_image "${expected_nfs_image}" "${nfs_image}"
|
||||
|
||||
# Extract images from csi-nfs-node.yaml
|
||||
expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-node.yaml | yq -r .spec.template.spec.containers[0].image | head -n 1)"
|
||||
expected_node_driver_registrar="$(cat ${PKG_ROOT}/deploy/csi-nfs-node.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
|
||||
expected_nfs_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-node.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
|
||||
|
||||
validate_image "${expected_liveness_probe_image}" "${liveness_probe_image}"
|
||||
|
||||
node_driver_registrar="$(get_image_from_helm_chart "nodeDriverRegistrar")"
|
||||
validate_image "${expected_node_driver_registrar}" "${node_driver_registrar}"
|
||||
|
||||
validate_image "${expected_nfs_image}" "${nfs_image}"
|
||||
|
||||
echo "Images in deploy/ matches those in the latest helm chart."
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
TOOL_VERSION="v0.3.4"
|
||||
|
||||
# cd to the root path
|
||||
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
|
||||
cd "${ROOT}"
|
||||
|
||||
# create a temporary directory
|
||||
TMP_DIR=$(mktemp -d)
|
||||
|
||||
# cleanup
|
||||
exitHandler() (
|
||||
echo "Cleaning up..."
|
||||
rm -rf "${TMP_DIR}"
|
||||
)
|
||||
trap exitHandler EXIT
|
||||
|
||||
if [[ -z "$(command -v misspell)" ]]; then
|
||||
echo "Cannot find misspell. Installing misspell..."
|
||||
# perform go get in a temp dir as we are not tracking this version in a go module
|
||||
# if we do the go get in the repo, it will create / update a go.mod and go.sum
|
||||
cd "${TMP_DIR}"
|
||||
GO111MODULE=on GOBIN="${TMP_DIR}" go get "github.com/client9/misspell/cmd/misspell@${TOOL_VERSION}"
|
||||
export PATH="${TMP_DIR}:${PATH}"
|
||||
fi
|
||||
cd "${ROOT}"
|
||||
|
||||
# check spelling
|
||||
RES=0
|
||||
echo "Checking spelling..."
|
||||
ERROR_LOG="${TMP_DIR}/errors.log"
|
||||
git ls-files | grep -v vendor | xargs misspell > "${ERROR_LOG}"
|
||||
if [[ -s "${ERROR_LOG}" ]]; then
|
||||
sed 's/^/error: /' "${ERROR_LOG}" # add 'error' to each line to highlight in e2e status
|
||||
echo "Found spelling errors!"
|
||||
RES=1
|
||||
fi
|
||||
exit "${RES}"
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
echo 'Verifying dependency update'
|
||||
|
||||
if [[ -n "$(git diff --shortstat)" ]]; then
|
||||
echo 'Some files got changed after dependencies update'
|
||||
git --no-pager diff
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo 'Done'
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright 2020 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [[ -z "$(command -v yamllint)" ]]; then
|
||||
apt update && apt install yamllint -y
|
||||
fi
|
||||
|
||||
LOG=/tmp/yamllint.log
|
||||
helmPath=charts/latest/csi-driver-nfs/templates
|
||||
|
||||
echo "checking yaml files num ..."
|
||||
deployDirNum=`ls deploy/*.yaml | wc -l`
|
||||
helmDirNum=`ls $helmPath/*.yaml | grep -v serviceaccount | wc -l`
|
||||
if [[ "${deployDirNum}" != "${helmDirNum}" ]]; then
|
||||
echo "yaml file num($deployDirNum) under deploy/ not equal to num($helmDirNum) under $helmPath"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for path in "deploy/*.yaml" "deploy/example/*.yaml" "deploy/example/nfs-provisioner/*.yaml"
|
||||
do
|
||||
echo "checking yamllint under path: $path ..."
|
||||
yamllint -f parsable $path | grep -v "line too long" > $LOG
|
||||
cat $LOG
|
||||
linecount=`cat $LOG | grep -v "line too long" | wc -l`
|
||||
if [ $linecount -gt 0 ]; then
|
||||
echo "yaml files under $path are not linted, failed with: "
|
||||
cat $LOG
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo "checking yamllint under path: $helmPath ..."
|
||||
yamllint -f parsable $helmPath/*.yaml | grep -v "line too long" | grep -v "too many spaces inside braces" | grep -v "missing document start" | grep -v "syntax error" > $LOG
|
||||
linecount=`cat $LOG | wc -l`
|
||||
if [ $linecount -gt 0 ]; then
|
||||
echo "yaml files under $helmPath/ are not linted, failed with: "
|
||||
cat $LOG
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Congratulations! All Yaml files have been linted."
|
||||
|
|
@ -1,23 +1,146 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// ControllerServer controller server setting
|
||||
type ControllerServer struct {
|
||||
Driver *nfsDriver
|
||||
Driver *Driver
|
||||
// Working directory for the provisioner to temporarily mount nfs shares at
|
||||
workingMountDir string
|
||||
}
|
||||
|
||||
// nfsVolume is an internal representation of a volume
|
||||
// created by the provisioner.
|
||||
type nfsVolume struct {
|
||||
// Volume id
|
||||
id string
|
||||
// Address of the NFS server.
|
||||
// Matches paramServer.
|
||||
server string
|
||||
// Base directory of the NFS server to create volumes under
|
||||
// Matches paramShare.
|
||||
baseDir string
|
||||
// Subdirectory of the NFS server to create volumes under
|
||||
subDir string
|
||||
// size of volume
|
||||
size int64
|
||||
}
|
||||
|
||||
// Ordering of elements in the CSI volume id.
|
||||
// ID is of the form {server}/{baseDir}/{subDir}.
|
||||
// TODO: This volume id format limits baseDir and
|
||||
// subDir to only be one directory deep.
|
||||
// Adding a new element should always go at the end
|
||||
// before totalIDElements
|
||||
const (
|
||||
idServer = iota
|
||||
idBaseDir
|
||||
idSubDir
|
||||
totalIDElements // Always last
|
||||
)
|
||||
|
||||
// CreateVolume create a volume
|
||||
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
name := req.GetName()
|
||||
if len(name) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "CreateVolume name must be provided")
|
||||
}
|
||||
if err := cs.validateVolumeCapabilities(req.GetVolumeCapabilities()); err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
reqCapacity := req.GetCapacityRange().GetRequiredBytes()
|
||||
nfsVol, err := cs.newNFSVolume(name, reqCapacity, req.GetParameters())
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
var volCap *csi.VolumeCapability
|
||||
if len(req.GetVolumeCapabilities()) > 0 {
|
||||
volCap = req.GetVolumeCapabilities()[0]
|
||||
}
|
||||
// Mount nfs base share so we can create a subdirectory
|
||||
if err = cs.internalMount(ctx, nfsVol, volCap); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
|
||||
}
|
||||
defer func() {
|
||||
if err = cs.internalUnmount(ctx, nfsVol); err != nil {
|
||||
klog.Warningf("failed to unmount nfs server: %v", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
// Create subdirectory under base-dir
|
||||
// TODO: revisit permissions
|
||||
internalVolumePath := cs.getInternalVolumePath(nfsVol)
|
||||
if err = os.Mkdir(internalVolumePath, 0777); err != nil && !os.IsExist(err) {
|
||||
return nil, status.Errorf(codes.Internal, "failed to make subdirectory: %v", err.Error())
|
||||
}
|
||||
// Remove capacity setting when provisioner 1.4.0 is available with fix for
|
||||
// https://github.com/kubernetes-csi/external-provisioner/pull/271
|
||||
return &csi.CreateVolumeResponse{Volume: cs.nfsVolToCSI(nfsVol, reqCapacity)}, nil
|
||||
}
|
||||
|
||||
// DeleteVolume delete a volume
|
||||
func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
volumeID := req.GetVolumeId()
|
||||
if volumeID == "" {
|
||||
return nil, status.Error(codes.InvalidArgument, "volume id is empty")
|
||||
}
|
||||
nfsVol, err := cs.getNfsVolFromID(volumeID)
|
||||
if err != nil {
|
||||
// An invalid ID should be treated as doesn't exist
|
||||
klog.Warningf("failed to get nfs volume for volume id %v deletion: %v", volumeID, err)
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
// Mount nfs base share so we can delete the subdirectory
|
||||
if err = cs.internalMount(ctx, nfsVol, nil); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
|
||||
}
|
||||
defer func() {
|
||||
if err = cs.internalUnmount(ctx, nfsVol); err != nil {
|
||||
klog.Warningf("failed to unmount nfs server: %v", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
// Delete subdirectory under base-dir
|
||||
internalVolumePath := cs.getInternalVolumePath(nfsVol)
|
||||
|
||||
klog.V(2).Infof("Removing subdirectory at %v", internalVolumePath)
|
||||
if err = os.RemoveAll(internalVolumePath); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to delete subdirectory: %v", err.Error())
|
||||
}
|
||||
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
|
||||
|
|
@ -28,10 +151,22 @@ func (cs *ControllerServer) ControllerUnpublishVolume(ctx context.Context, req *
|
|||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
|
||||
func (cs *ControllerServer) ControllerGetVolume(ctx context.Context, req *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
|
||||
if len(req.GetVolumeId()) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
|
||||
}
|
||||
if req.GetVolumeCapabilities() == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
|
||||
}
|
||||
|
||||
// supports all AccessModes, no need to check capabilities here
|
||||
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
|
||||
}
|
||||
|
||||
func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
|
@ -43,8 +178,6 @@ func (cs *ControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacit
|
|||
// ControllerGetCapabilities implements the default GRPC callout.
|
||||
// Default supports all capabilities
|
||||
func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
|
||||
glog.V(5).Infof("Using default ControllerGetCapabilities")
|
||||
|
||||
return &csi.ControllerGetCapabilitiesResponse{
|
||||
Capabilities: cs.Driver.cscap,
|
||||
}, nil
|
||||
|
|
@ -65,3 +198,178 @@ func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
|
|||
func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
func (cs *ControllerServer) validateVolumeCapabilities(caps []*csi.VolumeCapability) error {
|
||||
if len(caps) == 0 {
|
||||
return fmt.Errorf("volume capabilities must be provided")
|
||||
}
|
||||
|
||||
for _, c := range caps {
|
||||
if err := cs.validateVolumeCapability(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cs *ControllerServer) validateVolumeCapability(c *csi.VolumeCapability) error {
|
||||
if c == nil {
|
||||
return fmt.Errorf("volume capability must be provided")
|
||||
}
|
||||
|
||||
// Validate access mode
|
||||
accessMode := c.GetAccessMode()
|
||||
if accessMode == nil {
|
||||
return fmt.Errorf("volume capability access mode not set")
|
||||
}
|
||||
if !cs.Driver.cap[accessMode.Mode] {
|
||||
return fmt.Errorf("driver does not support access mode: %v", accessMode.Mode.String())
|
||||
}
|
||||
|
||||
// Validate access type
|
||||
accessType := c.GetAccessType()
|
||||
if accessType == nil {
|
||||
return fmt.Errorf("volume capability access type not set")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// internalMount mounts the volume's base NFS share (server:/baseDir) at the
// controller's internal working mount path by delegating to the driver's
// node server NodePublishVolume.
func (cs *ControllerServer) internalMount(ctx context.Context, vol *nfsVolume, volCap *csi.VolumeCapability) error {
	sharePath := filepath.Join(string(filepath.Separator) + vol.baseDir)
	targetPath := cs.getInternalMountPath(vol)

	// Callers with no capability (e.g. DeleteVolume) get a plain
	// mount-type capability as a default.
	if volCap == nil {
		volCap = &csi.VolumeCapability{
			AccessType: &csi.VolumeCapability_Mount{
				Mount: &csi.VolumeCapability_MountVolume{},
			},
		}
	}

	klog.V(4).Infof("internally mounting %v:%v at %v", vol.server, sharePath, targetPath)
	_, err := cs.Driver.ns.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
		TargetPath: targetPath,
		VolumeContext: map[string]string{
			paramServer: vol.server,
			paramShare:  sharePath,
		},
		VolumeCapability: volCap,
		VolumeId:         vol.id,
	})
	return err
}
|
||||
|
||||
// Unmount nfs server at base-dir
|
||||
func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume) error {
|
||||
targetPath := cs.getInternalMountPath(vol)
|
||||
|
||||
// Unmount nfs server at base-dir
|
||||
klog.V(4).Infof("internally unmounting %v", targetPath)
|
||||
_, err := cs.Driver.ns.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{
|
||||
VolumeId: vol.id,
|
||||
TargetPath: cs.getInternalMountPath(vol),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert VolumeCreate parameters to an nfsVolume
|
||||
func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[string]string) (*nfsVolume, error) {
|
||||
var (
|
||||
server string
|
||||
baseDir string
|
||||
)
|
||||
|
||||
// Validate parameters (case-insensitive).
|
||||
// TODO do more strict validation.
|
||||
for k, v := range params {
|
||||
switch strings.ToLower(k) {
|
||||
case paramServer:
|
||||
server = v
|
||||
case paramShare:
|
||||
baseDir = v
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid parameter %q", k)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate required parameters
|
||||
if server == "" {
|
||||
return nil, fmt.Errorf("%v is a required parameter", paramServer)
|
||||
}
|
||||
if baseDir == "" {
|
||||
return nil, fmt.Errorf("%v is a required parameter", paramShare)
|
||||
}
|
||||
|
||||
vol := &nfsVolume{
|
||||
server: server,
|
||||
baseDir: baseDir,
|
||||
subDir: name,
|
||||
size: size,
|
||||
}
|
||||
vol.id = cs.getVolumeIDFromNfsVol(vol)
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
// Get working directory for CreateVolume and DeleteVolume
|
||||
func (cs *ControllerServer) getInternalMountPath(vol *nfsVolume) string {
|
||||
// use default if empty
|
||||
if cs.workingMountDir == "" {
|
||||
cs.workingMountDir = "/tmp"
|
||||
}
|
||||
return filepath.Join(cs.workingMountDir, vol.subDir)
|
||||
}
|
||||
|
||||
// getInternalVolumePath returns the internal path where the volume is created.
// The reason why the internal path is "workingDir/subDir/subDir" is because:
//   - the semantic is actually "workingDir/volId/subDir" and volId == subDir.
//   - we need a mount directory per volId because you can have multiple
//     CreateVolume calls in parallel and they may use the same underlying share.
//     Instead of refcounting how many CreateVolume calls are using the same
//     share, it's simpler to just do a mount per request.
func (cs *ControllerServer) getInternalVolumePath(vol *nfsVolume) string {
	return filepath.Join(cs.getInternalMountPath(vol), vol.subDir)
}
|
||||
|
||||
// Get user-visible share path for the volume
|
||||
func (cs *ControllerServer) getVolumeSharePath(vol *nfsVolume) string {
|
||||
return filepath.Join(string(filepath.Separator), vol.baseDir, vol.subDir)
|
||||
}
|
||||
|
||||
// Convert into nfsVolume into a csi.Volume
|
||||
func (cs *ControllerServer) nfsVolToCSI(vol *nfsVolume, reqCapacity int64) *csi.Volume {
|
||||
return &csi.Volume{
|
||||
CapacityBytes: reqCapacity,
|
||||
VolumeId: vol.id,
|
||||
VolumeContext: map[string]string{
|
||||
paramServer: vol.server,
|
||||
paramShare: cs.getVolumeSharePath(vol),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Given a nfsVolume, return a CSI volume id
|
||||
func (cs *ControllerServer) getVolumeIDFromNfsVol(vol *nfsVolume) string {
|
||||
idElements := make([]string, totalIDElements)
|
||||
idElements[idServer] = strings.Trim(vol.server, "/")
|
||||
idElements[idBaseDir] = strings.Trim(vol.baseDir, "/")
|
||||
idElements[idSubDir] = strings.Trim(vol.subDir, "/")
|
||||
return strings.Join(idElements, "/")
|
||||
}
|
||||
|
||||
// Given a CSI volume id, return a nfsVolume
|
||||
func (cs *ControllerServer) getNfsVolFromID(id string) (*nfsVolume, error) {
|
||||
volRegex := regexp.MustCompile("^([^/]+)/(.*)/([^/]+)$")
|
||||
tokens := volRegex.FindStringSubmatch(id)
|
||||
if tokens == nil {
|
||||
return nil, fmt.Errorf("Could not split %q into server, baseDir and subDir", id)
|
||||
}
|
||||
|
||||
return &nfsVolume{
|
||||
id: id,
|
||||
server: tokens[1],
|
||||
baseDir: tokens[2],
|
||||
subDir: tokens[3],
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,427 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
// Shared fixture values for the controller tests.
const (
	testServer         = "test-server"
	testBaseDir        = "test-base-dir"
	testBaseDirNested  = "test/base/dir"
	testCSIVolume      = "test-csi"
	testVolumeID       = "test-server/test-base-dir/test-csi"
	testVolumeIDNested = "test-server/test/base/dir/test-csi"
)

// for Windows support in the future
var (
	testShare = filepath.Join(string(filepath.Separator), testBaseDir, string(filepath.Separator), testCSIVolume)
)
|
||||
|
||||
// initTestController builds a ControllerServer backed by a fake mounter and
// node server, with /tmp as the working mount directory.
func initTestController(t *testing.T) *ControllerServer {
	// perm is deliberately nil: NewNFSdriver accepts an optional mount
	// permission pointer.
	var perm *uint32
	mounter := &mount.FakeMounter{MountPoints: []mount.MountPoint{}}
	driver := NewNFSdriver("", "", perm)
	driver.ns = NewNodeServer(driver, mounter)
	cs := NewControllerServer(driver)
	cs.workingMountDir = "/tmp"
	return cs
}
|
||||
|
||||
// teardown removes the test volume directory left under /tmp and reports
// the outcome (color-coded via ANSI escapes) to stdout.
func teardown() {
	err := os.RemoveAll("/tmp/" + testCSIVolume)

	if err != nil {
		fmt.Print(err.Error())
		fmt.Printf("\n")
		fmt.Printf("\033[1;91m%s\033[0m\n", "> Teardown failed")
	} else {
		fmt.Printf("\033[1;36m%s\033[0m\n", "> Teardown completed")
	}
}
|
||||
|
||||
// TestMain runs the package tests, then cleans up test artifacts before
// exiting with the test result code.
func TestMain(m *testing.M) {
	code := m.Run()
	teardown()
	os.Exit(code)
}
|
||||
|
||||
func TestCreateVolume(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
req *csi.CreateVolumeRequest
|
||||
resp *csi.CreateVolumeResponse
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid defaults",
|
||||
req: &csi.CreateVolumeRequest{
|
||||
Name: testCSIVolume,
|
||||
VolumeCapabilities: []*csi.VolumeCapability{
|
||||
{
|
||||
AccessType: &csi.VolumeCapability_Mount{
|
||||
Mount: &csi.VolumeCapability_MountVolume{},
|
||||
},
|
||||
AccessMode: &csi.VolumeCapability_AccessMode{
|
||||
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
|
||||
},
|
||||
},
|
||||
},
|
||||
Parameters: map[string]string{
|
||||
paramServer: testServer,
|
||||
paramShare: testBaseDir,
|
||||
},
|
||||
},
|
||||
resp: &csi.CreateVolumeResponse{
|
||||
Volume: &csi.Volume{
|
||||
VolumeId: testVolumeID,
|
||||
VolumeContext: map[string]string{
|
||||
paramServer: testServer,
|
||||
paramShare: testShare,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "name empty",
|
||||
req: &csi.CreateVolumeRequest{
|
||||
VolumeCapabilities: []*csi.VolumeCapability{
|
||||
{
|
||||
AccessType: &csi.VolumeCapability_Mount{
|
||||
Mount: &csi.VolumeCapability_MountVolume{},
|
||||
},
|
||||
AccessMode: &csi.VolumeCapability_AccessMode{
|
||||
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
|
||||
},
|
||||
},
|
||||
},
|
||||
Parameters: map[string]string{
|
||||
paramServer: testServer,
|
||||
paramShare: testBaseDir,
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid volume capability",
|
||||
req: &csi.CreateVolumeRequest{
|
||||
Name: testCSIVolume,
|
||||
VolumeCapabilities: []*csi.VolumeCapability{
|
||||
{
|
||||
AccessMode: &csi.VolumeCapability_AccessMode{
|
||||
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
|
||||
},
|
||||
},
|
||||
},
|
||||
Parameters: map[string]string{
|
||||
paramServer: testServer,
|
||||
paramShare: testBaseDir,
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid create context",
|
||||
req: &csi.CreateVolumeRequest{
|
||||
Name: testCSIVolume,
|
||||
VolumeCapabilities: []*csi.VolumeCapability{
|
||||
{
|
||||
AccessType: &csi.VolumeCapability_Mount{
|
||||
Mount: &csi.VolumeCapability_MountVolume{},
|
||||
},
|
||||
AccessMode: &csi.VolumeCapability_AccessMode{
|
||||
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
|
||||
},
|
||||
},
|
||||
},
|
||||
Parameters: map[string]string{
|
||||
"unknown-parameter": "foo",
|
||||
},
|
||||
},
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range cases {
|
||||
test := test //pin
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
// Setup
|
||||
cs := initTestController(t)
|
||||
// Run
|
||||
resp, err := cs.CreateVolume(context.TODO(), test.req)
|
||||
|
||||
// Verify
|
||||
if !test.expectErr && err != nil {
|
||||
t.Errorf("test %q failed: %v", test.name, err)
|
||||
}
|
||||
if test.expectErr && err == nil {
|
||||
t.Errorf("test %q failed; got success", test.name)
|
||||
}
|
||||
if !reflect.DeepEqual(resp, test.resp) {
|
||||
t.Errorf("test %q failed: got resp %+v, expected %+v", test.name, resp, test.resp)
|
||||
}
|
||||
if !test.expectErr {
|
||||
info, err := os.Stat(filepath.Join(cs.workingMountDir, test.req.Name, test.req.Name))
|
||||
if err != nil {
|
||||
t.Errorf("test %q failed: couldn't find volume subdirectory: %v", test.name, err)
|
||||
}
|
||||
if !info.IsDir() {
|
||||
t.Errorf("test %q failed: subfile not a directory", test.name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteVolume covers DeleteVolume argument validation and verifies a
// successful delete removes the pre-created volume subdirectory.
// Note: expectedErr is only checked for nil/non-nil; its message is never
// compared against the returned error.
func TestDeleteVolume(t *testing.T) {
	cases := []struct {
		desc        string
		req         *csi.DeleteVolumeRequest
		resp        *csi.DeleteVolumeResponse
		expectedErr error
	}{
		{
			desc:        "Volume ID missing",
			req:         &csi.DeleteVolumeRequest{},
			resp:        nil,
			expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
		},
		{
			desc:        "Valid request",
			req:         &csi.DeleteVolumeRequest{VolumeId: testVolumeID},
			resp:        &csi.DeleteVolumeResponse{},
			expectedErr: nil,
		},
	}

	for _, test := range cases {
		test := test //pin
		t.Run(test.desc, func(t *testing.T) {
			// Setup: pre-create the volume directory and a file inside it so
			// a successful delete has something to remove.
			cs := initTestController(t)
			_ = os.MkdirAll(filepath.Join(cs.workingMountDir, testCSIVolume), os.ModePerm)
			_, _ = os.Create(filepath.Join(cs.workingMountDir, testCSIVolume, testCSIVolume))

			// Run
			resp, err := cs.DeleteVolume(context.TODO(), test.req)

			// Verify
			if test.expectedErr == nil && err != nil {
				t.Errorf("test %q failed: %v", test.desc, err)
			}
			if test.expectedErr != nil && err == nil {
				t.Errorf("test %q failed; expected error %v, got success", test.desc, test.expectedErr)
			}
			if !reflect.DeepEqual(resp, test.resp) {
				t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
			}
			if _, err := os.Stat(filepath.Join(cs.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil && !os.IsNotExist(err) {
				t.Errorf("test %q failed: expected volume subdirectory deleted, it still exists", test.desc)
			}
		})
	}
}
|
||||
|
||||
// TestValidateVolumeCapabilities covers request-field validation and the
// success path of ValidateVolumeCapabilities.
func TestValidateVolumeCapabilities(t *testing.T) {
	cases := []struct {
		desc        string
		req         *csi.ValidateVolumeCapabilitiesRequest
		resp        *csi.ValidateVolumeCapabilitiesResponse
		expectedErr error
	}{
		{
			desc:        "Volume ID missing",
			req:         &csi.ValidateVolumeCapabilitiesRequest{},
			resp:        nil,
			expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
		},
		{
			desc:        "Volume capabilities missing",
			req:         &csi.ValidateVolumeCapabilitiesRequest{VolumeId: testVolumeID},
			resp:        nil,
			expectedErr: status.Error(codes.InvalidArgument, "Volume capabilities missing in request"),
		},
		{
			desc: "valid request",
			req: &csi.ValidateVolumeCapabilitiesRequest{
				VolumeId: testVolumeID,
				VolumeCapabilities: []*csi.VolumeCapability{
					{
						AccessType: &csi.VolumeCapability_Mount{
							Mount: &csi.VolumeCapability_MountVolume{},
						},
						AccessMode: &csi.VolumeCapability_AccessMode{
							Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
						},
					},
				},
			},
			resp:        &csi.ValidateVolumeCapabilitiesResponse{Message: ""},
			expectedErr: nil,
		},
	}

	for _, test := range cases {
		test := test //pin
		t.Run(test.desc, func(t *testing.T) {
			// Setup
			cs := initTestController(t)

			// Run
			resp, err := cs.ValidateVolumeCapabilities(context.TODO(), test.req)

			// Verify
			if test.expectedErr == nil && err != nil {
				t.Errorf("test %q failed: %v", test.desc, err)
			}
			if test.expectedErr != nil && err == nil {
				t.Errorf("test %q failed; expected error %v, got success", test.desc, test.expectedErr)
			}
			if !reflect.DeepEqual(resp, test.resp) {
				t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
			}
		})
	}
}
|
||||
|
||||
// TestControllerGetCapabilities verifies the controller advertises exactly
// the CREATE_DELETE_VOLUME capability.
func TestControllerGetCapabilities(t *testing.T) {
	cases := []struct {
		desc        string
		req         *csi.ControllerGetCapabilitiesRequest
		resp        *csi.ControllerGetCapabilitiesResponse
		expectedErr error
	}{
		{
			desc: "valid request",
			req:  &csi.ControllerGetCapabilitiesRequest{},
			resp: &csi.ControllerGetCapabilitiesResponse{
				Capabilities: []*csi.ControllerServiceCapability{
					{
						Type: &csi.ControllerServiceCapability_Rpc{
							Rpc: &csi.ControllerServiceCapability_RPC{
								Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
							},
						},
					},
				},
			},
			expectedErr: nil,
		},
	}

	for _, test := range cases {
		test := test //pin
		t.Run(test.desc, func(t *testing.T) {
			// Setup
			cs := initTestController(t)

			// Run
			resp, err := cs.ControllerGetCapabilities(context.TODO(), test.req)

			// Verify
			if test.expectedErr == nil && err != nil {
				t.Errorf("test %q failed: %v", test.desc, err)
			}
			if test.expectedErr != nil && err == nil {
				t.Errorf("test %q failed; expected error %v, got success", test.desc, test.expectedErr)
			}
			if !reflect.DeepEqual(resp, test.resp) {
				t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
			}
		})
	}
}
|
||||
|
||||
// TestNfsVolFromId covers getNfsVolFromID parsing: malformed ids (too few
// elements) and valid ids with single and nested base directories.
func TestNfsVolFromId(t *testing.T) {
	cases := []struct {
		name      string
		req       string
		resp      *nfsVolume
		expectErr bool
	}{
		{
			name:      "ID only server",
			req:       testServer,
			resp:      nil,
			expectErr: true,
		},
		{
			name:      "ID missing subDir",
			req:       strings.Join([]string{testServer, testBaseDir}, "/"),
			resp:      nil,
			expectErr: true,
		},
		{
			name: "valid request single baseDir",
			req:  testVolumeID,
			resp: &nfsVolume{
				id:      testVolumeID,
				server:  testServer,
				baseDir: testBaseDir,
				subDir:  testCSIVolume,
			},
			expectErr: false,
		},
		{
			name: "valid request nested baseDir",
			req:  testVolumeIDNested,
			resp: &nfsVolume{
				id:      testVolumeIDNested,
				server:  testServer,
				baseDir: testBaseDirNested,
				subDir:  testCSIVolume,
			},
			expectErr: false,
		},
	}

	for _, test := range cases {
		test := test //pin
		t.Run(test.name, func(t *testing.T) {
			// Setup
			cs := initTestController(t)

			// Run
			resp, err := cs.getNfsVolFromID(test.req)

			// Verify
			if !test.expectErr && err != nil {
				t.Errorf("test %q failed: %v", test.name, err)
			}
			if test.expectErr && err == nil {
				t.Errorf("test %q failed; got success", test.name)
			}
			if !reflect.DeepEqual(resp, test.resp) {
				t.Errorf("test %q failed: got resp %+v, expected %+v", test.name, resp, test.resp)
			}
		})
	}
}
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
// fakeMounter embeds mount.FakeMounter and overrides selected methods to
// inject errors when paths contain magic marker substrings (used by tests).
type fakeMounter struct {
	mount.FakeMounter
}
|
||||
|
||||
// Mount overrides mount.FakeMounter.Mount.
|
||||
func (f *fakeMounter) Mount(source string, target string, fstype string, options []string) error {
|
||||
if strings.Contains(source, "error_mount") {
|
||||
return fmt.Errorf("fake Mount: source error")
|
||||
} else if strings.Contains(target, "error_mount") {
|
||||
return fmt.Errorf("fake Mount: target error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MountSensitive overrides mount.FakeMounter.MountSensitive.
|
||||
func (f *fakeMounter) MountSensitive(source string, target string, fstype string, options []string, sensitiveOptions []string) error {
|
||||
if strings.Contains(source, "error_mount_sens") {
|
||||
return fmt.Errorf("fake MountSensitive: source error")
|
||||
} else if strings.Contains(target, "error_mount_sens") {
|
||||
return fmt.Errorf("fake MountSensitive: target error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//IsLikelyNotMountPoint overrides mount.FakeMounter.IsLikelyNotMountPoint.
|
||||
func (f *fakeMounter) IsLikelyNotMountPoint(file string) (bool, error) {
|
||||
if strings.Contains(file, "error_is_likely") {
|
||||
return false, fmt.Errorf("fake IsLikelyNotMountPoint: fake error")
|
||||
}
|
||||
if strings.Contains(file, "false_is_likely") {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// NewFakeMounter returns a SafeFormatAndMount backed by fakeMounter for use
// in tests. The error return is always nil.
func NewFakeMounter() (*mount.SafeFormatAndMount, error) {
	return &mount.SafeFormatAndMount{
		Interface: &fakeMounter{},
	}, nil
}
|
||||
|
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
func TestMount(t *testing.T) {
|
||||
targetTest := "./target_test"
|
||||
sourceTest := "./source_test"
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
source string
|
||||
target string
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "[Error] Mocked source error",
|
||||
source: "./error_mount_source",
|
||||
target: targetTest,
|
||||
expectedErr: fmt.Errorf("fake Mount: source error"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Mocked target error",
|
||||
source: sourceTest,
|
||||
target: "./error_mount_target",
|
||||
expectedErr: fmt.Errorf("fake Mount: target error"),
|
||||
},
|
||||
{
|
||||
desc: "[Success] Successful run",
|
||||
source: sourceTest,
|
||||
target: targetTest,
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
d, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get test node server")
|
||||
}
|
||||
fakeMounter := &fakeMounter{}
|
||||
d.mounter = &mount.SafeFormatAndMount{
|
||||
Interface: fakeMounter,
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
err := d.mounter.Mount(test.source, test.target, "", nil)
|
||||
if !reflect.DeepEqual(err, test.expectedErr) {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMountSensitive(t *testing.T) {
|
||||
targetTest := "./target_test"
|
||||
sourceTest := "./source_test"
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
source string
|
||||
target string
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "[Error] Mocked source error",
|
||||
source: "./error_mount_sens_source",
|
||||
target: targetTest,
|
||||
expectedErr: fmt.Errorf("fake MountSensitive: source error"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Mocked target error",
|
||||
source: sourceTest,
|
||||
target: "./error_mount_sens_target",
|
||||
expectedErr: fmt.Errorf("fake MountSensitive: target error"),
|
||||
},
|
||||
{
|
||||
desc: "[Success] Successful run",
|
||||
source: sourceTest,
|
||||
target: targetTest,
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
d, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get test node server")
|
||||
}
|
||||
fakeMounter := &fakeMounter{}
|
||||
d.mounter = &mount.SafeFormatAndMount{
|
||||
Interface: fakeMounter,
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
err := d.mounter.MountSensitive(test.source, test.target, "", nil, nil)
|
||||
if !reflect.DeepEqual(err, test.expectedErr) {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsLikelyNotMountPoint(t *testing.T) {
|
||||
targetTest := "./target_test"
|
||||
tests := []struct {
|
||||
desc string
|
||||
file string
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "[Error] Mocked file error",
|
||||
file: "./error_is_likely_target",
|
||||
expectedErr: fmt.Errorf("fake IsLikelyNotMountPoint: fake error"),
|
||||
},
|
||||
{desc: "[Success] Successful run",
|
||||
file: targetTest,
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
desc: "[Success] Successful run not a mount",
|
||||
file: "./false_is_likely_target",
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
d, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get test node server")
|
||||
}
|
||||
fakeMounter := &fakeMounter{}
|
||||
d.mounter = &mount.SafeFormatAndMount{
|
||||
Interface: fakeMounter,
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
_, err := d.mounter.IsLikelyNotMountPoint(test.file)
|
||||
if !reflect.DeepEqual(err, test.expectedErr) {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type IdentityServer struct {
|
||||
Driver *Driver
|
||||
}
|
||||
|
||||
func (ids *IdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
|
||||
if ids.Driver.name == "" {
|
||||
return nil, status.Error(codes.Unavailable, "Driver name not configured")
|
||||
}
|
||||
|
||||
if ids.Driver.version == "" {
|
||||
return nil, status.Error(codes.Unavailable, "Driver is missing version")
|
||||
}
|
||||
|
||||
return &csi.GetPluginInfoResponse{
|
||||
Name: ids.Driver.name,
|
||||
VendorVersion: ids.Driver.version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Probe check whether the plugin is running or not.
|
||||
// This method does not need to return anything.
|
||||
// Currently the spec does not dictate what you should return either.
|
||||
// Hence, return an empty response
|
||||
func (ids *IdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) {
|
||||
return &csi.ProbeResponse{Ready: &wrappers.BoolValue{Value: true}}, nil
|
||||
}
|
||||
|
||||
func (ids *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
|
||||
return &csi.GetPluginCapabilitiesResponse{
|
||||
Capabilities: []*csi.PluginCapability{
|
||||
{
|
||||
Type: &csi.PluginCapability_Service_{
|
||||
Service: &csi.PluginCapability_Service{
|
||||
Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func TestGetPluginInfo(t *testing.T) {
|
||||
req := csi.GetPluginInfoRequest{}
|
||||
emptyNameDriver := NewEmptyDriver("name")
|
||||
emptyVersionDriver := NewEmptyDriver("version")
|
||||
tests := []struct {
|
||||
desc string
|
||||
driver *Driver
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "Successful Request",
|
||||
driver: NewEmptyDriver(""),
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
desc: "Driver name missing",
|
||||
driver: emptyNameDriver,
|
||||
expectedErr: status.Error(codes.Unavailable, "Driver name not configured"),
|
||||
},
|
||||
{
|
||||
desc: "Driver version missing",
|
||||
driver: emptyVersionDriver,
|
||||
expectedErr: status.Error(codes.Unavailable, "Driver is missing version"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
fakeIdentityServer := IdentityServer{
|
||||
Driver: test.driver,
|
||||
}
|
||||
_, err := fakeIdentityServer.GetPluginInfo(context.Background(), &req)
|
||||
if !reflect.DeepEqual(err, test.expectedErr) {
|
||||
t.Errorf("Unexpected error: %v\nExpected: %v", err, test.expectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProbe(t *testing.T) {
|
||||
d := NewEmptyDriver("")
|
||||
req := csi.ProbeRequest{}
|
||||
fakeIdentityServer := IdentityServer{
|
||||
Driver: d,
|
||||
}
|
||||
resp, err := fakeIdentityServer.Probe(context.Background(), &req)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, resp.XXX_sizecache, int32(0))
|
||||
assert.Equal(t, resp.Ready.Value, true)
|
||||
}
|
||||
|
||||
func TestGetPluginCapabilities(t *testing.T) {
|
||||
expectedCap := []*csi.PluginCapability{
|
||||
{
|
||||
Type: &csi.PluginCapability_Service_{
|
||||
Service: &csi.PluginCapability_Service{
|
||||
Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
d := NewEmptyDriver("")
|
||||
fakeIdentityServer := IdentityServer{
|
||||
Driver: d,
|
||||
}
|
||||
req := csi.GetPluginCapabilitiesRequest{}
|
||||
resp, err := fakeIdentityServer.GetPluginCapabilities(context.Background(), &req)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, resp.XXX_sizecache, int32(0))
|
||||
assert.Equal(t, resp.Capabilities, expectedCap)
|
||||
|
||||
}
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type IdentityServer struct {
|
||||
Driver *nfsDriver
|
||||
}
|
||||
|
||||
func (ids *IdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
|
||||
glog.V(5).Infof("Using default GetPluginInfo")
|
||||
|
||||
if ids.Driver.name == "" {
|
||||
return nil, status.Error(codes.Unavailable, "Driver name not configured")
|
||||
}
|
||||
|
||||
if ids.Driver.version == "" {
|
||||
return nil, status.Error(codes.Unavailable, "Driver is missing version")
|
||||
}
|
||||
|
||||
return &csi.GetPluginInfoResponse{
|
||||
Name: ids.Driver.name,
|
||||
VendorVersion: ids.Driver.version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ids *IdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) {
|
||||
return &csi.ProbeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ids *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
|
||||
glog.V(5).Infof("Using default capabilities")
|
||||
return &csi.GetPluginCapabilitiesResponse{
|
||||
Capabilities: []*csi.PluginCapability{
|
||||
{
|
||||
Type: &csi.PluginCapability_Service_{
|
||||
Service: &csi.PluginCapability_Service{
|
||||
Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -17,12 +17,14 @@ limitations under the License.
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/utils/mount"
|
||||
"k8s.io/klog/v2"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
type nfsDriver struct {
|
||||
type Driver struct {
|
||||
name string
|
||||
nodeID string
|
||||
version string
|
||||
|
|
@ -32,24 +34,33 @@ type nfsDriver struct {
|
|||
perm *uint32
|
||||
|
||||
//ids *identityServer
|
||||
ns *nodeServer
|
||||
cap map[csi.VolumeCapability_AccessMode_Mode]bool
|
||||
cscap []*csi.ControllerServiceCapability
|
||||
ns *NodeServer
|
||||
cap map[csi.VolumeCapability_AccessMode_Mode]bool
|
||||
cscap []*csi.ControllerServiceCapability
|
||||
nscap []*csi.NodeServiceCapability
|
||||
volumeLocks *VolumeLocks
|
||||
}
|
||||
|
||||
const (
|
||||
driverName = "nfs.csi.k8s.io"
|
||||
DriverName = "nfs.csi.k8s.io"
|
||||
// Address of the NFS server
|
||||
paramServer = "server"
|
||||
// Base directory of the NFS server to create volumes under.
|
||||
// The base directory must be a direct child of the root directory.
|
||||
// The root directory is omitted from the string, for example:
|
||||
// "base" instead of "/base"
|
||||
paramShare = "share"
|
||||
)
|
||||
|
||||
var (
|
||||
version = "2.0.0"
|
||||
version = "3.0.0"
|
||||
)
|
||||
|
||||
func NewNFSdriver(nodeID, endpoint string, perm *uint32) *nfsDriver {
|
||||
glog.Infof("Driver: %v version: %v", driverName, version)
|
||||
func NewNFSdriver(nodeID, endpoint string, perm *uint32) *Driver {
|
||||
klog.Infof("Driver: %v version: %v", DriverName, version)
|
||||
|
||||
n := &nfsDriver{
|
||||
name: driverName,
|
||||
n := &Driver{
|
||||
name: DriverName,
|
||||
version: version,
|
||||
nodeID: nodeID,
|
||||
endpoint: endpoint,
|
||||
|
|
@ -69,19 +80,32 @@ func NewNFSdriver(nodeID, endpoint string, perm *uint32) *nfsDriver {
|
|||
// NFS plugin does not support ControllerServiceCapability now.
|
||||
// If support is added, it should set to appropriate
|
||||
// ControllerServiceCapability RPC types.
|
||||
n.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_UNKNOWN})
|
||||
n.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
|
||||
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
|
||||
})
|
||||
|
||||
n.AddNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{
|
||||
csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
|
||||
csi.NodeServiceCapability_RPC_UNKNOWN,
|
||||
})
|
||||
n.volumeLocks = NewVolumeLocks()
|
||||
return n
|
||||
}
|
||||
|
||||
func NewNodeServer(n *nfsDriver, mounter mount.Interface) *nodeServer {
|
||||
return &nodeServer{
|
||||
func NewNodeServer(n *Driver, mounter mount.Interface) *NodeServer {
|
||||
return &NodeServer{
|
||||
Driver: n,
|
||||
mounter: mounter,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *nfsDriver) Run() {
|
||||
func (n *Driver) Run(testMode bool) {
|
||||
versionMeta, err := GetVersionYAML()
|
||||
if err != nil {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta)
|
||||
|
||||
n.ns = NewNodeServer(n, mount.New(""))
|
||||
s := NewNonBlockingGRPCServer()
|
||||
s.Start(n.endpoint,
|
||||
|
|
@ -89,29 +113,43 @@ func (n *nfsDriver) Run() {
|
|||
// NFS plugin has not implemented ControllerServer
|
||||
// using default controllerserver.
|
||||
NewControllerServer(n),
|
||||
n.ns)
|
||||
n.ns,
|
||||
testMode)
|
||||
s.Wait()
|
||||
}
|
||||
|
||||
func (n *nfsDriver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
|
||||
func (n *Driver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
|
||||
var vca []*csi.VolumeCapability_AccessMode
|
||||
for _, c := range vc {
|
||||
glog.Infof("Enabling volume access mode: %v", c.String())
|
||||
klog.Infof("Enabling volume access mode: %v", c.String())
|
||||
vca = append(vca, &csi.VolumeCapability_AccessMode{Mode: c})
|
||||
n.cap[c] = true
|
||||
}
|
||||
return vca
|
||||
}
|
||||
|
||||
func (n *nfsDriver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) {
|
||||
func (n *Driver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) {
|
||||
var csc []*csi.ControllerServiceCapability
|
||||
|
||||
for _, c := range cl {
|
||||
glog.Infof("Enabling controller service capability: %v", c.String())
|
||||
klog.Infof("Enabling controller service capability: %v", c.String())
|
||||
csc = append(csc, NewControllerServiceCapability(c))
|
||||
}
|
||||
|
||||
n.cscap = csc
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (n *Driver) AddNodeServiceCapabilities(nl []csi.NodeServiceCapability_RPC_Type) {
|
||||
var nsc []*csi.NodeServiceCapability
|
||||
for _, n := range nl {
|
||||
klog.Infof("Enabling node service capability: %v", n.String())
|
||||
nsc = append(nsc, NewNodeServiceCapability(n))
|
||||
}
|
||||
n.nscap = nsc
|
||||
}
|
||||
|
||||
func IsCorruptedDir(dir string) bool {
|
||||
_, pathErr := mount.PathExists(dir)
|
||||
fmt.Printf("IsCorruptedDir(%s) returned with error: %v", dir, pathErr)
|
||||
return pathErr != nil && mount.IsCorruptedMnt(pathErr)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,187 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
fakeNodeID = "fakeNodeID"
|
||||
)
|
||||
|
||||
func NewEmptyDriver(emptyField string) *Driver {
|
||||
var d *Driver
|
||||
var perm *uint32
|
||||
switch emptyField {
|
||||
case "version":
|
||||
d = &Driver{
|
||||
name: DriverName,
|
||||
version: "",
|
||||
nodeID: fakeNodeID,
|
||||
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
|
||||
perm: perm,
|
||||
}
|
||||
case "name":
|
||||
d = &Driver{
|
||||
name: "",
|
||||
version: version,
|
||||
nodeID: fakeNodeID,
|
||||
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
|
||||
perm: perm,
|
||||
}
|
||||
default:
|
||||
d = &Driver{
|
||||
name: DriverName,
|
||||
version: version,
|
||||
nodeID: fakeNodeID,
|
||||
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
|
||||
perm: perm,
|
||||
}
|
||||
}
|
||||
d.volumeLocks = NewVolumeLocks()
|
||||
return d
|
||||
}
|
||||
|
||||
func TestNewFakeDriver(t *testing.T) {
|
||||
d := NewEmptyDriver("version")
|
||||
assert.Empty(t, d.version)
|
||||
|
||||
d = NewEmptyDriver("name")
|
||||
assert.Empty(t, d.name)
|
||||
}
|
||||
|
||||
func TestIsCorruptedDir(t *testing.T) {
|
||||
existingMountPath, err := ioutil.TempDir(os.TempDir(), "csi-mount-test")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create tmp dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(existingMountPath)
|
||||
|
||||
curruptedPath := filepath.Join(existingMountPath, "curruptedPath")
|
||||
if err := os.Symlink(existingMountPath, curruptedPath); err != nil {
|
||||
t.Fatalf("failed to create curruptedPath: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
dir string
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
desc: "NotExist dir",
|
||||
dir: "/tmp/NotExist",
|
||||
expectedResult: false,
|
||||
},
|
||||
{
|
||||
desc: "Existing dir",
|
||||
dir: existingMountPath,
|
||||
expectedResult: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
isCorruptedDir := IsCorruptedDir(test.dir)
|
||||
assert.Equal(t, test.expectedResult, isCorruptedDir, "TestCase[%d]: %s", i, test.desc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
testFunc func(t *testing.T)
|
||||
}{
|
||||
{
|
||||
name: "Successful run",
|
||||
testFunc: func(t *testing.T) {
|
||||
d := NewEmptyDriver("")
|
||||
d.endpoint = "tcp://127.0.0.1:0"
|
||||
d.Run(true)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Successful run with node ID missing",
|
||||
testFunc: func(t *testing.T) {
|
||||
d := NewEmptyDriver("")
|
||||
d.endpoint = "tcp://127.0.0.1:0"
|
||||
d.nodeID = ""
|
||||
d.Run(true)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, tc.testFunc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewControllerServiceCapability(t *testing.T) {
|
||||
tests := []struct {
|
||||
cap csi.ControllerServiceCapability_RPC_Type
|
||||
}{
|
||||
{
|
||||
cap: csi.ControllerServiceCapability_RPC_UNKNOWN,
|
||||
},
|
||||
{
|
||||
cap: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
|
||||
},
|
||||
{
|
||||
cap: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
|
||||
},
|
||||
{
|
||||
cap: csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
|
||||
},
|
||||
{
|
||||
cap: csi.ControllerServiceCapability_RPC_GET_CAPACITY,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
resp := NewControllerServiceCapability(test.cap)
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, resp.XXX_sizecache, int32(0))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewNodeServiceCapability(t *testing.T) {
|
||||
tests := []struct {
|
||||
cap csi.NodeServiceCapability_RPC_Type
|
||||
}{
|
||||
{
|
||||
cap: csi.NodeServiceCapability_RPC_UNKNOWN,
|
||||
},
|
||||
{
|
||||
cap: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
|
||||
},
|
||||
{
|
||||
cap: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
|
||||
},
|
||||
{
|
||||
cap: csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
resp := NewNodeServiceCapability(test.cap)
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, resp.XXX_sizecache, int32(0))
|
||||
}
|
||||
}
|
||||
|
|
@ -23,17 +23,18 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/utils/mount"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
type nodeServer struct {
|
||||
Driver *nfsDriver
|
||||
// NodeServer driver
|
||||
type NodeServer struct {
|
||||
Driver *Driver
|
||||
mounter mount.Interface
|
||||
}
|
||||
|
||||
|
|
@ -42,8 +43,20 @@ const (
|
|||
unmountTimeout = time.Minute
|
||||
)
|
||||
|
||||
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
|
||||
// NodePublishVolume mount the volume
|
||||
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
|
||||
if req.GetVolumeCapability() == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
|
||||
}
|
||||
volumeID := req.GetVolumeId()
|
||||
if len(volumeID) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
|
||||
}
|
||||
targetPath := req.GetTargetPath()
|
||||
if len(targetPath) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "Target path not provided")
|
||||
}
|
||||
|
||||
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
|
|
@ -55,21 +68,26 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
|
|||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if !notMnt {
|
||||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
mo := req.GetVolumeCapability().GetMount().GetMountFlags()
|
||||
if acquired := ns.Driver.volumeLocks.TryAcquire(volumeID); !acquired {
|
||||
return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID)
|
||||
}
|
||||
defer ns.Driver.volumeLocks.Release(volumeID)
|
||||
|
||||
mountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()
|
||||
if req.GetReadonly() {
|
||||
mo = append(mo, "ro")
|
||||
mountOptions = append(mountOptions, "ro")
|
||||
}
|
||||
|
||||
s := req.GetVolumeContext()["server"]
|
||||
ep := req.GetVolumeContext()["share"]
|
||||
s := req.GetVolumeContext()[paramServer]
|
||||
ep := req.GetVolumeContext()[paramShare]
|
||||
source := fmt.Sprintf("%s:%s", s, ep)
|
||||
|
||||
err = ns.mounter.Mount(source, targetPath, "nfs", mo)
|
||||
klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions)
|
||||
err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions)
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
return nil, status.Error(codes.PermissionDenied, err.Error())
|
||||
|
|
@ -89,7 +107,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
|
|||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) IsNotMountPoint(path string) (bool, error) {
|
||||
func (ns *NodeServer) IsNotMountPoint(path string) (bool, error) {
|
||||
mtab, err := ns.mounter.List()
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
|
@ -106,43 +124,57 @@ func (ns *nodeServer) IsNotMountPoint(path string) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
|
||||
// NodeUnpublishVolume unmount the volume
|
||||
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
|
||||
volumeID := req.GetVolumeId()
|
||||
if len(volumeID) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
|
||||
}
|
||||
targetPath := req.GetTargetPath()
|
||||
glog.V(6).Infof("NodeUnpublishVolume started for %s", targetPath)
|
||||
if len(targetPath) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
|
||||
}
|
||||
klog.V(6).Infof("NodeUnpublishVolume started for %s", targetPath)
|
||||
|
||||
notMnt, err := ns.IsNotMountPoint(targetPath)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
glog.V(4).Infof("NodeUnpublishVolume: path %s is *not* a mount point: %t", targetPath, notMnt)
|
||||
if acquired := ns.Driver.volumeLocks.TryAcquire(volumeID); !acquired {
|
||||
return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID)
|
||||
}
|
||||
defer ns.Driver.volumeLocks.Release(volumeID)
|
||||
|
||||
klog.V(4).Infof("NodeUnpublishVolume: path %s is *not* a mount point: %t", targetPath, notMnt)
|
||||
if !notMnt {
|
||||
|
||||
err := ns.tryUnmount(targetPath)
|
||||
if err != nil {
|
||||
if err == context.DeadlineExceeded {
|
||||
glog.V(2).Infof("Timed out waiting for unmount of %s, trying with -f", targetPath)
|
||||
klog.V(2).Infof("Timed out waiting for unmount of %s, trying with -f", targetPath)
|
||||
err = ns.forceUnmount(targetPath)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
glog.V(2).Infof("Unmounted %s", targetPath)
|
||||
klog.V(2).Infof("Unmounted %s", targetPath)
|
||||
}
|
||||
|
||||
klog.V(2).Infof("NodeUnpublishVolume: Remove %s on volumeID(%s)", targetPath, volumeID)
|
||||
if err := os.Remove(targetPath); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Cleaned %s", targetPath)
|
||||
klog.V(4).Infof("Cleaned %s", targetPath)
|
||||
|
||||
return &csi.NodeUnpublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
// tryUnmount calls plain "umount" and waits for unmountTimeout for it to finish.
|
||||
func (ns *nodeServer) tryUnmount(path string) error {
|
||||
func (ns *NodeServer) tryUnmount(path string) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), unmountTimeout)
|
||||
defer cancel()
|
||||
|
||||
|
|
@ -161,7 +193,7 @@ func (ns *nodeServer) tryUnmount(path string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) forceUnmount(path string) error {
|
||||
func (ns *NodeServer) forceUnmount(path string) error {
|
||||
cmd := exec.Command("umount", "-f", path)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
|
|
@ -170,42 +202,107 @@ func (ns *nodeServer) forceUnmount(path string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
|
||||
glog.V(5).Infof("Using default NodeGetInfo")
|
||||
|
||||
// NodeGetInfo return info of the node on which this plugin is running
|
||||
func (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
|
||||
return &csi.NodeGetInfoResponse{
|
||||
NodeId: ns.Driver.nodeID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
|
||||
glog.V(5).Infof("Using default NodeGetCapabilities")
|
||||
|
||||
// NodeGetCapabilities return the capabilities of the Node plugin
|
||||
func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
|
||||
return &csi.NodeGetCapabilitiesResponse{
|
||||
Capabilities: []*csi.NodeServiceCapability{
|
||||
Capabilities: ns.Driver.nscap,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NodeGetVolumeStats get volume stats
|
||||
func (ns *NodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
|
||||
if len(req.VolumeId) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty")
|
||||
}
|
||||
if len(req.VolumePath) == 0 {
|
||||
return nil, status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume path was empty")
|
||||
}
|
||||
|
||||
_, err := os.Stat(req.VolumePath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, status.Errorf(codes.NotFound, "path %s does not exist", req.VolumePath)
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "failed to stat file %s: %v", req.VolumePath, err)
|
||||
}
|
||||
|
||||
volumeMetrics, err := volume.NewMetricsStatFS(req.VolumePath).GetMetrics()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to get metrics: %v", err)
|
||||
}
|
||||
|
||||
available, ok := volumeMetrics.Available.AsInt64()
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.Internal, "failed to transform volume available size(%v)", volumeMetrics.Available)
|
||||
}
|
||||
capacity, ok := volumeMetrics.Capacity.AsInt64()
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.Internal, "failed to transform volume capacity size(%v)", volumeMetrics.Capacity)
|
||||
}
|
||||
used, ok := volumeMetrics.Used.AsInt64()
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.Internal, "failed to transform volume used size(%v)", volumeMetrics.Used)
|
||||
}
|
||||
|
||||
inodesFree, ok := volumeMetrics.InodesFree.AsInt64()
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.Internal, "failed to transform disk inodes free(%v)", volumeMetrics.InodesFree)
|
||||
}
|
||||
inodes, ok := volumeMetrics.Inodes.AsInt64()
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.Internal, "failed to transform disk inodes(%v)", volumeMetrics.Inodes)
|
||||
}
|
||||
inodesUsed, ok := volumeMetrics.InodesUsed.AsInt64()
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.Internal, "failed to transform disk inodes used(%v)", volumeMetrics.InodesUsed)
|
||||
}
|
||||
|
||||
return &csi.NodeGetVolumeStatsResponse{
|
||||
Usage: []*csi.VolumeUsage{
|
||||
{
|
||||
Type: &csi.NodeServiceCapability_Rpc{
|
||||
Rpc: &csi.NodeServiceCapability_RPC{
|
||||
Type: csi.NodeServiceCapability_RPC_UNKNOWN,
|
||||
},
|
||||
},
|
||||
Unit: csi.VolumeUsage_BYTES,
|
||||
Available: available,
|
||||
Total: capacity,
|
||||
Used: used,
|
||||
},
|
||||
{
|
||||
Unit: csi.VolumeUsage_INODES,
|
||||
Available: inodesFree,
|
||||
Total: inodes,
|
||||
Used: inodesUsed,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, in *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
|
||||
// NodeUnstageVolume unstage volume
|
||||
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
|
||||
return &csi.NodeUnstageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
|
||||
// NodeStageVolume stage volume
|
||||
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
// NodeExpandVolume node expand volume
|
||||
func (ns *NodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "")
|
||||
}
|
||||
|
||||
func makeDir(pathname string) error {
|
||||
err := os.MkdirAll(pathname, os.FileMode(0755))
|
||||
if err != nil {
|
||||
if !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,312 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/kubernetes-csi/csi-driver-nfs/test/utils/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
targetTest = "./target_test"
|
||||
)
|
||||
|
||||
func TestNodePublishVolume(t *testing.T) {
|
||||
ns, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}
|
||||
alreadyMountedTarget := testutil.GetWorkDirPath("false_is_likely_exist_target", t)
|
||||
targetTest := testutil.GetWorkDirPath("target_test", t)
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
setup func()
|
||||
req csi.NodePublishVolumeRequest
|
||||
skipOnWindows bool
|
||||
expectedErr error
|
||||
cleanup func()
|
||||
}{
|
||||
{
|
||||
desc: "[Error] Volume capabilities missing",
|
||||
req: csi.NodePublishVolumeRequest{},
|
||||
expectedErr: status.Error(codes.InvalidArgument, "Volume capability missing in request"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Volume ID missing",
|
||||
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}},
|
||||
expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Target path missing",
|
||||
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
|
||||
VolumeId: "vol_1"},
|
||||
expectedErr: status.Error(codes.InvalidArgument, "Target path not provided"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Volume operation in progress",
|
||||
setup: func() {
|
||||
ns.Driver.volumeLocks.TryAcquire("vol_1")
|
||||
},
|
||||
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
|
||||
VolumeId: "vol_1",
|
||||
TargetPath: targetTest},
|
||||
expectedErr: status.Error(codes.Aborted, fmt.Sprintf(volumeOperationAlreadyExistsFmt, "vol_1")),
|
||||
cleanup: func() {
|
||||
ns.Driver.volumeLocks.Release("vol_1")
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "[Success] Stage target path missing",
|
||||
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
|
||||
VolumeId: "vol_1",
|
||||
TargetPath: targetTest},
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
desc: "[Success] Valid request read only",
|
||||
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
|
||||
VolumeId: "vol_1",
|
||||
TargetPath: targetTest,
|
||||
Readonly: true},
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
desc: "[Success] Valid request already mounted",
|
||||
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
|
||||
VolumeId: "vol_1",
|
||||
TargetPath: alreadyMountedTarget,
|
||||
Readonly: true},
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
desc: "[Success] Valid request",
|
||||
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
|
||||
VolumeId: "vol_1",
|
||||
TargetPath: targetTest,
|
||||
Readonly: true},
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
// setup
|
||||
_ = makeDir(alreadyMountedTarget)
|
||||
|
||||
for _, tc := range tests {
|
||||
if tc.setup != nil {
|
||||
tc.setup()
|
||||
}
|
||||
_, err := ns.NodePublishVolume(context.Background(), &tc.req)
|
||||
if !reflect.DeepEqual(err, tc.expectedErr) {
|
||||
t.Errorf("Desc:%v\nUnexpected error: %v\nExpected: %v", tc.desc, err, tc.expectedErr)
|
||||
}
|
||||
if tc.cleanup != nil {
|
||||
tc.cleanup()
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
err = os.RemoveAll(targetTest)
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(alreadyMountedTarget)
|
||||
assert.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
func TestNodeUnpublishVolume(t *testing.T) {
|
||||
ns, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
errorTarget := testutil.GetWorkDirPath("error_is_likely_target", t)
|
||||
targetTest := testutil.GetWorkDirPath("target_test", t)
|
||||
targetFile := testutil.GetWorkDirPath("abc.go", t)
|
||||
alreadyMountedTarget := testutil.GetWorkDirPath("false_is_likely_exist_target", t)
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
setup func()
|
||||
req csi.NodeUnpublishVolumeRequest
|
||||
expectedErr error
|
||||
cleanup func()
|
||||
}{
|
||||
{
|
||||
desc: "[Error] Volume ID missing",
|
||||
req: csi.NodeUnpublishVolumeRequest{TargetPath: targetTest},
|
||||
expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Target missing",
|
||||
req: csi.NodeUnpublishVolumeRequest{VolumeId: "vol_1"},
|
||||
expectedErr: status.Error(codes.InvalidArgument, "Target path missing in request"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Unmount error mocked by IsLikelyNotMountPoint",
|
||||
req: csi.NodeUnpublishVolumeRequest{TargetPath: errorTarget, VolumeId: "vol_1"},
|
||||
expectedErr: status.Error(codes.Internal, "fake IsLikelyNotMountPoint: fake error"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Volume not mounted",
|
||||
req: csi.NodeUnpublishVolumeRequest{TargetPath: targetFile, VolumeId: "vol_1"},
|
||||
expectedErr: status.Error(codes.NotFound, "Volume not mounted"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Volume operation in progress",
|
||||
setup: func() {
|
||||
ns.Driver.volumeLocks.TryAcquire("vol_1")
|
||||
},
|
||||
req: csi.NodeUnpublishVolumeRequest{TargetPath: alreadyMountedTarget, VolumeId: "vol_1"},
|
||||
expectedErr: status.Error(codes.Aborted, fmt.Sprintf(volumeOperationAlreadyExistsFmt, "vol_1")),
|
||||
cleanup: func() {
|
||||
ns.Driver.volumeLocks.Release("vol_1")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Setup
|
||||
_ = makeDir(errorTarget)
|
||||
|
||||
for _, tc := range tests {
|
||||
if tc.setup != nil {
|
||||
tc.setup()
|
||||
}
|
||||
_, err := ns.NodeUnpublishVolume(context.Background(), &tc.req)
|
||||
if !reflect.DeepEqual(err, tc.expectedErr) {
|
||||
t.Errorf("Desc:%v\nUnexpected error: %v\nExpected: %v", tc.desc, err, tc.expectedErr)
|
||||
}
|
||||
if tc.cleanup != nil {
|
||||
tc.cleanup()
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
err = os.RemoveAll(errorTarget)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestNodeGetInfo(t *testing.T) {
|
||||
ns, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
// Test valid request
|
||||
req := csi.NodeGetInfoRequest{}
|
||||
resp, err := ns.NodeGetInfo(context.Background(), &req)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, resp.GetNodeId(), fakeNodeID)
|
||||
}
|
||||
|
||||
func TestNodeGetCapabilities(t *testing.T) {
|
||||
ns, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
capType := &csi.NodeServiceCapability_Rpc{
|
||||
Rpc: &csi.NodeServiceCapability_RPC{
|
||||
Type: csi.NodeServiceCapability_RPC_UNKNOWN,
|
||||
},
|
||||
}
|
||||
|
||||
capList := []*csi.NodeServiceCapability{{
|
||||
Type: capType,
|
||||
}}
|
||||
ns.Driver.nscap = capList
|
||||
|
||||
// Test valid request
|
||||
req := csi.NodeGetCapabilitiesRequest{}
|
||||
resp, err := ns.NodeGetCapabilities(context.Background(), &req)
|
||||
assert.NotNil(t, resp)
|
||||
assert.Equal(t, resp.Capabilities[0].GetType(), capType)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func getTestNodeServer() (NodeServer, error) {
|
||||
d := NewEmptyDriver("")
|
||||
mounter, err := NewFakeMounter()
|
||||
if err != nil {
|
||||
return NodeServer{}, errors.New("failed to get fake mounter")
|
||||
}
|
||||
return NodeServer{
|
||||
Driver: d,
|
||||
mounter: mounter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestNodeGetVolumeStats(t *testing.T) {
|
||||
nonexistedPath := "/not/a/real/directory"
|
||||
fakePath := "/tmp/fake-volume-path"
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
req csi.NodeGetVolumeStatsRequest
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "[Error] Volume ID missing",
|
||||
req: csi.NodeGetVolumeStatsRequest{VolumePath: targetTest},
|
||||
expectedErr: status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] VolumePath missing",
|
||||
req: csi.NodeGetVolumeStatsRequest{VolumeId: "vol_1"},
|
||||
expectedErr: status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume path was empty"),
|
||||
},
|
||||
{
|
||||
desc: "[Error] Incorrect volume path",
|
||||
req: csi.NodeGetVolumeStatsRequest{VolumePath: nonexistedPath, VolumeId: "vol_1"},
|
||||
expectedErr: status.Errorf(codes.NotFound, "path /not/a/real/directory does not exist"),
|
||||
},
|
||||
{
|
||||
desc: "[Success] Standard success",
|
||||
req: csi.NodeGetVolumeStatsRequest{VolumePath: fakePath, VolumeId: "vol_1"},
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
// Setup
|
||||
_ = makeDir(fakePath)
|
||||
ns, err := getTestNodeServer()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
_, err := ns.NodeGetVolumeStats(context.Background(), &test.req)
|
||||
if !reflect.DeepEqual(err, test.expectedErr) {
|
||||
t.Errorf("desc: %v, expected error: %v, actual error: %v", test.desc, test.expectedErr, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
err = os.RemoveAll(fakePath)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
|
@ -1,20 +1,36 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"google.golang.org/grpc"
|
||||
"time"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// Defines Non blocking GRPC server interfaces
|
||||
type NonBlockingGRPCServer interface {
|
||||
// Start services at the endpoint
|
||||
Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer)
|
||||
Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode bool)
|
||||
// Waits for the service to stop
|
||||
Wait()
|
||||
// Stops the service gracefully
|
||||
|
|
@ -33,13 +49,11 @@ type nonBlockingGRPCServer struct {
|
|||
server *grpc.Server
|
||||
}
|
||||
|
||||
func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {
|
||||
func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode bool) {
|
||||
|
||||
s.wg.Add(1)
|
||||
|
||||
go s.serve(endpoint, ids, cs, ns)
|
||||
|
||||
return
|
||||
go s.serve(endpoint, ids, cs, ns, testMode)
|
||||
}
|
||||
|
||||
func (s *nonBlockingGRPCServer) Wait() {
|
||||
|
|
@ -54,23 +68,23 @@ func (s *nonBlockingGRPCServer) ForceStop() {
|
|||
s.server.Stop()
|
||||
}
|
||||
|
||||
func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {
|
||||
func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, testMode bool) {
|
||||
|
||||
proto, addr, err := ParseEndpoint(endpoint)
|
||||
if err != nil {
|
||||
glog.Fatal(err.Error())
|
||||
klog.Fatal(err.Error())
|
||||
}
|
||||
|
||||
if proto == "unix" {
|
||||
addr = "/" + addr
|
||||
if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
|
||||
glog.Fatalf("Failed to remove %s, error: %s", addr, err.Error())
|
||||
klog.Fatalf("Failed to remove %s, error: %s", addr, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
listener, err := net.Listen(proto, addr)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to listen: %v", err)
|
||||
klog.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
|
||||
opts := []grpc.ServerOption{
|
||||
|
|
@ -89,8 +103,21 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c
|
|||
csi.RegisterNodeServer(server, ns)
|
||||
}
|
||||
|
||||
glog.Infof("Listening for connections on address: %#v", listener.Addr())
|
||||
// Used to stop the server while running tests
|
||||
if testMode {
|
||||
s.wg.Done()
|
||||
go func() {
|
||||
// make sure Serve() is called
|
||||
s.wg.Wait()
|
||||
time.Sleep(time.Millisecond * 1000)
|
||||
s.server.GracefulStop()
|
||||
}()
|
||||
}
|
||||
|
||||
server.Serve(listener)
|
||||
klog.Infof("Listening for connections on address: %#v", listener.Addr())
|
||||
|
||||
err = server.Serve(listener)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to serve grpc server: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,22 +1,42 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/golang/glog"
|
||||
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"strings"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func NewDefaultIdentityServer(d *nfsDriver) *IdentityServer {
|
||||
func NewDefaultIdentityServer(d *Driver) *IdentityServer {
|
||||
return &IdentityServer{
|
||||
Driver: d,
|
||||
}
|
||||
}
|
||||
|
||||
func NewControllerServer(d *nfsDriver) *ControllerServer {
|
||||
func NewControllerServer(d *Driver) *ControllerServer {
|
||||
return &ControllerServer{
|
||||
Driver: d,
|
||||
}
|
||||
|
|
@ -32,6 +52,16 @@ func NewControllerServiceCapability(cap csi.ControllerServiceCapability_RPC_Type
|
|||
}
|
||||
}
|
||||
|
||||
func NewNodeServiceCapability(cap csi.NodeServiceCapability_RPC_Type) *csi.NodeServiceCapability {
|
||||
return &csi.NodeServiceCapability{
|
||||
Type: &csi.NodeServiceCapability_Rpc{
|
||||
Rpc: &csi.NodeServiceCapability_RPC{
|
||||
Type: cap,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func ParseEndpoint(ep string) (string, string, error) {
|
||||
if strings.HasPrefix(strings.ToLower(ep), "unix://") || strings.HasPrefix(strings.ToLower(ep), "tcp://") {
|
||||
s := strings.SplitN(ep, "://", 2)
|
||||
|
|
@ -42,14 +72,56 @@ func ParseEndpoint(ep string) (string, string, error) {
|
|||
return "", "", fmt.Errorf("Invalid endpoint: %v", ep)
|
||||
}
|
||||
|
||||
func getLogLevel(method string) int32 {
|
||||
if method == "/csi.v1.Identity/Probe" ||
|
||||
method == "/csi.v1.Node/NodeGetCapabilities" ||
|
||||
method == "/csi.v1.Node/NodeGetVolumeStats" {
|
||||
return 8
|
||||
}
|
||||
return 2
|
||||
}
|
||||
|
||||
func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
glog.V(3).Infof("GRPC call: %s", info.FullMethod)
|
||||
glog.V(5).Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
|
||||
level := klog.Level(getLogLevel(info.FullMethod))
|
||||
klog.V(level).Infof("GRPC call: %s", info.FullMethod)
|
||||
klog.V(level).Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
|
||||
|
||||
resp, err := handler(ctx, req)
|
||||
if err != nil {
|
||||
glog.Errorf("GRPC error: %v", err)
|
||||
klog.Errorf("GRPC error: %v", err)
|
||||
} else {
|
||||
glog.V(5).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
|
||||
klog.V(level).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
const (
|
||||
volumeOperationAlreadyExistsFmt = "An operation with the given Volume ID %s already exists"
|
||||
)
|
||||
|
||||
type VolumeLocks struct {
|
||||
locks sets.String
|
||||
mux sync.Mutex
|
||||
}
|
||||
|
||||
func NewVolumeLocks() *VolumeLocks {
|
||||
return &VolumeLocks{
|
||||
locks: sets.NewString(),
|
||||
}
|
||||
}
|
||||
|
||||
func (vl *VolumeLocks) TryAcquire(volumeID string) bool {
|
||||
vl.mux.Lock()
|
||||
defer vl.mux.Unlock()
|
||||
if vl.locks.Has(volumeID) {
|
||||
return false
|
||||
}
|
||||
vl.locks.Insert(volumeID)
|
||||
return true
|
||||
}
|
||||
|
||||
func (vl *VolumeLocks) Release(volumeID string) {
|
||||
vl.mux.Lock()
|
||||
defer vl.mux.Unlock()
|
||||
vl.locks.Delete(volumeID)
|
||||
}
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue