Foreword

https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/tree/master/deploy

Note: tanqidi-client-provisioner in this article is only an example name.
In practice, choose a name that reflects your cloud provider, environment, or team conventions, so it stays readable and easy to manage. For example:

  • tencent-client-provisioner

  • aliyun-client-provisioner

  • huawei-client-provisioner

  • onprem-nfs-provisioner

  • mycompany-nfs-provisioner

Clear, semantic names let anyone tell the origin or purpose of a StorageClass or provisioner at a glance, making the setup self-explanatory and easier to maintain and troubleshoot.

1. System Configuration

yum install -y nfs-utils rpcbind
systemctl enable --now rpcbind
systemctl enable --now nfs-server

mkdir -p /data/nfs
chmod -R 777 /data/nfs

# Configure the export. Listing your k8s node IPs explicitly adds safety and prevents unauthorized mounts
[root@hybxvdka01 nfs]# cat /etc/exports
/data/nfs 10.133.179.71(rw,sync,no_root_squash,no_subtree_check) \
          10.133.179.72(rw,sync,no_root_squash,no_subtree_check) \
          10.133.179.73(rw,sync,no_root_squash,no_subtree_check)

[root@hybxvdka01 nfs]# exportfs -r
[root@hybxvdka01 nfs]# showmount -e
Export list for hybxvdka01:
/data/nfs 10.133.179.71,10.133.179.72,10.133.179.73
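
Before touching the cluster, it is worth confirming that a worker node can actually mount the export. A quick manual check from one of the node IPs listed in /etc/exports (every k8s node needs nfs-utils installed, since the kubelet uses the host's mount helper for NFS volumes):

# on a k8s node, e.g. 10.133.179.71
yum install -y nfs-utils
mount -t nfs 10.133.179.71:/data/nfs /mnt
touch /mnt/mount-test && rm -f /mnt/mount-test   # quick read/write check
umount /mnt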

2. Kubernetes Resources

2.1 namespace

apiVersion: v1
kind: Namespace
metadata:
  name: nfs-client-provisioner

2.2 rbac

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-client-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-client-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
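
After applying the RBAC manifests, the granted permissions can be sanity-checked with kubectl auth can-i, impersonating the ServiceAccount; both commands should print "yes":

kubectl auth can-i create persistentvolumes \
  --as=system:serviceaccount:nfs-client-provisioner:nfs-client-provisioner
kubectl auth can-i update persistentvolumeclaims \
  --as=system:serviceaccount:nfs-client-provisioner:nfs-client-provisioner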

2.3 deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 10.133.179.71
            - name: NFS_PATH
              value: /data/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.133.179.71
            path: /data/nfs
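
Once the Deployment is applied, check that the pod comes up and that the provisioner started cleanly. An NFS connectivity problem typically shows up as the pod stuck in ContainerCreating, with the mount error visible in its events:

kubectl -n nfs-client-provisioner get pods
kubectl -n nfs-client-provisioner logs deploy/nfs-client-provisioner
kubectl -n nfs-client-provisioner describe pod -l app=nfs-client-provisioner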

2.4 storage-class

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "false"
reclaimPolicy: Retain
volumeBindingMode: Immediate
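
After creating the class, confirm it is registered. For reference, the upstream README documents that each volume is provisioned on the share as a subdirectory named ${namespace}-${pvcName}-${pvName}; that is exactly the pattern visible in the server-side listings later in this article:

kubectl get storageclass nfs-client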

2.5 test-pvc

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-nfs-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Mi
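
After creating the claim, verify that it binds; with volumeBindingMode: Immediate a PV should be provisioned right away:

kubectl get pvc test-nfs-claim          # STATUS should be Bound
kubectl get pv | grep test-nfs-claim    # the dynamically provisioned PV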

2.6 All-in-One Manifest

cat > nfs-subdir-external-provisioner.yaml<<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: nfs-client-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-client-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-client-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: harbor.tanqidi.com/k8s/arm64/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 10.133.179.71
            - name: NFS_PATH
              value: /data/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.133.179.71
            path: /data/nfs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "false"
reclaimPolicy: Retain
volumeBindingMode: Immediate
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-nfs-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Mi
EOF
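
Apply the combined manifest and watch the provisioner come up:

kubectl apply -f nfs-subdir-external-provisioner.yaml
kubectl -n nfs-client-provisioner get pods -w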

2.7 Test Container

cat > test-nfs-busybox-deployment.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-nfs-busybox-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: busybox
  template:
    metadata:
      labels:
        app: busybox
    spec:
      containers:
      - name: busybox
        image: busybox:1.31.1
        command:
          - sh
          - -c
          - |
            echo "helo nfs" > /mnt/test.txt;   # 写入文件
            echo "File written successfully";   # 打印成功消息
            tail -f /dev/null                   # 保持容器卡住
        volumeMounts:
        - name: nfs-volume
          mountPath: /mnt
      volumes:
      - name: nfs-volume
        persistentVolumeClaim:
          claimName: test-nfs-claim

EOF
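
Apply the test Deployment, then read the file back through the pod to confirm the round trip (the deployment name comes from the manifest above):

kubectl apply -f test-nfs-busybox-deployment.yaml
kubectl wait --for=condition=Available deployment/test-nfs-busybox-deployment --timeout=60s
kubectl exec deploy/test-nfs-busybox-deployment -- cat /mnt/test.txt   # should print: helo nfs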


# On the NFS server, the result is exactly as expected.

drwxrwxrwx 2 root root 22 Jul 29 18:04 default-test-nfs-claim-pvc-4e798c44-772b-4797-bfce-9e256d85c732
[root@hybxvdka01 nfs]# ll default-test-nfs-claim-pvc-4e798c44-772b-4797-bfce-9e256d85c732/
total 4
-rw-r--r-- 1 root root 9 Jul 29 18:06 test.txt
[root@hybxvdka01 nfs]# cat default-test-nfs-claim-pvc-4e798c44-772b-4797-bfce-9e256d85c732/test.txt
helo nfs

3. Default StorageClass

To make nfs-client the cluster-wide default StorageClass, add the following annotation to its metadata:

metadata:
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"

Or patch the existing StorageClass directly:

kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
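
With the default-class annotation in place, any PVC that omits storageClassName is served by nfs-client automatically. A minimal sketch (the claim name default-class-demo is hypothetical, for illustration only):

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: default-class-demo    # hypothetical name, for illustration only
spec:
  # no storageClassName: the default class (nfs-client) is used
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Mi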

4. Adding a Second NFS StorageClass

Sometimes a cluster also needs to mount a remote NAS; the same approach covers that case: run a second provisioner instance that points at the other NFS server.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: tanqidi-client-provisioner
  labels:
    app: tanqidi-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: tanqidi-client-provisioner
  template:
    metadata:
      labels:
        app: tanqidi-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: tanqidi-client-provisioner
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: tanqidi-client
            - name: NFS_SERVER
              value: 172.31.0.99
            - name: NFS_PATH
              value: /hryyjt__bxdeva/k8s-data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.31.0.99
            path: /hryyjt__bxdeva/k8s-data

For the StorageClass, the provisioner field must match the name configured above:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: tanqidi-client
# The provisioner value must match the PROVISIONER_NAME env value in the Deployment manifest
provisioner: tanqidi-client
parameters:
  archiveOnDelete: "false"
reclaimPolicy: Retain
volumeBindingMode: Immediate
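
A claim targets the second backend simply by naming the new class. For example (the claim name nas-claim is hypothetical, for illustration only):

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nas-claim    # hypothetical name, for illustration only
spec:
  storageClassName: tanqidi-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Mi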

4.1 Enabling Archive-on-Delete

https://github.com/kubernetes-csi/csi-driver-nfs/issues/522

In the StorageClass defined above, the reclaim policy (reclaimPolicy) is Retain. This means that when a PVC is deleted, the underlying data directory is not removed along with it; it remains on the storage, waiting for manual cleanup.

In test environments, however, or in any scenario where PVCs are created and deleted frequently, this behavior causes obvious problems:
as PVCs are destroyed, data directories keep accumulating, and the export path fills up with stale directories left behind by PVCs that no longer exist. Because these directory names contain randomly generated UUIDs, it is hard to tell which ones are still valid and which no longer correspond to any PVC. Manual cleanup is not only inefficient but also risky: it is easy to delete data that is still in use.

# Can you tell which of these is the volume directory of a PVC that has already been deleted?
[root@hybxvpka01 k8s-data]# ll
total 12
drwxrwxrwx 2 root root 4096 Nov 19 19:03 default-pvc-10m-pvc-ba7fddda-493b-4a4b-83cb-4e17aa859b1a
drwxrwxrwx 2 root root 4096 Nov 19 18:59 default-pvc-10m-pvc-bb92a057-b797-4920-b6ad-1837fdec9972
drwxrwxrwx 2 root root 4096 Nov 19 19:31 default-pvc-10m-pvc-e5414671-9b69-4588-a19f-4dcb520e05d2

To improve the experience of managing NFS-backed persistent storage, it is strongly recommended to enable the archiveOnDelete feature from the moment you deploy the NFS StorageClass, and to set its reclaimPolicy to Delete.
With this configuration, the NFS provisioner takes over directory cleanup when a PVC is deleted:
instead of deleting the data outright, it renames the volume directory to an archive directory whose name starts with archived-.

The benefits are obvious:

  • Data is not deleted immediately, so the operation is safe
    The archived directory is retained, avoiding the risk of accidentally destroying production data.

  • The export directory stays clean and easy to maintain
    PVCs that are no longer in use are archived automatically, so the NFS root directory does not fill up with random UUID-named directories; readability and manageability improve.

  • Well suited to frequent PVC creation and destruction
    Especially in development, test, and CI environments, where volume lifecycles are short, automatic archiving prevents large numbers of leftover directories from piling up.

In short, enabling archiveOnDelete together with reclaimPolicy: Delete is the recommended and most operator-friendly configuration for an NFS StorageClass.

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: tanqidi-client
# The provisioner value must match the PROVISIONER_NAME env value in the Deployment manifest
provisioner: tanqidi-client
parameters:
  archiveOnDelete: "true"
reclaimPolicy: Delete
volumeBindingMode: Immediate

# With archiveOnDelete enabled and reclaimPolicy set to Delete, deleting a PVC renames its data
# directory to one prefixed with archived-. The prefix marks the directory as belonging to a
# deleted volume; you can decide later whether the archived data should be removed.
[root@hybxvpka01 k8s-data]# ll
total 12
drwxrwxrwx 2 root root 4096 Nov 19 19:31 archived-default-pvc-10m-pvc-e5414671-9b69-4588-a19f-4dcb520e05d2
drwxrwxrwx 2 root root 4096 Nov 19 19:03 default-pvc-10m-pvc-ba7fddda-493b-4a4b-83cb-4e17aa859b1a
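
When you are certain the archived data is no longer needed, the archived- prefix makes cleanup straightforward. A minimal sketch, run on the NFS server, assuming the share root from the example above; always review the list before deleting:

cd /hryyjt__bxdeva/k8s-data
find . -maxdepth 1 -type d -name 'archived-*'                     # review first
find . -maxdepth 1 -type d -name 'archived-*' -exec rm -rf {} +   # then delete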