安装NFS Server

NFS服务端IP为: 10.20.70.113

sudo apt-get -y install nfs-kernel-server  # also installs nfs-common as a dependency
# Create the shared directory and export it to all clients (*):
#   rw             - allow read-write access
#   sync           - flush writes to disk before replying to the client
#   no_root_squash - remote root stays root (needed when pods write as root,
#                    but a security trade-off on untrusted networks)
# NOTE(review): `tee` overwrites /etc/exports; use `tee -a` if the file
# already contains other exports.
sudo mkdir -p /data/k8s-nfs
echo '/data/k8s-nfs  *(rw,sync,no_root_squash)' | sudo tee /etc/exports
sudo exportfs -a

检查状态

# Verify the NFS server is running, registered with the portmapper,
# and that the export table contains /data/k8s-nfs.
systemctl status nfs-server
sudo rpcinfo -p |grep nfs
cat /var/lib/nfs/etab

安装NFS Client

所有工作节点安装NFS客户端

# Install the NFS client utilities on every worker node.
# -y added for consistency with the server install above and so the
# command works in non-interactive/automated runs.
sudo apt-get -y install nfs-common

检查状态

# From a client node, list the exports the NFS server offers.
sudo showmount -e 10.20.70.113

测试

# Mount the export on a client and write a test file through it.
mkdir /tmp/foo
sudo mount -t nfs 10.20.70.113:/data/k8s-nfs /tmp/foo
sudo touch /tmp/foo/a.txt

# On the NFS server, confirm a.txt shows up in the exported directory
ls /data/k8s-nfs

静态创建PV

创建pv

# Statically-provisioned PersistentVolume backed by the NFS export.
tee nfs-pv.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01
spec:
  capacity:
    storage: 1Gi
  accessModes:
    # NFS supports concurrent read-write mounts from many nodes.
    - ReadWriteMany
  # Retain: keep the data on the NFS share after the claim is released.
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 10.20.70.113
    path: "/data/k8s-nfs"
EOF

kubectl apply -f nfs-pv.yaml

创建pvc

# Claim that binds to the statically-created PV above.
tee nfs-pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc01
spec:
  accessModes:
    - ReadWriteMany
  # Empty string disables dynamic provisioning (and the default
  # StorageClass), so this claim binds to a manually created PV.
  storageClassName: ""
  resources:
    requests:
      storage: 1Gi
EOF

kubectl apply -f nfs-pvc.yaml

使用pvc

# NodePort Service + Deployment mounting the NFS-backed PVC at the web root.
# Fix: removed a whitespace-only (trailing-spaces) line that sat between the
# Service selector and the `---` document separator — flagged by yamllint
# and easy to corrupt further when editing.
tee nginx.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-svc
  labels:
    app: mynginx
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30002
  selector:
    app: mynginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  selector:
    matchLabels:
      app: mynginx
  replicas: 2
  template:
    metadata:
      labels:
        app: mynginx
    spec:
      containers:
      - name: mynginx
        image: nginx:alpine
        ports:
        - containerPort: 80
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
          # subPath mounts only the my-nginx-www subdirectory of the share,
          # keeping this app's files separate from other data on the export.
          subPath: my-nginx-www
      volumes:
      - name: www
        persistentVolumeClaim:
          claimName: pvc01
EOF

kubectl apply -f nginx.yaml
# The my-nginx-www subdirectory was created on the share by the subPath
# mount; write a test page into it on the NFS server.
echo '<h1>Hi, K8s</h1>' | sudo tee /data/k8s-nfs/my-nginx-www/index.html

访问试下,或者浏览器访问也行

$ curl http://10.20.70.113:30002
<h1>Hi, K8s</h1>

动态创建PV

# Deploy the nfs-subdir-external-provisioner: it watches PVCs that reference
# its StorageClass and creates a per-claim subdirectory on the NFS export
# as a dynamically-provisioned PV.
tee nfs-client.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    # Recreate avoids two provisioner instances overlapping during a rollout.
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must match the StorageClass "provisioner" field defined later.
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 10.20.70.113
            - name: NFS_PATH
              value: /data/k8s-nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.20.70.113
            path: /data/k8s-nfs
EOF

kubectl apply -f nfs-client.yaml

k8s.gcr.io 镜像在国内拉取不了的解决办法:先看 Pod 被调度到了哪个 Node,建议所有 Node 都提前拉取一下镜像

1
2
3
4
5
# Workaround for networks that cannot reach k8s.gcr.io: pull a mirror of the
# image, retag it to the name the Deployment expects, then drop the mirror tag.
sudo docker pull strongxyz/nfs-subdir-external-provisioner:v4.0.2

sudo docker tag strongxyz/nfs-subdir-external-provisioner:v4.0.2 k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2

sudo docker rmi strongxyz/nfs-subdir-external-provisioner:v4.0.2
# ServiceAccount and RBAC for the provisioner: cluster-wide access to
# PVs/PVCs/StorageClasses/events, plus a namespaced Role on endpoints
# used as the leader-election lock.
tee nfs-client-sa.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF

kubectl apply -f nfs-client-sa.yaml
# StorageClass handled by the external provisioner. archiveOnDelete keeps
# the data (the subdirectory is renamed archived-*) when a PVC is deleted.
tee nfs-client-class.yaml << EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name; must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "true"
EOF

kubectl apply -f nfs-client-class.yaml

手动创建pvc

# Request a dynamically-provisioned volume via the nfs-client StorageClass;
# the provisioner should create a matching PV and subdirectory automatically.
tee test-pvc.yaml << EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-pvc
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF

kubectl apply -f test-pvc.yaml

创建pod,测试刚才创建的pvc是否可正常使用

# One-shot test pod: writes /mnt/SUCCESS on the dynamically-provisioned
# volume and exits, proving the PVC is mountable and writable.
tee test-pod.yaml << EOF
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox:stable
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-pvc
EOF

kubectl apply -f test-pod.yaml

确认是否成功写入文件SUCCESS

$ ls /data/k8s-nfs/default-test-pvc-`kubectl get pvc | awk '/test-pvc/{print $3}'`
SUCCESS

自动创建pvc

# StatefulSet demo: volumeClaimTemplates auto-creates one PVC per replica,
# each dynamically provisioned by the nfs-client StorageClass.
tee test-statefulset-nfs.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30003
  selector:
    app: nginx
---
apiVersion: apps/v1 #  for k8s versions before 1.9.0 use apps/v1beta2  and before 1.8.0 use extensions/v1beta1
kind: StatefulSet
metadata:
  name: web
  labels:
    app: nginx
spec:
  serviceName: "nginx"
  selector:
    matchLabels:
      app: nginx
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      # NOTE(review): each replica gets its own RWO claim here; NFS also
      # supports ReadWriteMany if shared access were wanted instead.
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
      storageClassName: nfs-client
EOF

kubectl apply -f test-statefulset-nfs.yaml

参考链接

浅谈 Kubernetes 数据持久化方案

https://ubuntu.com/server/docs/service-nfs

https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#access-modes

https://github.com/kubernetes/examples/tree/master/staging/volumes/nfs

https://kubernetes.io/zh/docs/tasks/run-application/run-stateless-application-deployment/

https://github.com/kubernetes/website/tree/main/content/en/examples/application/nginx

https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client

https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/tree/master/deploy

https://github.com/kubernetes/examples/blob/master/staging/volumes/vsphere/simple-statefulset.yaml