#### Environment
```sh
Ceph cluster nodes: 172.16.100.1:6789,172.16.100.2:6789,172.16.100.11:6789
ceph version 15.2.13 (c44bc49e7a57a87d84dfff2a077a2058aa2172e2) octopus (stable)
Ceph client: 15.2.14 (pod image: "elementalnet/cephfs-provisioner:0.8.0")
```
`Note: the official quay.io/external_storage/cephfs-provisioner:latest image ships a 13.x Ceph client, which does not match the cluster's 15.x release and leaves the PV stuck in Pending. Use the third-party image elementalnet/cephfs-provisioner:0.8.0 instead.`
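A quick way to confirm the skew before deploying (assuming `ceph` CLI access on a cluster node, and that the provisioner image actually ships a `ceph` binary; adjust if your image differs):
```sh
# On a Ceph cluster node: report the versions of all running daemons
$ ceph versions
# Client version baked into the provisioner image (if docker is available on a node)
$ docker run --rm --entrypoint ceph elementalnet/cephfs-provisioner:0.8.0 --version
```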
#### Install the Ceph client
Install the client on each k8s node:
```sh
$ yum install ceph-common -y
```
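A quick sanity check that the client installed correctly and which version it is:
```sh
$ ceph --version
```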
Copy the admin keyring from a Ceph node to the k8s node:
```sh
$ scp /etc/ceph/ceph.client.admin.keyring 172.16.100.100:/etc/ceph
```
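To run `ceph` commands directly from the k8s node (such as the `get-key` below), it also needs the cluster's `ceph.conf`; assuming the default path on the Ceph node:
```sh
$ scp /etc/ceph/ceph.conf 172.16.100.100:/etc/ceph/
```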
Create the Secret used to access Ceph:
```sh
$ ceph auth get-key client.admin | base64
QVFEa0RFTmhYQ1UzQUJBQXFmSWptMFJkSVpGaC9VR0V4M0RNc3c9PQ==
```
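All of the resources below live in the cephfs namespace; create it first if it does not already exist:
```sh
$ kubectl create namespace cephfs
```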
```sh
$ cat >cephfs-secret.yaml<<EOF
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
  namespace: cephfs
type: "kubernetes.io/rbd"
data:
  key: QVFEa0RFTmhYQ1UzQUJBQXFmSWptMFJkSVpGaC9VR0V4M0RNc3c9PQ== # replace with the base64 output above
EOF
```
```sh
$ kubectl create -f cephfs-secret.yaml
```
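Confirm the Secret exists in the cephfs namespace:
```sh
$ kubectl -n cephfs get secret ceph-secret-admin
```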
#### Install the cephfs provisioner
Reference: https://github.com/kubernetes-retired/external-storage/tree/master/ceph/cephfs/deploy
Create the RBAC resources:
```sh
$ cat >cephfs-rbac.yaml <<EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: cephfs
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: cephfs
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: cephfs
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: cephfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns", "coredns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: cephfs
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF
```
```sh
$ kubectl create -f cephfs-rbac.yaml
```
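To check that the bindings took effect, `kubectl auth can-i` can impersonate the ServiceAccount; this should answer `yes`:
```sh
$ kubectl auth can-i create persistentvolumes \
    --as=system:serviceaccount:cephfs:cephfs-provisioner
```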
Create the cephfs-provisioner Deployment:
```sh
$ cat > cephfs-provisioner.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: cephfs
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cephfs-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
        - name: cephfs-provisioner
          # The Ceph client in quay.io/external_storage/cephfs-provisioner:latest is too old for
          # a 15.2 cluster and the image is no longer updated; use the third-party image below.
          #image: "quay.io/external_storage/cephfs-provisioner:latest"
          image: "elementalnet/cephfs-provisioner:0.8.0"
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/cephfs
            - name: PROVISIONER_SECRET_NAMESPACE
              value: cephfs
          command:
            - "/usr/local/bin/cephfs-provisioner"
          args:
            - "-id=cephfs-provisioner-1"
      serviceAccountName: cephfs-provisioner
EOF
```
```sh
$ kubectl create -f cephfs-provisioner.yaml
```
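Check that the provisioner pod comes up, and watch its logs to confirm it registered against the cluster:
```sh
$ kubectl -n cephfs get pods -l app=cephfs-provisioner
$ kubectl -n cephfs logs deploy/cephfs-provisioner
```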
#### Create the StorageClass
```sh
$ cat > cephfs-storageclass.yaml <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 172.16.100.1:6789,172.16.100.2:6789,172.16.100.11:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: cephfs
  claimRoot: /pvc-volumes
EOF
```
```sh
$ kubectl create -f cephfs-storageclass.yaml
```
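Verify the StorageClass:
```sh
$ kubectl get storageclass cephfs
```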
#### Create a test PVC
```sh
$ cat > cephfs-test-pvc.yaml <<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-test-pvc-1
  annotations:
    volume.beta.kubernetes.io/storage-class: "cephfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
EOF
```
```sh
$ kubectl create -f cephfs-test-pvc.yaml
```
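If the PVC stays in Pending, describe it and check the provisioner logs for the reason:
```sh
$ kubectl describe pvc cephfs-test-pvc-1
$ kubectl -n cephfs logs deploy/cephfs-provisioner --tail=20
```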
Check the PVC:
```sh
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
cephfs-test-pvc-1 Bound pvc-571ae252-b080-4a67-8f3d-40bda1304fe3 500Mi RWX cephfs 2m1s
```
Check the PV:
```sh
$ kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-571ae252-b080-4a67-8f3d-40bda1304fe3 500Mi RWX Delete Bound default/cephfs-test-pvc-1 cephfs 2m3s
```
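On the Ceph side, each provisioned volume should appear as a directory under the claimRoot configured in the StorageClass (/pvc-volumes). A sketch for inspecting it, assuming the kernel cephfs client and the admin key from above:
```sh
$ mkdir -p /mnt/cephfs-admin
# Mount the cephfs root from any client node
$ mount -t ceph 172.16.100.1:6789:/ /mnt/cephfs-admin \
    -o name=admin,secret=$(ceph auth get-key client.admin)
$ ls /mnt/cephfs-admin/pvc-volumes
```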
#### Create a busybox Deployment that mounts the PVC
```sh
$ cat > cephfs-test-busybox-deployment.yml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-test-deploy-busybox
spec:
  replicas: 3
  selector:
    matchLabels:
      app: cephfs-test-busybox
  template:
    metadata:
      labels:
        app: cephfs-test-busybox
    spec:
      containers:
        - name: busybox
          image: busybox
          command: ["sleep", "60000"]
          volumeMounts:
            - mountPath: "/mnt/cephfs"
              name: cephfs-test-pvc
      volumes:
        - name: cephfs-test-pvc
          persistentVolumeClaim:
            claimName: cephfs-test-pvc-1
EOF
```
```sh
$ kubectl create -f cephfs-test-busybox-deployment.yml
```
```sh
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
cephfs-test-deploy-busybox-56556d86ff-4dmzn 1/1 Running 0 4m28s
cephfs-test-deploy-busybox-56556d86ff-6dr6v 1/1 Running 0 4m28s
cephfs-test-deploy-busybox-56556d86ff-b75mw 1/1 Running 0 4m28s
```
Verify that files written to the mounted cephfs are visible across the pods:
```sh
$ kubectl exec -ti cephfs-test-deploy-busybox-56556d86ff-4dmzn -- sh
/ # cd /mnt/cephfs/
/mnt/cephfs # ls
/mnt/cephfs # touch cephfs-test-deploy-busybox-56556d86ff-4dmzn
/mnt/cephfs # exit
```
```sh
$ kubectl exec -ti cephfs-test-deploy-busybox-56556d86ff-6dr6v -- sh
/ # cd /mnt/cephfs/
/mnt/cephfs # ls
cephfs-test-deploy-busybox-56556d86ff-4dmzn # the file written from the other pod is visible: sync works
```
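To clean up after testing; since the PV's reclaim policy is Delete, removing the PVC also removes the PV and its backing cephfs volume:
```sh
$ kubectl delete -f cephfs-test-busybox-deployment.yml
$ kubectl delete -f cephfs-test-pvc.yaml
```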