DRAFT

  1. Add 3 worker nodes, each with a dedicated block device to use with Ceph
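
A quick sanity check on each storage node confirms that the spare device is visible and unused; this sketch assumes the device is vdb (as used in the cluster config below) and that it carries no partitions or filesystem, otherwise Rook's OSD prepare job will skip it:

[root@test-vm4 ~]# lsblk -o NAME,SIZE,TYPE,MOUNTPOINT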

  2. Install git

[root@test-vm1 ~]# yum install -y git
  3. Install Rook
[root@test-vm1 ~]$ su - k8sadm
[k8sadm@test-vm1 ~]$ git clone https://github.com/rook/rook.git
[k8sadm@test-vm1 ~]$ kubectl apply -f rook/cluster/examples/kubernetes/ceph/operator.yaml
namespace/rook-ceph-system created
customresourcedefinition.apiextensions.k8s.io/clusters.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/filesystems.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/objectstores.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/pools.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/volumes.rook.io created
clusterrole.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created
role.rbac.authorization.k8s.io/rook-ceph-system created
clusterrole.rbac.authorization.k8s.io/rook-ceph-global created
serviceaccount/rook-ceph-system created
rolebinding.rbac.authorization.k8s.io/rook-ceph-system created
clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-global created
deployment.apps/rook-ceph-operator created 
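
Before continuing, you may want to verify that the operator pod reaches the Running state (the pod name suffix will differ on your system):

[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph-system get pods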
  4. Label these worker nodes as 'storage-node'
[k8sadm@test-vm1 ~]$ kubectl label node test-vm4.home.lcl role=storage-node
node/test-vm4.home.lcl labeled

[k8sadm@test-vm1 ~]$ kubectl label node test-vm5.home.lcl role=storage-node
node/test-vm5.home.lcl labeled

[k8sadm@test-vm1 ~]$ kubectl label node test-vm6.home.lcl role=storage-node
node/test-vm6.home.lcl labeled
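
To double-check, list the nodes that carry the new label:

[k8sadm@test-vm1 ~]$ kubectl get nodes -l role=storage-node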
  5. Create the cluster configuration
[k8sadm@test-vm1 ~]$ vi cluster.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Allow the pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-cluster
  namespace: rook-ceph
---
apiVersion: ceph.rook.io/v1beta1
kind: Cluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  dataDirHostPath: /var/lib/rook
  serviceAccount: rook-ceph-cluster
  mon:
    count: 3
    allowMultiplePerNode: true
  dashboard:
    enabled: true
  network:
    hostNetwork: false
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: role
              operator: In
              values:
              - storage-node
      podAffinity:
      podAntiAffinity:
      tolerations:
      - key: storage-node
        operator: Exists
  resources:
  storage:
    useAllNodes: false
    useAllDevices: false
    deviceFilter:
    location:
    config:
      databaseSizeMB: "1024"
      journalSizeMB: "1024"
    nodes:
    - name: "test-vm4.home.lcl"
      devices:
      - name: "vdb"
    - name: "test-vm5.home.lcl"
      devices:
      - name: "vdb"
    - name: "test-vm6.home.lcl"
      devices:
      - name: "vdb"

You might want to change the following in the above YAML:

  • the number of mons
    • use an odd number!
    • between 1 and 9
  • node names
  • device names (they could be vdc or sdb in your case)
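
If you edit the file, a client-side dry run is a cheap way to validate the changes before anything reaches the cluster (older kubectl releases, like the one used here, take a bare --dry-run flag; newer ones spell it --dry-run=client):

[k8sadm@test-vm1 ~]$ kubectl apply -f cluster.yaml --dry-run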
  6. Apply the cluster configuration
[k8sadm@test-vm1 ~]$ kubectl apply -f cluster.yaml
namespace/rook-ceph created
serviceaccount/rook-ceph-cluster created
role.rbac.authorization.k8s.io/rook-ceph-cluster created
rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created
rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster created
cluster.ceph.rook.io/rook-ceph created
  7. Check the status
[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph get pods
NAME                                            READY     STATUS      RESTARTS   AGE
rook-ceph-mgr-a-77f86598dd-clsqw                1/1       Running     0          5m
rook-ceph-mon-a-c8b6b9c78-f54px                 1/1       Running     0          5m
rook-ceph-mon-b-85c677b6b4-wg9xb                1/1       Running     0          5m
rook-ceph-mon-c-5fbd645bc4-gwq4v                1/1       Running     0          5m
rook-ceph-osd-0-bc94cf68d-tz7pg                 1/1       Running     0          4m
rook-ceph-osd-1-858b858874-bktlk                1/1       Running     0          4m
rook-ceph-osd-2-6c54c75878-m2zpx                1/1       Running     0          4m
rook-ceph-osd-prepare-test-vm4.home.lcl-fdbnx   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm5.home.lcl-m2k75   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm6.home.lcl-qcqk5   0/1       Completed   0          5m
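
The OSD pods can take a few minutes to show up; adding -w keeps the listing open until they do. Since the dashboard was enabled in cluster.yaml, the services are worth a look as well (in this Rook release the dashboard service is typically named rook-ceph-mgr-dashboard, but verify against your checkout):

[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph get pods -w
[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph get svc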
  8. Install the Ceph toolbox
[k8sadm@test-vm1 ~]$ kubectl apply -f rook/cluster/examples/kubernetes/ceph/toolbox.yaml
deployment.apps/rook-ceph-tools created

[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph get pods
NAME                                            READY     STATUS      RESTARTS   AGE
rook-ceph-mgr-a-77f86598dd-clsqw                1/1       Running     0          5m
rook-ceph-mon-a-c8b6b9c78-f54px                 1/1       Running     0          5m
rook-ceph-mon-b-85c677b6b4-wg9xb                1/1       Running     0          5m
rook-ceph-mon-c-5fbd645bc4-gwq4v                1/1       Running     0          5m
rook-ceph-osd-0-bc94cf68d-tz7pg                 1/1       Running     0          4m
rook-ceph-osd-1-858b858874-bktlk                1/1       Running     0          4m
rook-ceph-osd-2-6c54c75878-m2zpx                1/1       Running     0          4m
rook-ceph-osd-prepare-test-vm4.home.lcl-fdbnx   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm5.home.lcl-m2k75   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm6.home.lcl-qcqk5   0/1       Completed   0          5m
rook-ceph-tools-856cd87f69-9tznz                1/1       Running     0          4m
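
Instead of wrapping every Ceph command in kubectl exec as in the next steps, you can also open an interactive shell in the toolbox pod and run the commands directly:

[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') bash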
  9. Check the Ceph status
[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') ceph status
  cluster:
    id:     2afbac2e-0df9-43a5-821a-c08bdbff3584
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum b,c,a
    mgr: a(active)
    osd: 3 osds: 3 up, 3 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 bytes
    usage:   3077 MB used, 289 GB / 292 GB avail
    pgs:
  10. Check the Ceph OSD status
[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') ceph osd status
+----+----------------------------------+-------+-------+--------+---------+--------+---------+-----------+
| id |               host               |  used | avail | wr ops | wr data | rd ops | rd data |   state   |
+----+----------------------------------+-------+-------+--------+---------+--------+---------+-----------+
| 0  | rook-ceph-osd-0-bc94cf68d-tz7pg  | 1025M | 96.4G |    0   |     0   |    0   |     0   | exists,up |
| 1  | rook-ceph-osd-1-858b858874-bktlk | 1025M | 96.4G |    0   |     0   |    0   |     0   | exists,up |
| 2  | rook-ceph-osd-2-6c54c75878-m2zpx | 1025M | 96.4G |    0   |     0   |    0   |     0   | exists,up |
+----+----------------------------------+-------+-------+--------+---------+--------+---------+-----------+
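
The toolbox exposes the full Ceph CLI, so the same pattern works for any other command; for example, ceph df breaks usage down per pool once pools exist:

[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') ceph df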

Now just follow the manual to use Ceph block storage:

https://rook.io/docs/rook/master/block.html
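
In short, the walkthrough creates a Pool and a StorageClass that provisions block volumes from it. A minimal sketch following that manual for this Rook release (replicapool and rook-ceph-block are the manual's example names, and parameter names changed between Rook versions, so check the doc that matches your checkout):

apiVersion: ceph.rook.io/v1beta1
kind: Pool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  replicated:
    size: 3
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
provisioner: ceph.rook.io/block
parameters:
  blockPool: replicapool
  clusterNamespace: rook-ceph
  fstype: xfs

Once a PersistentVolumeClaim against that StorageClass is bound and a workload starts writing, ceph status reflects the new pool, its placement groups, and the client I/O, as in the capture below.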

[k8sadm@test-vm1 kubernetes]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') ceph status
  cluster:
    id:     2afbac2e-0df9-43a5-821a-c08bdbff3584
    health: HEALTH_OK
 
  services:
    mon: 4 daemons, quorum b,c,d,a
    mgr: a(active)
    osd: 3 osds: 3 up, 3 in
 
  data:
    pools:   1 pools, 100 pgs
    objects: 62 objects, 95040 kB
    usage:   3151 MB used, 289 GB / 292 GB avail
    pgs:     100 active+clean
 
  io:
    client:   71023 B/s rd, 5648 kB/s wr, 10 op/s rd, 18 op/s wr

Sources:

  1. http://docs.ceph.com/docs/master/dev/kubernetes/
  2. https://rook.io/docs/rook/master/
  3. https://medium.com/@zhimin.wen/deploy-rook-ceph-on-icp-2-1-0-3-63ec16787093