
rook-ceph on k8s

DRAFT

  1. Add 3 worker nodes, each with a dedicated block device to use for Ceph
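Ceph needs each of those disks to be raw and unused. A quick way to confirm this on every storage node (assuming the disk shows up as /dev/vdb, as on these lab VMs) is to check that lsblk reports no filesystem or mountpoint for it:

[root@test-vm4 ~]$ lsblk -f /dev/vdb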

  2. Install git

[root@test-vm1 ~]$ yum install -y git
  3. Install the Rook operator
[root@test-vm1 ~]$ su - k8sadm
[k8sadm@test-vm1 ~]$ git clone https://github.com/rook/rook.git
[k8sadm@test-vm1 ~]$ kubectl apply -f  rook/cluster/examples/kubernetes/ceph/operator.yaml
namespace/rook-ceph-system created
customresourcedefinition.apiextensions.k8s.io/clusters.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/filesystems.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/objectstores.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/pools.ceph.rook.io created
customresourcedefinition.apiextensions.k8s.io/volumes.rook.io created
clusterrole.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created
role.rbac.authorization.k8s.io/rook-ceph-system created
clusterrole.rbac.authorization.k8s.io/rook-ceph-global created
serviceaccount/rook-ceph-system created
rolebinding.rbac.authorization.k8s.io/rook-ceph-system created
clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-global created
deployment.apps/rook-ceph-operator created 
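Before moving on, it's worth checking that the operator came up in the rook-ceph-system namespace (the agent and discover pods typically appear shortly after):

[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph-system get pods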
  4. Label these worker nodes as 'storage-node'
[k8sadm@test-vm1 ~]$ kubectl label node test-vm4.home.lcl role=storage-node
node/test-vm4.home.lcl labeled

[k8sadm@test-vm1 ~]$ kubectl label node test-vm5.home.lcl role=storage-node
node/test-vm5.home.lcl labeled

[k8sadm@test-vm1 ~]$ kubectl label node test-vm6.home.lcl role=storage-node
node/test-vm6.home.lcl labeled
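A quick way to confirm all three nodes carry the label (not part of the original steps):

[k8sadm@test-vm1 ~]$ kubectl get nodes -l role=storage-node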
  5. Create a cluster config
[k8sadm@test-vm1 ~]$ vi cluster.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: rook-ceph
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
---
# Allow the operator to create resources in this cluster's namespace
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster-mgmt
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rook-ceph-cluster-mgmt
subjects:
- kind: ServiceAccount
  name: rook-ceph-system
  namespace: rook-ceph-system
---
# Allow the pods in this namespace to work with configmaps
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rook-ceph-cluster
subjects:
- kind: ServiceAccount
  name: rook-ceph-cluster
  namespace: rook-ceph
---
apiVersion: ceph.rook.io/v1beta1
kind: Cluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  dataDirHostPath: /var/lib/rook
  serviceAccount: rook-ceph-cluster
  mon:
    count: 3
    allowMultiplePerNode: true
  dashboard:
    enabled: true
  network:
    hostNetwork: false
  placement:
    all:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: role
              operator: In
              values:
              - storage-node
      podAffinity:
      podAntiAffinity:
      tolerations:
      - key: storage-node
        operator: Exists
  resources:
  storage:
    useAllNodes: false
    useAllDevices: false
    deviceFilter:
    location:
    config:
      databaseSizeMB: "1024"
      journalSizeMB: "1024"
    nodes:
    - name: "test-vm4.home.lcl"
      devices:
      - name: "vdb"
    - name: "test-vm5.home.lcl"
      devices:
      - name: "vdb"
    - name: "test-vm6.home.lcl"
      devices:
      - name: "vdb"

You might want to change the following in the above YAML:

  • the number of mons
    • use an odd number!
    • between 1 and 9
  • the node names
  • the device names (they could be vdc or sdb in your case); if in doubt, see the lsblk check below
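If you're unsure which device name to use, lsblk on a storage node lists all block devices; pick the empty, dedicated disk (vdb on these VMs):

[root@test-vm4 ~]$ lsblk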
  6. Apply the cluster configuration
[k8sadm@test-vm1 ~]$ kubectl apply -f cluster.yaml
namespace/rook-ceph created
serviceaccount/rook-ceph-cluster created
role.rbac.authorization.k8s.io/rook-ceph-cluster created
rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created
rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster created
cluster.ceph.rook.io/rook-ceph created
  7. Check the status
[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph get pods
NAME                                            READY     STATUS      RESTARTS   AGE
rook-ceph-mgr-a-77f86598dd-clsqw                1/1       Running     0          5m
rook-ceph-mon-a-c8b6b9c78-f54px                 1/1       Running     0          5m
rook-ceph-mon-b-85c677b6b4-wg9xb                1/1       Running     0          5m
rook-ceph-mon-c-5fbd645bc4-gwq4v                1/1       Running     0          5m
rook-ceph-osd-0-bc94cf68d-tz7pg                 1/1       Running     0          4m
rook-ceph-osd-1-858b858874-bktlk                1/1       Running     0          4m
rook-ceph-osd-2-6c54c75878-m2zpx                1/1       Running     0          4m
rook-ceph-osd-prepare-test-vm4.home.lcl-fdbnx   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm5.home.lcl-m2k75   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm6.home.lcl-qcqk5   0/1       Completed   0          5m
  8. Install the Ceph toolbox
[k8sadm@test-vm1 ~]$ kubectl apply -f  rook/cluster/examples/kubernetes/ceph/toolbox.yaml 
deployment.apps/rook-ceph-tools created

[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph get pods
NAME                                            READY     STATUS      RESTARTS   AGE
rook-ceph-mgr-a-77f86598dd-clsqw                1/1       Running     0          5m
rook-ceph-mon-a-c8b6b9c78-f54px                 1/1       Running     0          5m
rook-ceph-mon-b-85c677b6b4-wg9xb                1/1       Running     0          5m
rook-ceph-mon-c-5fbd645bc4-gwq4v                1/1       Running     0          5m
rook-ceph-osd-0-bc94cf68d-tz7pg                 1/1       Running     0          4m
rook-ceph-osd-1-858b858874-bktlk                1/1       Running     0          4m
rook-ceph-osd-2-6c54c75878-m2zpx                1/1       Running     0          4m
rook-ceph-osd-prepare-test-vm4.home.lcl-fdbnx   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm5.home.lcl-m2k75   0/1       Completed   0          5m
rook-ceph-osd-prepare-test-vm6.home.lcl-qcqk5   0/1       Completed   0          5m
rook-ceph-tools-856cd87f69-9tznz                1/1       Running     0          4m
  9. Check the Ceph status
[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') ceph status
  cluster:
    id:     2afbac2e-0df9-43a5-821a-c08bdbff3584
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum b,c,a
    mgr: a(active)
    osd: 3 osds: 3 up, 3 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 bytes
    usage:   3077 MB used, 289 GB / 292 GB avail
    pgs:
  10. Check the Ceph OSD status
[k8sadm@test-vm1 ~]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') ceph osd status
+----+----------------------------------+-------+-------+--------+---------+--------+---------+-----------+
| id |               host               |  used | avail | wr ops | wr data | rd ops | rd data |   state   |
+----+----------------------------------+-------+-------+--------+---------+--------+---------+-----------+
| 0  | rook-ceph-osd-0-bc94cf68d-tz7pg  | 1025M | 96.4G |    0   |     0   |    0   |     0   | exists,up |
| 1  | rook-ceph-osd-1-858b858874-bktlk | 1025M | 96.4G |    0   |     0   |    0   |     0   | exists,up |
| 2  | rook-ceph-osd-2-6c54c75878-m2zpx | 1025M | 96.4G |    0   |     0   |    0   |     0   | exists,up |
+----+----------------------------------+-------+-------+--------+---------+--------+---------+-----------+

Now just follow the manual to start using Ceph:

https://rook.io/docs/rook/master/block.html
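In short, the block-storage walkthrough has you create a replicated pool and a StorageClass; the cloned repo already ships an example manifest for this, so (assuming the path hasn't moved in your checkout) it can be applied directly:

[k8sadm@test-vm1 ~]$ kubectl apply -f rook/cluster/examples/kubernetes/ceph/storageclass.yaml

PersistentVolumeClaims that reference the resulting StorageClass are then provisioned as RBD images in that pool, which is what produces the pool, objects and client I/O visible in the ceph status output below.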

[k8sadm@test-vm1 kubernetes]$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') ceph status
  cluster:
    id:     2afbac2e-0df9-43a5-821a-c08bdbff3584
    health: HEALTH_OK
 
  services:
    mon: 4 daemons, quorum b,c,d,a
    mgr: a(active)
    osd: 3 osds: 3 up, 3 in
 
  data:
    pools:   1 pools, 100 pgs
    objects: 62 objects, 95040 kB
    usage:   3151 MB used, 289 GB / 292 GB avail
    pgs:     100 active+clean
 
  io:
    client:   71023 B/s rd, 5648 kB/s wr, 10 op/s rd, 18 op/s wr

sources:

  1. http://docs.ceph.com/docs/master/dev/kubernetes/
  2. https://rook.io/docs/rook/master/
  3. https://medium.com/@zhimin.wen/deploy-rook-ceph-on-icp-2-1-0-3-63ec16787093

k8s Dashboard installation

Deploy the Dashboard

  1. Install the Kubernetes dashboard
[k8sadm@test-vm1 ~]$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

secret "kubernetes-dashboard-certs" created
serviceaccount "kubernetes-dashboard" created
role "kubernetes-dashboard-minimal" created
rolebinding "kubernetes-dashboard-minimal" created
deployment "kubernetes-dashboard" created
service "kubernetes-dashboard" created
  2. Deploy Heapster to enable container cluster monitoring and performance analysis on your cluster
[k8sadm@test-vm1 ~]$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml

serviceaccount "heapster" created
deployment "heapster" created
service "heapster" created
  3. Deploy the InfluxDB backend for Heapster to your cluster
[k8sadm@test-vm1 ~]$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml

deployment "monitoring-influxdb" created
service "monitoring-influxdb" created
  4. Create the Heapster cluster role binding for the dashboard:
[k8sadm@test-vm1 ~]$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml

clusterrolebinding "heapster" created
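At this point the dashboard, Heapster and InfluxDB pods should be starting in kube-system; a quick check (the label is the one used by the recommended manifest):

[k8sadm@test-vm1 ~]$ kubectl -n kube-system get pods -l k8s-app=kubernetes-dashboard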

Create an admin Service Account and Cluster Role Binding

  1. Create a file called k8s-admin-service-account.yaml with the text below
apiVersion: v1
kind: ServiceAccount
metadata:
  name: k8s-admin
  namespace: kube-system
  2. Apply the service account to your cluster
[k8sadm@test-vm1 ~]$ kubectl apply -f k8s-admin-service-account.yaml

serviceaccount "k8s-admin" created
  3. Create a file called k8s-admin-cluster-role-binding.yaml with the text below
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: k8s-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: k8s-admin
  namespace: kube-system
  4. Apply the cluster role binding to your cluster
[k8sadm@test-vm1 ~]$ kubectl apply -f k8s-admin-cluster-role-binding.yaml

clusterrolebinding "k8s-admin" created

Connect to the Dashboard

  1. Retrieve an authentication token for the k8s-admin service account. Copy the <authentication_token> value from the output; you will use this token to connect to the dashboard.
[k8sadm@test-vm1 ~]$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep k8s-admin | awk '{print $1}')

Name:         k8s-admin-token-b5zv4
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name=k8s-admin
              kubernetes.io/service-account.uid=bcfe66ac-39be-11e8-97e8-026dce96b6e8

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      <authentication_token>
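If you only want the token itself (for scripting), the same secret can be read with jsonpath and decoded; an equivalent one-liner, not part of the original walkthrough:

[k8sadm@test-vm1 ~]$ kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep k8s-admin | awk '{print $1}') -o jsonpath='{.data.token}' | base64 --decode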
  2. Start the kubectl proxy
[k8sadm@test-vm1 ~]$ kubectl proxy

Starting to serve on 127.0.0.1:8001
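Note that kubectl proxy only listens on 127.0.0.1 of the host it runs on (test-vm1 here). If your browser runs on another machine, one option is to forward the port over SSH first; a sketch, assuming you can SSH to the master as k8sadm:

workstation$ ssh -L 8001:127.0.0.1:8001 k8sadm@test-vm1.home.lcl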
  3. Open the following link with a web browser to access the dashboard endpoint: http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/

  4. Choose Token, paste the <authentication_token> output from the previous command into the Token field, and choose SIGN IN.


sources:

  1. https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html

Continue with:

  1. K8s rook-ceph Install https://sunwfrk.com/rook-ceph-on-k8s/

k8s master and nodes on RHEL/CentOS 7

Configure the master node

Preparation

  1. Run the following commands to pass bridged IP traffic to iptables chains
[root@test-vm1 ~]$ yum update -y
[root@test-vm1 ~]$ modprobe br_netfilter

[root@test-vm1 ~]$ cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@test-vm1 ~]$ sysctl --system
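Note that modprobe only loads the module for the current boot. To make sure br_netfilter comes back after a reboot, you can optionally register it with systemd-modules-load (an extra step, not strictly required here):

[root@test-vm1 ~]$ echo br_netfilter > /etc/modules-load.d/br_netfilter.conf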

2a) Allow the necessary ports through the firewall if you're working in an untrusted environment or in production

firewall-cmd --zone=public --add-port=6443/tcp --permanent
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=443/tcp --permanent
firewall-cmd --zone=public --add-port=18080/tcp --permanent
firewall-cmd --zone=public --add-port=10254/tcp --permanent
firewall-cmd --reload

2b) If you're just testing this in a safe lab environment you can disable the firewall.

[root@test-vm1 ~]$ systemctl stop firewalld && systemctl disable firewalld 
  3. Check whether SELinux is enabled with the following command
[root@test-vm1 ~]$ sestatus
  4. If the current mode is enforcing, change it to permissive or disabled.
[root@test-vm1 ~]$ sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/sysconfig/selinux
[root@test-vm1 ~]$ setenforce 0
  5. Kubernetes requires swap to be disabled for predictable performance, so turn it off and comment out the swap entry in /etc/fstab so it stays off after a reboot.
[root@test-vm1 ~]$ swapoff -a
[root@test-vm1 ~]$ vi /etc/fstab

#/dev/mapper/centos-swap swap                    swap    defaults        0 0
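Instead of editing /etc/fstab by hand, the swap entry can also be commented out with sed; a one-liner that assumes the entry contains " swap " and keeps a backup of the original file:

[root@test-vm1 ~]$ sed -i.bak '/ swap / s/^/#/' /etc/fstab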

6a) Add the Kubernetes repository to yum

[root@test-vm1 ~]$ cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

6b) Add the official Docker repository to yum

[root@test-vm1 ~]$ yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
[root@test-vm1 ~]$ yum install -y yum-utils device-mapper-persistent-data lvm2
[root@test-vm1 ~]$ yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

Installation

  7. Install kubeadm, kubectl, kubelet and Docker
[root@test-vm1 ~]$ yum install -y ebtables ethtool docker-ce kubelet kubeadm kubectl
  8. Start Docker and enable it at boot
[root@test-vm1 ~]$ systemctl start docker && systemctl enable docker
  9. Start kubelet and enable it at boot
[root@test-vm1 ~]$ systemctl start kubelet && systemctl enable kubelet
  10. Initialize Kubernetes. Be aware that some pod network implementations require a specific '--pod-network-cidr=' setting; check https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network before continuing.
[root@test-vm1 ~]$ kubeadm init --pod-network-cidr=10.244.0.0/16
I0715 12:50:01.543998    1958 feature_gate.go:230] feature gates: &{map[]}
[init] using Kubernetes version: v1.11.0
[preflight] running pre-flight checks
I0715 12:50:01.577212    1958 kernel_validator.go:81] Validating kernel version
I0715 12:50:01.577289    1958 kernel_validator.go:96] Validating kernel config
[preflight/images] Pulling images required for setting up a Kubernetes cluster
[preflight/images] This might take a minute or two, depending on the speed of your internet connection
[preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[preflight] Activating the kubelet service
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [test-vm1.home.lcl kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.221]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Generated etcd/ca certificate and key.
[certificates] Generated etcd/server certificate and key.
[certificates] etcd/server serving cert is signed for DNS names [test-vm1.home.lcl localhost] and IPs [127.0.0.1 ::1]
[certificates] Generated etcd/peer certificate and key.
[certificates] etcd/peer serving cert is signed for DNS names [test-vm1.home.lcl localhost] and IPs [192.168.1.221 127.0.0.1 ::1]
[certificates] Generated etcd/healthcheck-client certificate and key.
[certificates] Generated apiserver-etcd-client certificate and key.
[certificates] valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[controlplane] wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests" 
[init] this might take a minute or longer if the control plane images have to be pulled
[apiclient] All control plane components are healthy after 43.502080 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.11" in namespace kube-system with the configuration for the kubelets in the cluster
[markmaster] Marking the node test-vm1.home.lcl as master by adding the label "node-role.kubernetes.io/master=''"
[markmaster] Marking the node test-vm1.home.lcl as master by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "test-vm1.home.lcl" as an annotation
[bootstraptoken] using token: e8yb38.htt4pz8dmxq77jha
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join 192.168.1.221:6443 --token e8yb38.hqq4pz9dmlq77jha --discovery-token-ca-cert-hash sha256:50b01f19d8060ba593a009d134912d62b95ca80fdbe76f3995c8ba6c4a92c705
  11. Create an admin user
[root@test-vm1 ~]$ groupadd -g 1000 k8sadm
[root@test-vm1 ~]$ useradd -u 1000 -g k8sadm -G wheel k8sadm
[root@test-vm1 ~]$ passwd k8sadm
Changing password for user k8sadm.
New password: 
Retype new password: 
passwd: all authentication tokens updated successfully.
[root@test-vm1 ~]$ su - k8sadm
[k8sadm@test-vm1 ~]$ mkdir -p $HOME/.kube
[k8sadm@test-vm1 ~]$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[k8sadm@test-vm1 ~]$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
  12. Configure the pod network
[k8sadm@test-vm1 ~]$ kubectl get nodes
NAME                STATUS     ROLES     AGE       VERSION
test-vm1.home.lcl   NotReady   master    2m        v1.11.0
[k8sadm@test-vm1 ~]$ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[k8sadm@test-vm1 ~]$ kubectl get nodes
NAME                STATUS    ROLES     AGE       VERSION
test-vm1.home.lcl   Ready     master    3m        v1.11.0
[k8sadm@test-vm1 ~]$ kubectl get pods --all-namespaces
NAMESPACE     NAME                                        READY     STATUS    RESTARTS   AGE
kube-system   coredns-78fcdf6894-g7rg4                    1/1       Running   0          2h
kube-system   coredns-78fcdf6894-vr4xm                    1/1       Running   0          2h
kube-system   etcd-test-vm1.home.lcl                      1/1       Running   1          2h
kube-system   kube-apiserver-test-vm1.home.lcl            1/1       Running   1          2h
kube-system   kube-controller-manager-test-vm1.home.lcl   1/1       Running   1          2h
kube-system   kube-proxy-524ql                            1/1       Running   1          2h
kube-system   kube-scheduler-test-vm1.home.lcl            1/1       Running   1          2h
kube-system   kube-flannel-ds-45d87                       1/1       Running   1          2h
kube-system   kube-flannel-ds-bqh8j                       1/1       Running   1          2h
kube-system   kube-flannel-ds-dfldc                       1/1       Running   1          2h

Configure the worker nodes

  1. Repeat steps 1 to 6 on all worker nodes

  2. Install docker and kubeadm

[root@test-vm2 ~]$ yum install -y kubeadm docker-ce kubelet
[root@test-vm3 ~]$ yum install -y kubeadm docker-ce kubelet
  3. Start Docker and enable it at boot
[root@test-vm2 ~]$ systemctl start docker && systemctl enable docker
[root@test-vm3 ~]$ systemctl start docker && systemctl enable docker
  4. Start kubelet and enable it at boot
[root@test-vm2 ~]$ systemctl start kubelet && systemctl enable kubelet
[root@test-vm3 ~]$ systemctl start kubelet && systemctl enable kubelet
  5. Join the workers to the master

Use the join command kubeadm returned in step 10 of the master installation.

[root@test-vm2 ~]$ kubeadm join 192.168.1.221:6443 --token e8yb38.hqq4pz9dmlq77jha --discovery-token-ca-cert-hash sha256:50b01f19d8060ba593a009d134912d62b95ca80fdbe76f3995c8ba6c4a92c705
[root@test-vm3 ~]$ kubeadm join 192.168.1.221:6443 --token e8yb38.hqq4pz9dmlq77jha --discovery-token-ca-cert-hash sha256:50b01f19d8060ba593a009d134912d62b95ca80fdbe76f3995c8ba6c4a92c705
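Bootstrap tokens expire after 24 hours by default, so if the join is rejected you can print a fresh join command on the master (the flag should be available in kubeadm 1.9 and later):

[root@test-vm1 ~]$ kubeadm token create --print-join-command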
  6. Verify the status

After a little while you should see:

[k8sadm@test-vm1 ~]$ kubectl get nodes
NAME                STATUS    ROLES     AGE       VERSION
test-vm1.home.lcl   Ready     master    26m       v1.11.1
test-vm2.home.lcl   Ready     <none>    1m        v1.11.1
test-vm3.home.lcl   Ready     <none>    1m        v1.11.1

sources:

  1. https://amasucci.com/post/2017/10/22/how-to-install-kubernetes-1.8.1-on-centos-7.3/
  2. ....
  3. ....

Continue with:

  1. k8s Dashboard Install https://sunwfrk.com/k8s-dashboard-installation/
  2. K8s rook-ceph Install https://sunwfrk.com/rook-ceph-on-k8s/