openshift 4.10 single node with ODF
We can configure Ceph/ODF storage for a single node OpenShift cluster. The backing device can be a dedicated extra disk, or a spare data partition carved out of the system installation disk.
The prerequisite lab for this document is deploying a plain single node OpenShift cluster.
video walkthrough
reference:
install ceph components to ocp
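Rook will only consume a disk or partition that carries no filesystem or partition signatures, so before installing anything it is worth confirming that the node really exposes an empty device. A minimal check and wipe; the node name acm-demo-hub-master and the spare disk /dev/vdb are assumptions to adjust for your environment:
oc debug node/acm-demo-hub-master -- chroot /host lsblk -f
# if the disk still carries old signatures, clear them (this DESTROYS data on /dev/vdb)
oc debug node/acm-demo-hub-master -- chroot /host wipefs -a /dev/vdb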
# cat << EOF > /data/install/local-storage.yaml
# ---
# apiVersion: v1
# kind: Namespace
# metadata:
#   name: openshift-local-storage
#   annotations:
#     workload.openshift.io/allowed: management
# ---
# apiVersion: operators.coreos.com/v1
# kind: OperatorGroup
# metadata:
#   name: openshift-local-storage
#   namespace: openshift-local-storage
# spec:
#   targetNamespaces:
#   - openshift-local-storage
# ---
# apiVersion: operators.coreos.com/v1alpha1
# kind: Subscription
# metadata:
#   name: local-storage-operator
#   namespace: openshift-local-storage
# spec:
#   channel: "stable"
#   installPlanApproval: Manual
#   name: local-storage-operator
#   source: redhat-operators
#   sourceNamespace: openshift-marketplace
# EOF
# oc create -f /data/install/local-storage.yaml
cat << EOF > /data/install/openshift-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: openshift-storage
  annotations:
    workload.openshift.io/allowed: management
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: openshift-storage
  namespace: openshift-storage
spec:
  targetNamespaces:
  - openshift-storage
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: odf-operator
  namespace: openshift-storage
spec:
  channel: "stable-4.10"
  installPlanApproval: Manual
  name: odf-operator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
EOF
oc create -f /data/install/openshift-storage.yaml
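Because the Subscription sets installPlanApproval: Manual, the operator stays pending until its InstallPlan is approved. A sketch of the approval step, using jsonpath because the plan name is generated (on a fresh namespace items[0] is the plan just created):
oc get installplan -n openshift-storage
oc patch installplan -n openshift-storage \
  $(oc get installplan -n openshift-storage -o jsonpath='{.items[0].metadata.name}') \
  --type merge --patch '{"spec":{"approved":true}}'
# the operator pods should appear shortly afterwards
oc get pod -n openshift-storage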
cd /data/install
cat << EOF > /data/install/ceph-cluster.yaml
---
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: main
  namespace: openshift-storage
spec:
  storage:
    useAllNodes: true
    useAllDevices: true
  cephVersion:
    # Ceph 16 (pacific)
    image: quay.io/ceph/ceph:v16.2.6 # https://quay.io/repository/ceph/ceph?tab=tags
    #image: registry.redhat.io/rhceph/rhceph-5-rhel8:5-14 # https://catalog.redhat.com/software/containers/rhceph/rhceph-5-rhel8/60ec72a74a6a2c7844abe5fb?tag=all
    # Ceph 14 (nautilus)
    #image: quay.io/ceph/ceph:v14.2.22
    #image: registry.redhat.io/rhceph/rhceph-4-rhel8:4-59 # https://catalog.redhat.com/software/containers/detail/5e39df7cd70cc54b02baf33f?tag=all
    # Ceph 12 (luminous)
    #image: registry.redhat.io/rhceph/rhceph-3-rhel7:3-51 # https://catalog.redhat.com/software/containers/rhceph/rhceph-3-rhel7/5a15ec17ecb5244d5b553577?tag=all
  mon:
    allowMultiplePerNode: true
  mgr:
    allowMultiplePerNode: true
    modules:
    - name: balancer
      enabled: true
    - name: pg_autoscaler
      enabled: true
    - name: rook
      enabled: true
  dashboard:
    enabled: true
    port: 8443
    ssl: false
  monitoring:
    enabled: true
    rulesNamespace: openshift-storage
  logCollector:
    enabled: true
    periodicity: 24h
  disruptionManagement:
    managePodBudgets: true
    machineDisruptionBudgetNamespace: openshift-machine-api
  priorityClassNames:
    mgr: system-node-critical
    mon: system-node-critical
    osd: system-node-critical
  dataDirHostPath: /var/lib/rook # under /host in CoreOS
  continueUpgradeAfterChecksEvenIfNotHealthy: true
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: rook-config-override # this name is required!
  namespace: openshift-storage
data:
  config: |
    [global]
    osd_pool_default_size = 1
    mon_warn_on_pool_no_redundancy = false
EOF
oc create -f /data/install/ceph-cluster.yaml
# oc apply -f /data/install/ceph-cluster.yaml
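The operator needs a few minutes to bootstrap the mon, mgr and OSD daemons; checking the CephCluster phase and the pods first avoids confusing errors from the status command below:
# PHASE should eventually report Ready
oc get cephcluster -n openshift-storage
# watch the mon/mgr/osd pods come up (Ctrl-C to stop)
oc get pod -n openshift-storage -w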
oc exec deployment/rook-ceph-operator -n openshift-storage -- \
ceph -c /var/lib/rook/openshift-storage/openshift-storage.config -s
# cluster:
#   id:     17cb663d-e4f4-4f9b-9993-ce33c971496a
#   health: HEALTH_OK
#
# services:
#   mon: 3 daemons, quorum a,b,c (age 8m)
#   mgr: a(active, since 7m)
#   osd: 1 osds: 1 up (since 7m), 1 in (since 7m)
#
# data:
#   pools:   1 pools, 128 pgs
#   objects: 0 objects, 0 B
#   usage:   5.4 MiB used, 100 GiB / 100 GiB avail
#   pgs:     128 active+clean
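The same exec trick runs any ceph subcommand; for example, to confirm the single-OSD layout and the raw capacity:
oc exec deployment/rook-ceph-operator -n openshift-storage -- \
  ceph -c /var/lib/rook/openshift-storage/openshift-storage.config osd tree
oc exec deployment/rook-ceph-operator -n openshift-storage -- \
  ceph -c /var/lib/rook/openshift-storage/openshift-storage.config df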
# oc expose svc/rook-ceph-mgr-dashboard -n openshift-storage
oc create route edge --service=rook-ceph-mgr-dashboard -n openshift-storage
oc get route -n openshift-storage
# NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
# rook-ceph-mgr-dashboard rook-ceph-mgr-dashboard-openshift-storage.apps.acm-demo-hub.redhat.ren rook-ceph-mgr-dashboard http-dashboard None
oc get secret rook-ceph-dashboard-password --output=jsonpath="{['data']['password']}" -n openshift-storage | base64 -d && echo
# d%`1E#/jBL?7NcG0G5\*
# the route uses edge termination, so access the dashboard at https://rook-ceph-mgr-dashboard-openshift-storage.apps.acm-demo-hub.redhat.ren/
# log in with username admin and the password printed above
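A quick reachability check from the helper machine, using the route host printed above; expect HTTP 200 once the mgr dashboard module is up:
curl -k -s -o /dev/null -w '%{http_code}\n' \
  https://rook-ceph-mgr-dashboard-openshift-storage.apps.acm-demo-hub.redhat.ren/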
add cephfs support
cat << EOF > /data/install/ceph-cluster-config.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: main
  namespace: openshift-storage
# See:
# https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md
# https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md
# https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md
spec:
  metadataPool:
    replicated:
      size: 1
      requireSafeReplicaSize: false
  dataPools:
  - failureDomain: osd
    replicated:
      size: 1
      requireSafeReplicaSize: false
  metadataServer:
    activeCount: 1
    activeStandby: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-fs
reclaimPolicy: Delete
provisioner: openshift-storage.cephfs.csi.ceph.com
parameters:
  clusterID: openshift-storage
  fsName: main
  pool: main-data0
  csi.storage.k8s.io/provisioner-secret-namespace: openshift-storage
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: openshift-storage
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/node-stage-secret-namespace: openshift-storage
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
EOF
oc create -f /data/install/ceph-cluster-config.yaml
# oc delete -f /data/install/ceph-cluster-config.yaml
oc exec deployment/rook-ceph-operator -n openshift-storage -- ceph -c /var/lib/rook/openshift-storage/openshift-storage.config -s
# cluster:
#   id:     3e7d32b0-9160-4421-9c7e-217116279601
#   health: HEALTH_OK
#
# services:
#   mon: 3 daemons, quorum a,b,c (age 4m)
#   mgr: a(active, since 3m)
#   mds: 1/1 daemons up, 1 hot standby
#   osd: 1 osds: 1 up (since 3m), 1 in (since 4m)
#
# data:
#   volumes: 1/1 healthy
#   pools:   3 pools, 192 pgs
#   objects: 22 objects, 2.3 KiB
#   usage:   6.2 MiB used, 100 GiB / 100 GiB avail
#   pgs:     192 active+clean
#
# io:
#   client: 852 B/s rd, 1 op/s rd, 0 op/s wr
#
# progress:
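With the ceph-fs StorageClass in place, a throwaway PVC is the quickest functional test; a minimal sketch (the claim name test-cephfs is made up for this test):
cat << EOF > /data/install/test-cephfs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-cephfs
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: ceph-fs
  resources:
    requests:
      storage: 1Gi
EOF
oc create -n default -f /data/install/test-cephfs-pvc.yaml
# STATUS should turn Bound once the cephfs CSI provisioner reacts
oc get pvc -n default test-cephfs
oc delete -n default -f /data/install/test-cephfs-pvc.yaml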
add ceph-rbd support
cat << EOF > /data/install/ceph-cluster-config-rbd.yaml
---
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: openshift-storage
spec:
  failureDomain: osd
  replicated:
    size: 1
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: openshift-storage.rbd.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  clusterID: openshift-storage
  # Ceph pool into which the RBD image shall be created
  pool: replicapool
  # (optional) mapOptions is a comma-separated list of map options.
  # For krbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  # For nbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  # mapOptions: lock_on_read,queue_depth=1024
  # (optional) unmapOptions is a comma-separated list of unmap options.
  # For krbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
  # For nbd options refer
  # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
  # unmapOptions: force
  # RBD image format. Defaults to "2".
  imageFormat: "2"
  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only the layering feature.
  imageFeatures: layering
  # The secrets contain Ceph admin credentials.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: openshift-storage
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: openshift-storage
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: openshift-storage
  csi.storage.k8s.io/fstype: ext4
# Delete the rbd volume when a PVC is deleted
reclaimPolicy: Delete
# Optional, if you want to add dynamic resize for PVC.
# For now only ext3, ext4, xfs resize support provided, like in Kubernetes itself.
allowVolumeExpansion: true
EOF
oc create -f /data/install/ceph-cluster-config-rbd.yaml
# oc delete -f /data/install/ceph-cluster-config-rbd.yaml
kubectl patch storageclass ceph-rbd -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
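Verify that the annotation landed; ceph-rbd should now be marked (default), and any PVC without an explicit storageClassName will use it:
oc get sc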
oc exec deployment/rook-ceph-operator -n openshift-storage -- ceph -c /var/lib/rook/openshift-storage/openshift-storage.config -s
# cluster:
#   id:     17cb663d-e4f4-4f9b-9993-ce33c971496a
#   health: HEALTH_WARN
#           too many PGs per OSD (302 > max 250)
#
# services:
#   mon: 3 daemons, quorum a,b,c (age 67m)
#   mgr: a(active, since 38m)
#   mds: 1/1 daemons up, 1 hot standby
#   osd: 1 osds: 1 up (since 38m), 1 in (since 67m)
#
# data:
#   volumes: 1/1 healthy
#   pools:   4 pools, 302 pgs
#   objects: 28 objects, 2.3 KiB
#   usage:   33 MiB used, 100 GiB / 100 GiB avail
#   pgs:     0.331% pgs not active
#            301 active+clean
#            1   peering
#
# progress:
#   Global Recovery Event (4s)
#     [===========================.]
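The HEALTH_WARN above appears because four pools' worth of placement groups now sit on a single OSD. On a one-disk lab cluster the limit can simply be raised (do not do this on a real multi-node cluster); a sketch:
oc exec deployment/rook-ceph-operator -n openshift-storage -- \
  ceph -c /var/lib/rook/openshift-storage/openshift-storage.config \
  config set global mon_max_pg_per_osd 512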
add object storage / s3 support
cat << EOF > /data/install/ceph-cluster-config-object-store.yaml
---
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: my-store
  namespace: openshift-storage
spec:
  metadataPool:
    failureDomain: osd
    replicated:
      size: 1
  dataPool:
    failureDomain: osd
    # erasureCoded:
    #   dataChunks: 2
    #   codingChunks: 1
  preservePoolsOnDelete: true
  gateway:
    sslCertificateRef:
    port: 80
    # securePort: 443
    instances: 1
  healthCheck:
    bucket:
      disabled: false
      interval: 60s
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-bucket
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: openshift-storage.ceph.rook.io/bucket
reclaimPolicy: Delete
parameters:
  objectStoreName: my-store
  objectStoreNamespace: openshift-storage
EOF
oc create -f /data/install/ceph-cluster-config-object-store.yaml
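The object store is only usable once its RGW gateway is running, so check it before creating any bucket claims:
# the CephObjectStore PHASE should eventually show Ready
oc get cephobjectstore -n openshift-storage
# one rook-ceph-rgw-my-store-* pod should be Running
oc get pod -n openshift-storage -l app=rook-ceph-rgw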
# test it out
cat << EOF > /data/install/ceph-cluster-config-s3.yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: ceph-bucket
spec:
  generateBucketName: ceph-bkt
  storageClassName: ceph-bucket
EOF
oc create -n default -f /data/install/ceph-cluster-config-s3.yaml
# oc get -n default ObjectBucketClaim
# get parameters from ceph's object storage
export AWS_HOST=$(kubectl -n default get cm ceph-bucket -o jsonpath='{.data.BUCKET_HOST}')
export PORT=$(kubectl -n default get cm ceph-bucket -o jsonpath='{.data.BUCKET_PORT}')
export BUCKET_NAME=$(kubectl -n default get cm ceph-bucket -o jsonpath='{.data.BUCKET_NAME}')
export AWS_ACCESS_KEY_ID=$(kubectl -n default get secret ceph-bucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode)
export AWS_SECRET_ACCESS_KEY=$(kubectl -n default get secret ceph-bucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode)
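These variables are all an S3 client needs. Note that BUCKET_HOST is the RGW's in-cluster service name, so the test has to run somewhere that can resolve and reach it (a pod, or the node itself); a sketch assuming the aws CLI is installed there, which reads the two AWS_* credentials from the environment:
echo "endpoint: http://${AWS_HOST}:${PORT}  bucket: ${BUCKET_NAME}"
echo 'hello ceph s3' > /tmp/hello.txt
aws --endpoint-url http://${AWS_HOST}:${PORT} s3 cp /tmp/hello.txt s3://${BUCKET_NAME}/
aws --endpoint-url http://${AWS_HOST}:${PORT} s3 ls s3://${BUCKET_NAME}/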