openshift 4.3 huge page

video

  • https://youtu.be/T7R-j0B9eSY
  • https://www.bilibili.com/video/BV1De411W7JU/

https://docs.openshift.com/container-platform/4.3/scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.html

https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/performance_tuning_guide/sect-red_hat_enterprise_linux-performance_tuning_guide-configuring_transparent_huge_pages

# check original status
cat /sys/kernel/mm/transparent_hugepage/enabled
# [always] madvise never

cat /sys/kernel/mm/transparent_hugepage/defrag
# [always] madvise never
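
# optionally, record the baseline huge page pool before the test
# (extra sanity check, not part of the original run)
grep -i huge /proc/meminfo
# HugePages_Total is expected to be 0 before the tuned profile is applied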

# begin the test: label a node so the tuned profile can be matched to it
oc label node infra1.hsc.redhat.ren hugepages=true
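
# confirm the label landed on the node (optional sanity check)
oc get nodes -l hugepages=true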

cat << EOF > hugepages_tuning.yaml
apiVersion: tuned.openshift.io/v1
kind: Tuned
metadata:
  name: hugepages 
  namespace: openshift-cluster-node-tuning-operator
spec:
  profile: 
  - data: |
      [main]
      summary=Configuration for hugepages
      include=openshift-node

      [vm]
      transparent_hugepages=never

      [sysctl]
      vm.nr_hugepages=1024
    name: node-hugepages
  recommend:
  - match: 
    - label: hugepages
    priority: 30
    profile: node-hugepages
EOF

oc create -f hugepages_tuning.yaml
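
# confirm the Tuned custom resource was created (optional)
oc get tuned -n openshift-cluster-node-tuning-operator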

oc get pod -o wide -n openshift-cluster-node-tuning-operator

oc logs tuned-86g8b \
    -n openshift-cluster-node-tuning-operator | grep 'applied$' | tail -n1
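
# the settings can also be verified from outside the node; this is a sketch using
# oc debug (the checks below under "check result" were run on the node itself)
oc debug node/infra1.hsc.redhat.ren -- chroot /host sysctl vm.nr_hugepages
# vm.nr_hugepages = 1024 is expected once the profile is applied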

# check result
cat /sys/kernel/mm/transparent_hugepage/enabled
# always madvise [never]

cat /sys/kernel/mm/transparent_hugepage/defrag
# [always] madvise never

# the hugepages node label matched the recommend rule and triggered automatic selection of the profile.
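
# the reserved huge pages also show up as a node resource; as a sketch (not from the
# original run), 1024 x 2MiB should appear as hugepages-2Mi: 2Gi
oc describe node infra1.hsc.redhat.ren | grep -i hugepages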

cat << EOF > hugepages-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  generateName: hugepages-volume-
spec:
  containers:
  - securityContext:
      privileged: true
    image: registry.redhat.ren:5443/docker.io/wangzheng422/centos:centos7-test
    imagePullPolicy: Always
    command:
    - sleep
    - inf
    name: example
    volumeMounts:
    - mountPath: /dev/hugepages
      name: hugepage
    resources:
      limits:
        hugepages-2Mi: 100Mi 
        memory: "1Gi"
        cpu: "1"
  volumes:
  - name: hugepage
    emptyDir:
      medium: HugePages
EOF
oc create -n demo -f hugepages-pod.yaml
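
# the pod name is generated (generateName), so look it up before logging in
oc get pod -n demo -o wide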

# login into pod
oc rsh hugepages-volume-9nwlv

mount | grep page
# nodev on /dev/hugepages type hugetlbfs (rw,relatime,seclabel,pagesize=2Mi)
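
# note: /proc/meminfo is not namespaced, so from inside the pod it still shows
# the node's huge page pool
grep Huge /proc/meminfo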

# take a look at the system's huge page status
# hugeadm comes from the libhugetlbfs-utils package
# yum install libhugetlbfs-utils
hugeadm --explain
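
# hugeadm can also list the configured pools per page size (optional)
hugeadm --pool-list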

# according to the following two posts, huge pages are meant to back memory
# allocations made by programs; they cannot be demonstrated with ordinary file operations
# https://serverfault.com/questions/811670/how-to-create-copy-a-file-into-hugetlbfs
# https://stackoverflow.com/questions/40285971/how-to-load-text-segments-of-shared-libraries-into-huge-pages-on-linux

# sysbench memory --memory-hugetlb=on --memory-total-size=200M run
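
# besides sysbench, a small C program can show the huge pages actually being consumed.
# the sketch below is not from the original test: it assumes gcc is available inside
# the pod (yum install -y gcc otherwise), and the /tmp paths and the /dev/hugepages/demo
# file name are just illustrative.
cat << EOF > /tmp/hugepage-demo.c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH (16 * 2 * 1024 * 1024UL)   /* 16 x 2MiB huge pages = 32MiB */

int main(void)
{
    /* files on the hugetlbfs mount are backed by 2MiB pages and grow on fault */
    int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
    if (fd < 0) { perror("open"); return 1; }

    char *p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    memset(p, 0x5a, LENGTH);   /* touch every page so the huge pages get faulted in */
    printf("mapped and touched %lu bytes backed by huge pages\n", LENGTH);
    sleep(10);                 /* keep the mapping alive so it can be observed */

    munmap(p, LENGTH);
    close(fd);
    unlink("/dev/hugepages/demo");
    return 0;
}
EOF

gcc -o /tmp/hugepage-demo /tmp/hugepage-demo.c
/tmp/hugepage-demo &
sleep 2
grep HugePages_Free /proc/meminfo   # should drop by 16 while the demo is running
wait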

# restore
oc delete -f hugepages_tuning.yaml
# reboot
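
# after the reboot the node should be back to the defaults seen at the top:
# transparent_hugepage back to [always] and vm.nr_hugepages back to 0
cat /sys/kernel/mm/transparent_hugepage/enabled
sysctl vm.nr_hugepages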