openshift 4.3 QoS
This article tests how the pod traffic-shaping (QoS) feature of OpenShift (with OVS) behaves under heavy traffic.
video
- https://youtu.be/IaWdkPsRinw
- https://www.bilibili.com/video/BV1cV411d7LV/
References (pod traffic shaping is configured with the kubernetes.io/ingress-bandwidth and kubernetes.io/egress-bandwidth annotations, used below):
https://docs.openshift.com/container-platform/4.3/nodes/pods/nodes-pods-configuring.html
https://docs.openshift.com/container-platform/3.11/admin_guide/managing_pods.html#admin-guide-manage-pods-limit-bandwidth
# Check the NIC speed on infra0 and infra1; both are 10GbE ports.
ethtool em1
# Settings for em1:
#     Supported ports: [ FIBRE ]
#     Supported link modes:   1000baseT/Full
#                             10000baseT/Full
#     Supported pause frame use: Symmetric Receive-only
#     Supports auto-negotiation: No
#     Supported FEC modes: Not reported
#     Advertised link modes:  10000baseT/Full
#     Advertised pause frame use: No
#     Advertised auto-negotiation: No
#     Advertised FEC modes: Not reported
#     Speed: 10000Mb/s
#     Duplex: Full
#     Port: FIBRE
#     PHYAD: 1
#     Transceiver: internal
#     Auto-negotiation: off
#     Supports Wake-on: g
#     Wake-on: d
#     Current message level: 0x00000000 (0)
#     Link detected: yes
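
# To spot-check the link speed on both infra nodes from the cluster side, a debug
# pod can be used (a sketch; assumes the interface is also named em1 on both nodes):
for i in infra0 infra1; do
  oc debug node/${i}.hsc.redhat.ren -- chroot /host ethtool em1 | grep Speed
done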
# Create two server pods running iperf3 as the server, with no bandwidth limit,
# plus one client pod that runs iperf3 as the client.
cat << EOF > demo.yaml
---
kind: Pod
apiVersion: v1
metadata:
  name: demo-pod1
  namespace: demo
spec:
  nodeSelector:
    kubernetes.io/hostname: 'infra1.hsc.redhat.ren'
  restartPolicy: Always
  containers:
    - name: demo1
      image: >-
        registry.redhat.ren:5443/docker.io/wangzheng422/centos:centos7-test
      env:
        - name: key
          value: value
      command: ["iperf3", "-s", "-p"]
      args: ["6666"]
      imagePullPolicy: Always
      resources:
        requests:
          cpu: 4.0
          memory: 8Gi
        limits:
          cpu: 60.0
          memory: 100Gi
---
kind: Pod
apiVersion: v1
metadata:
  name: demo-pod2
  namespace: default
spec:
  nodeSelector:
    kubernetes.io/hostname: 'infra1.hsc.redhat.ren'
  restartPolicy: Always
  containers:
    - name: demo1
      image: >-
        registry.redhat.ren:5443/docker.io/wangzheng422/centos:centos7-test
      env:
        - name: key
          value: value
      command: ["iperf3", "-s", "-p"]
      args: ["6666"]
      imagePullPolicy: Always
      resources:
        requests:
          cpu: 4.0
          memory: 8Gi
        limits:
          cpu: 60.0
          memory: 100Gi
---
kind: Pod
apiVersion: v1
metadata:
  name: iperf
  namespace: zte
spec:
  nodeSelector:
    kubernetes.io/hostname: 'infra0.hsc.redhat.ren'
  restartPolicy: Always
  containers:
    - name: iperf
      image: >-
        registry.redhat.ren:5443/docker.io/wangzheng422/centos:centos7-test
      env:
        - name: key
          value: value
      command: ["/bin/bash", "-c", "--"]
      args: ["trap : TERM INT; sleep infinity & wait"]
      imagePullPolicy: Always
      resources:
        requests:
          cpu: 4.0
          memory: 8Gi
        limits:
          cpu: 60.0
          memory: 100Gi
EOF
oc apply -f demo.yaml
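
# Wait until all three pods are Ready before testing (oc wait is one way to do this):
oc wait --for=condition=Ready pod/demo-pod1 -n demo --timeout=120s
oc wait --for=condition=Ready pod/demo-pod2 -n default --timeout=120s
oc wait --for=condition=Ready pod/iperf -n zte --timeout=120s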
# Look up the server pod IPs
oc get pod -A -o wide | grep demo-pod
oc get pod -n zte -o wide
pod_demo1_ip=$(oc get pod -n demo demo-pod1 -o json | jq -r '.status.podIPs[0].ip')
pod_demo2_ip=$(oc get pod -n default demo-pod2 -o json | jq -r '.status.podIPs[0].ip')
echo $pod_demo1_ip
echo $pod_demo2_ip
# From the client pod, run throughput tests against both server pods
/bin/rm -f nohup.out
nohup oc exec -n zte -it iperf -- iperf3 -T demo1 -i 10 -t 30 -b 3G -P 6 -p 6666 -c $pod_demo1_ip 2>&1 &
nohup oc exec -n zte -it iperf -- iperf3 -T demo2 -i 10 -t 30 -b 6G -P 6 -p 6666 -c $pod_demo2_ip 2>&1 &
tail -f nohup.out
# Adjust the offered traffic and retest both server pods
/bin/rm -f nohup.out
nohup oc exec -n zte -it iperf -- iperf3 -T demo1 -i 10 -t 30 -b 6G -P 6 -p 6666 -c $pod_demo1_ip 2>&1 &
nohup oc exec -n zte -it iperf -- iperf3 -T demo2 -i 10 -t 30 -b 6G -P 6 -p 6666 -c $pod_demo2_ip 2>&1 &
tail -f nohup.out
# Adjust the offered traffic and retest both server pods
/bin/rm -f nohup.out
nohup oc exec -n zte -it iperf -- iperf3 -T demo1 -i 10 -t 30 -b 8G -P 6 -p 6666 -c $pod_demo1_ip 2>&1 &
nohup oc exec -n zte -it iperf -- iperf3 -T demo2 -i 10 -t 30 -b 6G -P 6 -p 6666 -c $pod_demo2_ip 2>&1 &
tail -f nohup.out
# Check the server pods' logs for the traffic statistics.
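# The iperf3 servers write their per-test results to stdout, so oc logs shows them:
oc logs -n demo demo-pod1
oc logs -n default demo-pod2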
# Limit the server pod (demo-pod1) bandwidth to 6G
oc delete pod -n demo demo-pod1
cat << EOF > demo1.yaml
---
kind: Pod
apiVersion: v1
metadata:
  name: demo-pod1
  namespace: demo
  annotations:
    kubernetes.io/ingress-bandwidth: 6G
    kubernetes.io/egress-bandwidth: 6G
spec:
  nodeSelector:
    kubernetes.io/hostname: 'infra1.hsc.redhat.ren'
  restartPolicy: Always
  containers:
    - name: demo1
      image: >-
        registry.redhat.ren:5443/docker.io/wangzheng422/centos:centos7-test
      env:
        - name: key
          value: value
      command: ["iperf3", "-s", "-p"]
      args: ["6666"]
      imagePullPolicy: Always
EOF
oc apply -n demo -f demo1.yaml
# Look up the server pod IPs
oc get pod -A -o wide | grep demo-pod
oc get pod -n zte -o wide
pod_demo1_ip=$(oc get pod -n demo demo-pod1 -o json | jq -r '.status.podIPs[0].ip')
pod_demo2_ip=$(oc get pod -n default demo-pod2 -o json | jq -r '.status.podIPs[0].ip')
echo $pod_demo1_ip
echo $pod_demo2_ip
# Adjust the offered traffic and retest both server pods
/bin/rm -f nohup.out
nohup oc exec -n zte -it iperf -- iperf3 -T demo1 -i 10 -t 30 -b 8G -P 6 -p 6666 -c $pod_demo1_ip 2>&1 &
nohup oc exec -n zte -it iperf -- iperf3 -T demo2 -i 10 -t 30 -b 6G -P 6 -p 6666 -c $pod_demo2_ip 2>&1 &
tail -f nohup.out
# Check the server pods' logs for the traffic statistics.
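# To confirm the limit is actually programmed on the node, one way is to inspect
# OVS on infra1 (a sketch; assumes openshift-sdn implements egress-bandwidth via
# OVS ingress policing and ingress-bandwidth via an HTB qos record, and that the
# OVS daemonset runs in the openshift-sdn namespace):
oc get pod -n openshift-sdn -o wide | grep infra1
oc exec -n openshift-sdn <ovs-pod-on-infra1> -- ovs-vsctl --columns=name,ingress_policing_rate list interface
oc exec -n openshift-sdn <ovs-pod-on-infra1> -- ovs-vsctl list qos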
# Limit the server pod (demo-pod1) bandwidth to 3G
oc delete pod -n demo demo-pod1
cat << EOF > demo1.yaml
---
kind: Pod
apiVersion: v1
metadata:
  name: demo-pod1
  namespace: demo
  annotations:
    kubernetes.io/ingress-bandwidth: 3G
    kubernetes.io/egress-bandwidth: 3G
spec:
  nodeSelector:
    kubernetes.io/hostname: 'infra1.hsc.redhat.ren'
  restartPolicy: Always
  containers:
    - name: demo1
      image: >-
        registry.redhat.ren:5443/docker.io/wangzheng422/centos:centos7-test
      env:
        - name: key
          value: value
      command: ["iperf3", "-s", "-p"]
      args: ["6666"]
      imagePullPolicy: Always
EOF
oc apply -n demo -f demo1.yaml
# Look up the server pod IPs
oc get pod -A -o wide | grep demo-pod
oc get pod -n zte -o wide
pod_demo1_ip=$(oc get pod -n demo demo-pod1 -o json | jq -r '.status.podIPs[0].ip')
pod_demo2_ip=$(oc get pod -n default demo-pod2 -o json | jq -r '.status.podIPs[0].ip')
echo $pod_demo1_ip
echo $pod_demo2_ip
# Adjust the offered traffic and retest both server pods
/bin/rm -f nohup.out
nohup oc exec -n zte -it iperf -- iperf3 -T demo1 -i 10 -t 30 -b 8G -P 6 -p 6666 -c $pod_demo1_ip 2>&1 &
nohup oc exec -n zte -it iperf -- iperf3 -T demo2 -i 10 -t 30 -b 6G -P 6 -p 6666 -c $pod_demo2_ip 2>&1 &
tail -f nohup.out
# Check the server pods' logs for the traffic statistics.
oc delete -f demo.yaml
packet size
# Test how the TCP maximum segment size (iperf3 -M) affects throughput through the pod network.
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 1500 -p 6666 -c $pod_demo1_ip
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 3.66 GBytes 3.15 Gbits/sec 221 sender
# demo1: [ 4] 0.00-10.00 sec 3.66 GBytes 3.14 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 38.5% (1.8%u/36.6%s), remote/receiver 9.6% (0.4%u/9.2%s)
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 1000 -p 6666 -c $pod_demo1_ip
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 2.68 GBytes 2.30 Gbits/sec 304 sender
# demo1: [ 4] 0.00-10.00 sec 2.68 GBytes 2.30 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 22.8% (1.0%u/21.7%s), remote/receiver 2.4% (0.2%u/2.2%s)
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 500 -p 6666 -c $pod_demo1_ip
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 1.32 GBytes 1.14 Gbits/sec 195 sender
# demo1: [ 4] 0.00-10.00 sec 1.32 GBytes 1.13 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 13.6% (0.9%u/12.7%s), remote/receiver 4.2% (0.3%u/4.0%s)
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 100 -p 6666 -c $pod_demo1_ip
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 224 MBytes 188 Mbits/sec 590 sender
# demo1: [ 4] 0.00-10.00 sec 223 MBytes 187 Mbits/sec receiver
# demo1: CPU Utilization: local/sender 3.5% (0.2%u/3.3%s), remote/receiver 10.2% (0.1%u/10.1%s)
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 1500 -P 10 -p 6666 -c $pod_demo1_ip
# demo1: [SUM] 0.00-10.00 sec 9.21 GBytes 7.91 Gbits/sec 4804 sender
# demo1: [SUM] 0.00-10.00 sec 9.20 GBytes 7.90 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 65.3% (2.5%u/62.8%s), remote/receiver 28.5% (0.4%u/28.1%s)
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 1000 -P 10 -p 6666 -c $pod_demo1_ip
# demo1: [SUM] 0.00-10.00 sec 8.62 GBytes 7.40 Gbits/sec 4354 sender
# demo1: [SUM] 0.00-10.00 sec 8.61 GBytes 7.40 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 73.7% (2.4%u/71.3%s), remote/receiver 19.7% (0.9%u/18.8%s)
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 500 -P 10 -p 6666 -c $pod_demo1_ip
# demo1: [SUM] 0.00-10.00 sec 4.72 GBytes 4.05 Gbits/sec 7142 sender
# demo1: [SUM] 0.00-10.00 sec 4.71 GBytes 4.05 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 49.4% (2.0%u/47.3%s), remote/receiver 17.6% (0.6%u/17.1%s)
oc exec -n zte -it iperf -- iperf3 -T demo1 -V -b 10G -M 100 -P 10 -p 6666 -c $pod_demo1_ip
# demo1: [SUM] 0.00-10.00 sec 895 MBytes 750 Mbits/sec 10362 sender
# demo1: [SUM] 0.00-10.00 sec 889 MBytes 745 Mbits/sec receiver
# demo1: CPU Utilization: local/sender 14.4% (0.6%u/13.7%s), remote/receiver 22.6% (0.3%u/22.3%s)
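
# For comparison, the same MSS sweep run directly from the host shell (not through
# oc exec / the pod network), against 117.177.241.24: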
iperf3 -T demo1 -V -b 10G -M 1500 -p 6666 -c 117.177.241.24
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 10.5 GBytes 8.98 Gbits/sec 0 sender
# demo1: [ 4] 0.00-10.00 sec 10.4 GBytes 8.98 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 52.8% (2.7%u/50.2%s), remote/receiver 30.6% (1.0%u/29.5%s)
iperf3 -T demo1 -V -b 10G -M 1000 -p 6666 -c 117.177.241.24
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 9.28 GBytes 7.97 Gbits/sec 0 sender
# demo1: [ 4] 0.00-10.00 sec 9.27 GBytes 7.96 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 54.4% (3.2%u/51.2%s), remote/receiver 19.2% (0.1%u/19.1%s)
iperf3 -T demo1 -V -b 10G -M 500 -p 6666 -c 117.177.241.24
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 6.14 GBytes 5.28 Gbits/sec 5857 sender
# demo1: [ 4] 0.00-10.00 sec 6.14 GBytes 5.27 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 30.6% (2.1%u/28.5%s), remote/receiver 12.6% (0.1%u/12.5%s)
iperf3 -T demo1 -V -b 10G -M 100 -p 6666 -c 117.177.241.24
# demo1: Test Complete. Summary Results:
# demo1: [ ID] Interval Transfer Bandwidth Retr
# demo1: [ 4] 0.00-10.00 sec 1.41 GBytes 1.21 Gbits/sec 3499 sender
# demo1: [ 4] 0.00-10.00 sec 1.40 GBytes 1.21 Gbits/sec receiver
# demo1: CPU Utilization: local/sender 8.2% (0.9%u/7.4%s), remote/receiver 23.8% (0.1%u/23.7%s)
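
# Summary of the packet-size tests: throughput tracks the TCP MSS. A single stream
# through the pod network drops from ~3.15 Gbits/sec at -M 1500 to ~188 Mbits/sec
# at -M 100; 10 parallel streams recover much of the gap at the larger MSS values;
# and the direct host-to-host runs show the same trend at higher absolute rates.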