@@ -4,6 +4,9 @@ set -euo pipefail
4
4
# Namespace where all kube-ovn components (and the perf test pods) live.
KUBE_OVN_NS=kube-system
# Names of the current ovn-central leader pods; filled in by getOvnCentralPod.
OVN_NB_POD=
OVN_SB_POD=
OVN_NORTHD_POD=
# Duration (seconds) of each qperf/iperf measurement run.
PERF_TIMES=5
# Label value used to tag and select every pod created by the perf suite.
PERF_LABEL="PerfTest"
7
10
8
11
showHelp (){
9
12
echo " kubectl ko {subcommand} [option...]"
@@ -20,6 +23,7 @@ showHelp(){
20
23
echo " diagnose {all|node} [nodename] diagnose connectivity of all nodes or a specific node"
21
24
echo " reload restart all kube-ovn components"
22
25
echo " env-check check the environment configuration"
26
+ echo " perf [image] performance test default image is kubeovn/test:v1.12.0"
23
27
}
24
28
25
29
# usage: ipv4_to_hex 192.168.0.1
@@ -475,6 +479,17 @@ getOvnCentralPod(){
475
479
exit 1
476
480
fi
477
481
OVN_SB_POD=$SB_POD
482
+ NORTHD_POD=$( kubectl get pod -n kube-system -l ovn-northd-leader=true | grep ovn-central | head -n 1 | awk ' {print $1}' )
483
+ if [ -z " $NORTHD_POD " ]; then
484
+ echo " ovn northd not exists"
485
+ exit 1
486
+ fi
487
+ OVN_NORTHD_POD=$NORTHD_POD
488
+ image=$( kubectl -n kube-system get pods -l app=kube-ovn-cni -o jsonpath=' {.items[0].spec.containers[0].image}' )
489
+ if [ -z " $image " ]; then
490
+ echo " cannot get kube-ovn image"
491
+ exit 1
492
+ fi
478
493
}
479
494
480
495
getOvnCentralDbStatus (){
@@ -690,6 +705,281 @@ env-check(){
690
705
done
691
706
}
692
707
708
+
709
# Create the pod-network perf server pod ("test-server") on a node.
# It runs a qperf server in the background plus the image's test-server.sh.
# Globals:   KUBE_OVN_NS, PERF_LABEL (read)
# Arguments: $1 - node name to schedule on; $2 - test image
applyTestServer() {
  # locals (the client helpers already use local; keep this consistent)
  local tmpFileName="test-server.yaml"
  local podName="test-server"
  local nodeID=$1
  local imageID=$2

  cat <<EOF > "$tmpFileName"
apiVersion: v1
kind: Pod
metadata:
  name: $podName
  namespace: $KUBE_OVN_NS
  labels:
    app: $PERF_LABEL
spec:
  containers:
  - name: $podName
    image: $imageID
    imagePullPolicy: IfNotPresent
    command: ["sh", "-c"]
    args:
    - |
      qperf &
      ./test-server.sh
  nodeSelector:
    kubernetes.io/hostname: $nodeID
EOF

  kubectl apply -f "$tmpFileName"
  rm -f -- "$tmpFileName"
}
740
+
741
# Create the host-network perf server pod ("test-host-server") on a node.
# Identical to applyTestServer except the pod uses hostNetwork: true.
# Globals:   KUBE_OVN_NS, PERF_LABEL (read)
# Arguments: $1 - node name to schedule on; $2 - test image
applyTestHostServer() {
  # locals (the client helpers already use local; keep this consistent)
  local tmpFileName="test-host-server.yaml"
  local podName="test-host-server"
  local nodeID=$1
  local imageID=$2

  cat <<EOF > "$tmpFileName"
apiVersion: v1
kind: Pod
metadata:
  name: $podName
  namespace: $KUBE_OVN_NS
  labels:
    app: $PERF_LABEL
spec:
  hostNetwork: true
  containers:
  - name: $podName
    image: $imageID
    imagePullPolicy: IfNotPresent
    command: ["sh", "-c"]
    args:
    - |
      qperf &
      ./test-server.sh
  nodeSelector:
    kubernetes.io/hostname: $nodeID
EOF

  kubectl apply -f "$tmpFileName"
  rm -f -- "$tmpFileName"
}
773
+
774
+
775
# Create the pod-network perf client pod ("test-client") on a node.
# The pod just sleeps; measurements are driven via kubectl exec.
# Globals:   KUBE_OVN_NS, PERF_LABEL (read)
# Arguments: $1 - node name to schedule on; $2 - test image
applyTestClient() {
  local tmpFileName="test-client.yaml"
  local podName="test-client"
  local nodeID=$1
  local imageID=$2
  # note: no need to touch the file first; the redirection below creates it
  cat <<EOF > "$tmpFileName"
apiVersion: v1
kind: Pod
metadata:
  name: $podName
  namespace: $KUBE_OVN_NS
  labels:
    app: $PERF_LABEL
spec:
  containers:
  - name: $podName
    image: $imageID
    imagePullPolicy: IfNotPresent
    command: ["sh", "-c", "sleep infinity"]
  nodeSelector:
    kubernetes.io/hostname: $nodeID
EOF
  kubectl apply -f "$tmpFileName"
  rm -f -- "$tmpFileName"
}
801
+
802
# Create the host-network perf client pod ("test-host-client") on a node.
# Identical to applyTestClient except the pod uses hostNetwork: true.
# Globals:   KUBE_OVN_NS, PERF_LABEL (read)
# Arguments: $1 - node name to schedule on; $2 - test image
applyTestHostClient() {
  local tmpFileName="test-host-client.yaml"
  local podName="test-host-client"
  local nodeID=$1
  local imageID=$2
  # note: no need to touch the file first; the redirection below creates it
  cat <<EOF > "$tmpFileName"
apiVersion: v1
kind: Pod
metadata:
  name: $podName
  namespace: $KUBE_OVN_NS
  labels:
    app: $PERF_LABEL
spec:
  hostNetwork: true
  containers:
  - name: $podName
    image: $imageID
    imagePullPolicy: IfNotPresent
    command: ["sh", "-c", "sleep infinity"]
  nodeSelector:
    kubernetes.io/hostname: $nodeID
EOF
  kubectl apply -f "$tmpFileName"
  rm -f -- "$tmpFileName"
}
829
+
830
# Entry point for `kubectl ko perf [image]`.
# Deploys client/server test pods, waits for them to become Ready, then runs
# the unicast, host-network, multicast and leader-recovery measurements.
# Arguments: $1 (optional) - test image, default kubeovn/test:v1.12.0
perf() {
  local imageID=${1:-"kubeovn/test:v1.12.0"}

  local nodes
  nodes=($(kubectl get node --no-headers -o custom-columns=NAME:.metadata.name))
  # BUGFIX: use ${#nodes[@]} (array length); ${#nodes} is the string length
  # of the first element, so the node-count branches never matched correctly.
  if [[ ${#nodes[@]} -eq 0 ]]; then
    echo "can't find node in the cluster"
    return
  elif [[ ${#nodes[@]} -eq 1 ]]; then
    # single-node cluster: client and server must share the node
    applyTestClient "${nodes[0]}" "$imageID"
    applyTestHostClient "${nodes[0]}" "$imageID"
    applyTestServer "${nodes[0]}" "$imageID"
    applyTestHostServer "${nodes[0]}" "$imageID"
  else
    # place client and server on different nodes to measure the cross-node path
    applyTestClient "${nodes[1]}" "$imageID"
    applyTestHostClient "${nodes[1]}" "$imageID"
    applyTestServer "${nodes[0]}" "$imageID"
    applyTestHostServer "${nodes[0]}" "$imageID"
  fi

  # poll until every perf pod is Ready (kubectl wait itself retries briefly)
  local isfailed=true
  local i
  for i in {0..300}; do
    if kubectl wait pod --for=condition=Ready -l app="$PERF_LABEL" -n "$KUBE_OVN_NS"; then
      isfailed=false
      break
    fi
    sleep 1
  done

  if $isfailed; then
    echo "Error test pod not ready"
    return
  fi

  local serverIP hostserverIP
  serverIP=$(kubectl get pod test-server -n "$KUBE_OVN_NS" -o jsonpath={.status.podIP})
  hostserverIP=$(kubectl get pod test-host-server -n "$KUBE_OVN_NS" -o jsonpath={.status.podIP})

  echo "Start doing pod network performance"
  unicastPerfTest test-client "$serverIP"

  echo "Start doing host network performance"
  unicastPerfTest test-host-client "$hostserverIP"

  echo "Start doing pod multicast network performance"
  multicastPerfTest

  echo "Start doing leader recover time test"
  checkLeaderRecover

  # clean up all test pods
  kubectl delete pods -l app="$PERF_LABEL" -n "$KUBE_OVN_NS"
}
881
+
882
# Measure unicast TCP/UDP latency (qperf) and bandwidth/loss (iperf3) from a
# client pod to a server IP, for a range of message sizes.
# Globals:   KUBE_OVN_NS, PERF_TIMES (read)
# Arguments: $1 - client pod name; $2 - server pod IP
# Outputs:   a formatted result table on stdout
unicastPerfTest() {
  local clientPodName=$1
  local serverIP=$2
  local size output tcpLat udpLat udpBw udpLostRate tcpBw
  echo "=================================== unicast performance test ============================================================="
  printf "%-15s %-15s %-15s %-15s %-15s %-15s\n" "Size" "TCP Latency" "TCP Bandwidth" "UDP Latency" "UDP Lost Rate" "UDP Bandwidth"
  for size in "64" "128" "512" "1k" "4k"; do
    # latency via qperf; bandwidth and loss via iperf3 (UDP then TCP)
    output=$(kubectl exec "$clientPodName" -n "$KUBE_OVN_NS" -- qperf -t "$PERF_TIMES" "$serverIP" -ub -oo msg_size:"$size" -vu tcp_lat udp_lat 2>&1)
    tcpLat=$(echo "$output" | grep -oP 'tcp_lat: latency = \K[\d.]+ (us|ms|sec)')
    udpLat=$(echo "$output" | grep -oP 'udp_lat: latency = \K[\d.]+ (us|ms|sec)')
    kubectl exec "$clientPodName" -n "$KUBE_OVN_NS" -- iperf3 -c "$serverIP" -u -t "$PERF_TIMES" -i 1 -P 10 -b 1000G -l "$size" > temp_perf_result.log 2> /dev/null
    udpBw=$(grep -oP '\d+\.?\d* [KMG]bits/sec' temp_perf_result.log | tail -n 1)
    udpLostRate=$(grep -oP '\(\d+(\.\d+)?%\)' temp_perf_result.log | tail -n 1)

    kubectl exec "$clientPodName" -n "$KUBE_OVN_NS" -- iperf3 -c "$serverIP" -t "$PERF_TIMES" -i 1 -P 10 -l "$size" > temp_perf_result.log 2> /dev/null
    tcpBw=$(grep -oP '\d+\.?\d* [KMG]bits/sec' temp_perf_result.log | tail -n 1)
    printf "%-15s %-15s %-15s %-15s %-15s %-15s\n" "$size" "$tcpLat" "$tcpBw" "$udpLat" "$udpLostRate" "$udpBw"
  done
  echo "========================================================================================================================="
  rm -f temp_perf_result.log
}
903
+
904
# Measure multicast performance between test-client and test-server.
# Temporarily joins both pods' eth0 interfaces to the 224.0.0.100 multicast
# group (via the ovs-ovn pod on each node, entering the pod's netns), runs
# the iperf measurement, then leaves the group again.
multicastPerfTest() {
  local clientNode serverNode clientNs serverNs clientovsPod serverovsPod
  clientNode=$(kubectl get pod test-client -n "$KUBE_OVN_NS" -o jsonpath={.spec.nodeName})
  serverNode=$(kubectl get pod test-server -n "$KUBE_OVN_NS" -o jsonpath={.spec.nodeName})
  # resolve each pod's network namespace name from the OVS interface record
  clientNs=$(kubectl ko vsctl "$clientNode" --column=external_ids find interface external_ids:iface-id=test-client."$KUBE_OVN_NS" | awk -F 'pod_netns=' '{print $2}' | grep -o 'cni-[0-9a-f]\{8\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{12\}')
  serverNs=$(kubectl ko vsctl "$serverNode" --column=external_ids find interface external_ids:iface-id=test-server."$KUBE_OVN_NS" | awk -F 'pod_netns=' '{print $2}' | grep -o 'cni-[0-9a-f]\{8\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{12\}')
  clientovsPod=$(kubectl get pod -owide -A | grep ovs-ovn | grep "$clientNode" | awk '{print $2}')
  # 01:00:5e:00:00:64 is the L2 multicast MAC for 224.0.0.100
  kubectl exec "$clientovsPod" -n kube-system -- ip netns exec "$clientNs" ip maddr add 01:00:5e:00:00:64 dev eth0
  serverovsPod=$(kubectl get pod -owide -A | grep ovs-ovn | grep "$serverNode" | awk '{print $2}')
  kubectl exec "$serverovsPod" -n kube-system -- ip netns exec "$serverNs" ip maddr add 01:00:5e:00:00:64 dev eth0
  genMulticastPerfResult test-server
  # leave the multicast group again
  kubectl exec "$clientovsPod" -n kube-system -- ip netns exec "$clientNs" ip maddr del 01:00:5e:00:00:64 dev eth0
  kubectl exec "$serverovsPod" -n kube-system -- ip netns exec "$serverNs" ip maddr del 01:00:5e:00:00:64 dev eth0
}
917
+
918
# Run the multicast measurement: start an iperf multicast server in the given
# pod (logging locally), drive traffic from test-client, and print a table.
# Globals:   KUBE_OVN_NS, PERF_TIMES (read)
# Arguments: $1 - server pod name
genMulticastPerfResult() {
  local serverName=$1
  local logFile="$serverName.log"
  local size udpBw udpLostRate udpLat

  # start the multicast server in the background and remember the PID of the
  # local kubectl exec; killing $! is reliable, unlike grepping `ps` output
  kubectl exec "$serverName" -n "$KUBE_OVN_NS" -- iperf -s -B 224.0.0.100 -i 1 -u > "$logFile" &
  local serverPid=$!

  echo "=================================== multicast performance test ========================================================="
  printf "%-15s %-15s %-15s %-15s\n" "Size" "UDP Latency" "UDP Lost Rate" "UDP Bandwidth"
  for size in "64" "128" "512" "1k" "4k"; do
    # saturating run for bandwidth/loss, then an unthrottled run for latency
    kubectl exec test-client -n "$KUBE_OVN_NS" -- iperf -c 224.0.0.100 -u -T 32 -t "$PERF_TIMES" -i 1 -b 1000G -l "$size" > /dev/null
    udpBw=$(grep -oP '\d+\.?\d* [KMG]bits/sec' "$logFile" | tail -n 1)
    udpLostRate=$(grep -oP '\(\d+(\.\d+)?%\)' "$logFile" | tail -n 1)
    kubectl exec test-client -n "$KUBE_OVN_NS" -- iperf -c 224.0.0.100 -u -T 32 -t "$PERF_TIMES" -i 1 -l "$size" > /dev/null
    udpLat=$(grep -oP '\d+\.?\d* ms' "$logFile" | tail -n 1)
    printf "%-15s %-15s %-15s %-15s\n" "$size" "$udpLat" "$udpLostRate" "$udpBw"
  done

  echo "========================================================================================================================="

  # stop the background server; it may already have exited, so don't fail
  kill "$serverPid" 2>/dev/null || true

  rm -f -- "$logFile"
}
943
+
944
# Measure the recovery time of each ovn-central component (nb, sb, northd)
# after its leader pod is deleted.  getOvnCentralPod is re-run before each
# measurement so the freshly-elected leader pod names are used.
checkLeaderRecover() {
  local component
  for component in nb sb northd; do
    getOvnCentralPod
    getPodRecoverTime "$component"
    # settle between measurements (no pause needed after the last one)
    [ "$component" = northd ] || sleep 5
  done
}
955
+
956
# Delete the ovn-central leader pod for one component and report how long the
# ovn-central deployment takes to become fully available again.
# Globals:   OVN_NB_POD, OVN_SB_POD, OVN_NORTHD_POD (read)
# Arguments: $1 - component name: "nb", "sb" or "northd"
getPodRecoverTime() {
  local component_name=$1
  local start_time end_time elapsed_time replicas availableNum
  start_time=$(date +%s.%N)
  echo "Delete ovn central $component_name pod"
  case $component_name in
    nb)     kubectl delete pod "$OVN_NB_POD" -n kube-system ;;
    sb)     kubectl delete pod "$OVN_SB_POD" -n kube-system ;;
    northd) kubectl delete pod "$OVN_NORTHD_POD" -n kube-system ;;
  esac
  echo "Waiting for ovn central $component_name pod running"
  replicas=$(kubectl get deployment -n kube-system ovn-central -o jsonpath={.spec.replicas})
  # column 4 of `kubectl get deployment` is AVAILABLE
  availableNum=$(kubectl get deployment -n kube-system | grep ovn-central | awk '{print $4}')
  while [ "$availableNum" != "$replicas" ]; do
    availableNum=$(kubectl get deployment -n kube-system | grep ovn-central | awk '{print $4}')
    # BUGFIX: `usleep` is non-standard and takes microseconds, so
    # `usleep 0.001` was wrong on both counts; `sleep 0.001` polls as intended
    sleep 0.001
  done

  end_time=$(date +%s.%N)
  elapsed_time=$(echo "$end_time - $start_time" | bc)
  echo "================================ OVN $component_name recover takes $elapsed_time s =================================="
}
980
+
981
+
982
+
693
983
if [ $# -lt 1 ]; then
694
984
showHelp
695
985
exit 0
@@ -727,6 +1017,9 @@ case $subcommand in
727
1017
env-check)
728
1018
env-check
729
1019
;;
1020
+ perf)
1021
+ perf " $@ "
1022
+ ;;
730
1023
* )
731
1024
showHelp
732
1025
;;
0 commit comments