@@ -10,6 +10,8 @@ REGISTRY="kubeovn"
 OVN_NORTHD_POD=
 PERF_TIMES=5
 PERF_LABEL="PerfTest"
+CONN_CHECK_LABEL="conn-check"
+CONN_CHECK_SERVER="conn-check-server"
 
 showHelp(){
   echo "kubectl ko {subcommand} [option...]"
@@ -25,7 +27,7 @@ showHelp(){
   echo "  trace ...    trace ovn microflow of specific packet"
   echo "    trace {namespace/podname} {target ip address} [target mac address] {icmp|tcp|udp} [target tcp/udp port]    trace ICMP/TCP/UDP"
   echo "    trace {namespace/podname} {target ip address} [target mac address] arp {request|reply}    trace ARP request/reply"
-  echo "  diagnose {all|node} [nodename]    diagnose connectivity of all nodes or a specific node"
+  echo "  diagnose {all|node|subnet} [nodename|subnetName]    diagnose connectivity of all nodes, a specific node, or a specific subnet (via a temporary conn-check DaemonSet)"
   echo "  env-check    check the environment configuration"
   echo "  tuning {install-fastpath|local-install-fastpath|remove-fastpath|install-stt|local-install-stt|remove-stt} {centos7|centos8} [kernel-devel-version]    deploy kernel optimisation components to the system"
   echo "  reload    restart all kube-ovn components"
@@ -475,6 +477,67 @@ checkLeader(){
   echo "ovn-$component leader check ok"
 }
 
+applyConnServerDaemonset(){
+  subnetName=$1
+
+  if [ $(kubectl get subnet $subnetName | wc -l) -eq 0 ]; then
+    echo "no subnet $subnetName exists!"
+    exit 1
+  fi
+
+  imageID=$(kubectl get ds -n $KUBE_OVN_NS kube-ovn-pinger -o jsonpath={.spec.template.spec.containers[0].image})
+  tmpFileName="conn-server.yaml"
+  cat <<EOF > $tmpFileName
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: $subnetName-$CONN_CHECK_SERVER
+  namespace: $KUBE_OVN_NS
+spec:
+  selector:
+    matchLabels:
+      app: $CONN_CHECK_LABEL
+  template:
+    metadata:
+      annotations:
+        ovn.kubernetes.io/logical_switch: $subnetName
+      labels:
+        app: $CONN_CHECK_LABEL
+    spec:
+      serviceAccountName: ovn
+      containers:
+        - name: $subnetName-$CONN_CHECK_SERVER
+          imagePullPolicy: IfNotPresent
+          image: $imageID
+          command:
+            - /kube-ovn/kube-ovn-pinger
+          args:
+            - --enable-verbose-conn-check=true
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+EOF
+  kubectl apply -f $tmpFileName
+  rm $tmpFileName
+
+  isfailed=true
+  for i in {0..59}
+  do
+    if kubectl wait pod --for=condition=Ready -l app=$CONN_CHECK_LABEL -n $KUBE_OVN_NS; then
+      isfailed=false
+      break
+    fi
+    sleep 1
+  done
+
+  if $isfailed; then
+    echo "Error: ds $subnetName-$CONN_CHECK_SERVER pods not ready"
+    return
+  fi
+}
+
 diagnose(){
   kubectl get crd vpcs.kubeovn.io
   kubectl get crd vpc-nat-gateways.kubeovn.io
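The heredoc above renders a throwaway DaemonSet named $subnetName-$CONN_CHECK_SERVER and polls for up to 60 seconds until its pods are Ready. The same check can be reproduced by hand with kubectl's built-in timeout; the subnet name ovn-default and the kube-system namespace (the plugin's $KUBE_OVN_NS) are assumptions here:

    kubectl -n kube-system get ds ovn-default-conn-check-server
    kubectl -n kube-system wait pod --for=condition=Ready -l app=conn-check --timeout=60s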
@@ -562,9 +625,27 @@ diagnose(){
     echo "### finish diagnose node $nodeName"
     echo ""
     ;;
+  subnet)
+    subnetName="$2"
+    applyConnServerDaemonset $subnetName
+
+    if [ $(kubectl get ds kube-ovn-cni -n $KUBE_OVN_NS -oyaml | grep enable-verbose-conn-check | wc -l) -eq 0 ]; then
+      echo "Warning: kube-ovn-cni is not running with --enable-verbose-conn-check; the node tcp/udp connectivity check will fail"
+    fi
+
+    pingers=$(kubectl -n $KUBE_OVN_NS get po --no-headers -o custom-columns=NAME:.metadata.name -l app=kube-ovn-pinger)
+    for pinger in $pingers
+    do
+      echo "#### pinger diagnose results:"
+      kubectl exec -n $KUBE_OVN_NS "$pinger" -- /kube-ovn/kube-ovn-pinger --mode=job --ds-name=$subnetName-$CONN_CHECK_SERVER --ds-namespace=$KUBE_OVN_NS --enable-verbose-conn-check=true
+      echo ""
+    done
+
+    kubectl delete ds $subnetName-$CONN_CHECK_SERVER -n $KUBE_OVN_NS
+    ;;
   *)
     echo "type $type not supported"
-    echo "kubectl ko diagnose {all|node} [nodename]"
+    echo "kubectl ko diagnose {all|node|subnet} [nodename|subnetName]"
    ;;
  esac
}
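If a run is interrupted between applyConnServerDaemonset and the final kubectl delete, the check DaemonSet is left behind and has to be cleaned up manually; again assuming subnet ovn-default in kube-system:

    kubectl -n kube-system delete ds ovn-default-conn-check-server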
@@ -1121,6 +1202,9 @@ perf(){
   echo "Start doing pod multicast network performance"
   multicastPerfTest
 
+  echo "Start doing host multicast network performance"
+  multicastHostPerfTest
+
   echo "Start doing leader recover time test"
   checkLeaderRecover
 
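The new host multicast step runs inside the existing perf flow, so no new CLI surface is added; the whole suite would still be driven by something like the following (exact perf options depend on the plugin version):

    kubectl ko perf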
@@ -1149,6 +1233,34 @@ unicastPerfTest() {
   rm temp_perf_result.log
 }
 
+getAddressNic() {
+  podName=$1
+  ipAddress=$2
+
+  interface=$(kubectl exec $podName -n $KUBE_OVN_NS -- ip -o addr show | awk '{split($4, a, "/"); print $2, a[1]}' | awk -v ip="$ipAddress" '$0 ~ ip {print $1}')
+  echo "$interface"
+}
+
+multicastHostPerfTest() {
+  clientNode=$(kubectl get pod test-host-client -n $KUBE_OVN_NS -o jsonpath={.spec.nodeName})
+  serverNode=$(kubectl get pod test-host-server -n $KUBE_OVN_NS -o jsonpath={.spec.nodeName})
+
+  clientHostIP=$(kubectl get pod test-host-client -n $KUBE_OVN_NS -o jsonpath={.status.hostIP})
+  serverHostIP=$(kubectl get pod test-host-server -n $KUBE_OVN_NS -o jsonpath={.status.hostIP})
+
+  clientNic=$(getAddressNic test-host-client $clientHostIP)
+  serverNic=$(getAddressNic test-host-server $serverHostIP)
+
+  clientovsPod=$(kubectl get pod -owide -A | grep ovs-ovn | grep $clientNode | awk '{print $2}')
+  kubectl exec $clientovsPod -n kube-system -- ip maddr add 01:00:5e:00:00:64 dev $clientNic
+  serverovsPod=$(kubectl get pod -owide -A | grep ovs-ovn | grep $serverNode | awk '{print $2}')
+  kubectl exec $serverovsPod -n kube-system -- ip maddr add 01:00:5e:00:00:64 dev $serverNic
+  genMulticastPerfResult test-host-server test-host-client
+
+  kubectl exec $clientovsPod -n kube-system -- ip maddr del 01:00:5e:00:00:64 dev $clientNic
+  kubectl exec $serverovsPod -n kube-system -- ip maddr del 01:00:5e:00:00:64 dev $serverNic
+}
+
 multicastPerfTest() {
   clientNode=$(kubectl get pod test-client -n $KUBE_OVN_NS -o jsonpath={.spec.nodeName})
   serverNode=$(kubectl get pod test-server -n $KUBE_OVN_NS -o jsonpath={.spec.nodeName})
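getAddressNic flattens `ip -o addr show` into "interface address" pairs and picks the interface whose address matches. The pipeline can be tried on any Linux host; the address 192.168.1.10 and the eth0 result below are assumptions:

    $ ip -o addr show | awk '{split($4, a, "/"); print $2, a[1]}' | awk -v ip="192.168.1.10" '$0 ~ ip {print $1}'
    eth0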
@@ -1158,13 +1270,14 @@ multicastPerfTest() {
   kubectl exec $clientovsPod -n kube-system -- ip netns exec $clientNs ip maddr add 01:00:5e:00:00:64 dev eth0
   serverovsPod=$(kubectl get pod -owide -A | grep ovs-ovn | grep $serverNode | awk '{print $2}')
   kubectl exec $serverovsPod -n kube-system -- ip netns exec $serverNs ip maddr add 01:00:5e:00:00:64 dev eth0
-  genMulticastPerfResult test-server
+  genMulticastPerfResult test-server test-client
   kubectl exec $clientovsPod -n kube-system -- ip netns exec $clientNs ip maddr del 01:00:5e:00:00:64 dev eth0
   kubectl exec $serverovsPod -n kube-system -- ip netns exec $serverNs ip maddr del 01:00:5e:00:00:64 dev eth0
 }
 
 genMulticastPerfResult() {
   serverName=$1
+  clientName=$2
 
   start_server_cmd="iperf -s -B 224.0.0.100 -i 1 -u"
   kubectl exec $serverName -n $KUBE_OVN_NS -- $start_server_cmd > $serverName.log &
@@ -1173,10 +1286,10 @@ genMulticastPerfResult() {
   printf "%-15s %-15s %-15s %-15s\n" "Size" "UDP Latency" "UDP Lost Rate" "UDP Bandwidth"
   for size in "64" "128" "512" "1k" "4k"
   do
-    kubectl exec test-client -n $KUBE_OVN_NS -- iperf -c 224.0.0.100 -u -T 32 -t $PERF_TIMES -i 1 -b 1000G -l $size > /dev/null
+    kubectl exec $clientName -n $KUBE_OVN_NS -- iperf -c 224.0.0.100 -u -T 32 -t $PERF_TIMES -i 1 -b 1000G -l $size > /dev/null
     udpBw=$(cat $serverName.log | grep -oP '\d+\.?\d* [KMG]bits/sec' | tail -n 1)
     udpLostRate=$(cat $serverName.log | grep -oP '\(\d+(\.\d+)?%\)' | tail -n 1)
-    kubectl exec test-client -n $KUBE_OVN_NS -- iperf -c 224.0.0.100 -u -T 32 -t $PERF_TIMES -i 1 -l $size > /dev/null
+    kubectl exec $clientName -n $KUBE_OVN_NS -- iperf -c 224.0.0.100 -u -T 32 -t $PERF_TIMES -i 1 -l $size > /dev/null
     udpLat=$(cat $serverName.log | grep -oP '\d+\.?\d* ms' | tail -n 1)
     printf "%-15s %-15s %-15s %-15s\n" "$size" "$udpLat" "$udpLostRate" "$udpBw"
   done
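The three greps parse the iperf (iperf2) UDP server report written to $serverName.log. An illustrative report line of the shape they match, with made-up numbers:

    [  3] 0.0- 5.0 sec  1.25 MBytes  2.10 Mbits/sec   0.012 ms    0/  893 (0%)

udpBw takes the last bandwidth token, udpLostRate the last (N%) token, and udpLat the last millisecond token.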
@@ -1218,7 +1331,7 @@ getPodRecoverTime(){
   while [ $availableNum != $replicas ]
   do
     availableNum=$(kubectl get deployment -n kube-system | grep ovn-central | awk {'print $4'})
-    usleep 0.001
+    sleep 0.001
   done
 
   end_time=$(date +%s.%N)
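usleep takes integer microseconds (so `usleep 0.001` slept for at most 0 µs) and is absent from many minimal images; GNU coreutils sleep accepts fractional seconds, so the replacement polls roughly once per millisecond:

    sleep 0.001   # GNU sleep accepts fractional seconds; usleep would need "1000" (µs)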