# SPDX-License-Identifier: Apache-2.0
3 ##############################################################################
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
# Select nodes whose capacity advertises at least two
# "intel.com/intel_sriov_700" SR-IOV VFs; the topology-manager test case
# needs such a NIC on the node to run.
sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_700"!=null) and ((.status.capacity."intel.com/intel_sriov_700"|tonumber)>=2)) | .metadata.name')
# No capable node found: report and skip the test case.
if [ -z "$sriov_capable_nodes" ]; then
echo "Ethernet adaptor version is not set. Topology manager test case cannot run on this machine"
# A matching SR-IOV NIC was found; announce that the test can proceed.
# Fix: "avaiable" -> "available" (typo in the emitted message).
echo "NIC card specs match. Topology manager option available for this version."
26 pod_name=pod-topology-manager
27 csar_id=bd55cccc-bf34-11ea-b3de-0242ac130004
29 function create_pod_yaml {
32 pushd ${CSAR_DIR}/${csar_id}
34 cat << POD > $pod_name.yaml
40 k8s.v1.cni.cncf.io/networks: sriov-eno2
44 image: docker.io/centos/tools:latest
51 intel.com/intel_sriov_700: '1'
55 intel.com/intel_sriov_700: '1'
60 create_pod_yaml ${csar_id}
61 kubectl delete pod $pod_name --ignore-not-found=true --now --wait
62 kubectl create -f ${CSAR_DIR}/${csar_id}/$pod_name.yaml --validate=false
64 wait_for_pod $pod_name
# Locate the pod: its UID (the key kubelet uses in cpu_manager_state), the
# node it was scheduled on, and that node's address for the ssh probes below.
# Fix: reuse $pod_name instead of repeating the literal pod name (the two
# must stay in sync), and quote all expansions.
uid=$(kubectl get pod "$pod_name" -o jsonpath='{.metadata.uid}')
node_name=$(kubectl get pod "$pod_name" -o jsonpath='{.spec.nodeName}')
node_ip=$(kubectl get node "$node_name" -o jsonpath='{.status.addresses[].address}')
# Read kubelet's CPU-manager checkpoint on the node and extract the CPU
# assignment recorded for this pod, keyed first by pod UID.
# NOTE(review): the second level of .entries is indexed here with the pod
# name; kubelet keys that level by container name — this works only if the
# container is named like the pod; confirm against the manifest.
cpu_core=$(ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $node_ip -- cat /var/lib/kubelet/cpu_manager_state | jq -r --arg UID "${uid}" --arg POD_NAME "${pod_name}" '.entries[$UID][$POD_NAME]')
# Number of NUMA nodes on the host (awk leaves a leading space; the numeric
# comparisons below tolerate it).
numa_node_number=$(ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $node_ip -- lscpu | grep "NUMA node(s)" | awk -F ':' '{print $2}')
# Map the allocated core to its NUMA node: for each node, take its CPU list
# from lscpu (e.g. "0-3,8-11"), split on commas, and test each min-max range.
for (( node=0; node<$numa_node_number; node++ )); do
ranges=$(ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $node_ip -- lscpu | grep "NUMA node"$node | awk -F ':' '{print $2}')
ranges=(${ranges//,/ })
for range in ${ranges[@]}; do
min=$(echo $range | awk -F '-' '{print $1}')
max=$(echo $range | awk -F '-' '{print $2}')
if [ $cpu_core -ge $min ] && [ $cpu_core -le $max ]; then
# PCI address of the VF handed to the container via the device plugin's
# PCIDEVICE_* environment variable; sed strips the trailing \r that
# `kubectl exec -it` (tty mode) appends to each output line.
vf_pci=$(kubectl exec -it $pod_name -- env | grep PCIDEVICE_INTEL_COM_INTEL_SRIOV_700 | awk -F '=' '{print $2}' | sed 's/\r//g')
# NUMA node of that VF as reported by sysfs on the host.
vf_numa_node=$(ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $node_ip -- cat /sys/bus/pci/devices/$vf_pci/numa_node)
echo "The allocated cpu core is:" $cpu_core
# cpu_numa_node: NUMA node of the allocated core, produced by the scan above.
echo "The numa node of the allocated cpu core is:" $cpu_numa_node
echo "The PCI address of the allocated vf is:" $vf_pci
echo "The numa node of the allocated vf is:" $vf_numa_node
# The scenario passes when the exclusive CPU and the VF share a NUMA node
# (`==` inside [ ] is a bash extension — string equality).
if [ $cpu_numa_node == $vf_numa_node ]; then
echo "The allocated cpu core and vf are on the same numa node"
echo "The allocated cpu core and vf are on different numa nodes"
# Clean up the test pod.
kubectl delete pod $pod_name --now