Commit 520ac826 authored by one's avatar one
Browse files

[rccl-tests] Add scripts and topo files for BW1000

parent b142b6fb
<graphs version="1">
<graph id="0" pattern="4" crossnic="0" nchannels="8" speedintra="24" speedinter="24" latencyinter="0" typeintra="NVL" typeinter="PXB" samechannels="0">
<channel>
<net dev="0"/>
<gpu dev="0"/>
<gpu dev="0x7"/>
<gpu dev="0x6"/>
<gpu dev="0x5"/>
<gpu dev="0x4"/>
<gpu dev="0x3"/>
<gpu dev="0x2"/>
<gpu dev="0x1"/>
<net dev="0"/>
</channel>
<channel>
<net dev="0x1"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<net dev="0x1"/>
</channel>
<channel>
<net dev="0x2"/>
<gpu dev="0x2"/>
<gpu dev="0x1"/>
<gpu dev="0"/>
<gpu dev="0x7"/>
<gpu dev="0x6"/>
<gpu dev="0x5"/>
<gpu dev="0x4"/>
<gpu dev="0x3"/>
<net dev="0x2"/>
</channel>
<channel>
<net dev="0x3"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<net dev="0x3"/>
</channel>
<channel>
<net dev="0x4"/>
<gpu dev="0x4"/>
<gpu dev="0x3"/>
<gpu dev="0x2"/>
<gpu dev="0x1"/>
<gpu dev="0"/>
<gpu dev="0x7"/>
<gpu dev="0x6"/>
<gpu dev="0x5"/>
<net dev="0x4"/>
</channel>
<channel>
<net dev="0x5"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<net dev="0x5"/>
</channel>
<channel>
<net dev="0x6"/>
<gpu dev="0x6"/>
<gpu dev="0x5"/>
<gpu dev="0x4"/>
<gpu dev="0x3"/>
<gpu dev="0x2"/>
<gpu dev="0x1"/>
<gpu dev="0"/>
<gpu dev="0x7"/>
<net dev="0x6"/>
</channel>
<channel>
<net dev="0x7"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<net dev="0x7"/>
</channel>
</graph>
<graph id="1" pattern="1" crossnic="0" nchannels="8" speedintra="24" speedinter="24" latencyinter="0" typeintra="NVL" typeinter="PXB" samechannels="0">
<channel>
<net dev="0"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<net dev="0"/>
</channel>
<channel>
<net dev="0x1"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<net dev="0x1"/>
</channel>
<channel>
<net dev="0x2"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<net dev="0x2"/>
</channel>
<channel>
<net dev="0x3"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<net dev="0x3"/>
</channel>
<channel>
<net dev="0x4"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<net dev="0x4"/>
</channel>
<channel>
<net dev="0x5"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<net dev="0x5"/>
</channel>
<channel>
<net dev="0x6"/>
<gpu dev="0x6"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<net dev="0x6"/>
</channel>
<channel>
<net dev="0x7"/>
<gpu dev="0x7"/>
<gpu dev="0"/>
<gpu dev="0x1"/>
<gpu dev="0x2"/>
<gpu dev="0x3"/>
<gpu dev="0x4"/>
<gpu dev="0x5"/>
<gpu dev="0x6"/>
<net dev="0x7"/>
</channel>
</graph>
<graph id="2" pattern="6" crossnic="0" nchannels="0" speedintra="0" speedinter="0" latencyinter="0" typeintra="LOC" typeinter="LOC" samechannels="0"/>
<graph id="2" pattern="3" crossnic="0" nchannels="0" speedintra="0" speedinter="0" latencyinter="0" typeintra="LOC" typeinter="LOC" samechannels="0"/>
<graph id="3" pattern="5" crossnic="0" nchannels="0" speedintra="0" speedinter="0" latencyinter="0" typeintra="LOC" typeinter="LOC" samechannels="0"/>
</graphs>
#!/bin/bash
set -e
# =================================================
# Helper functions
# =================================================
# Print usage text for this wrapper to stdout.
# The heredoc expands ${tcp_iface} and ${ssh_port} at call time, so the
# defaults shown are whatever the globals hold when help is invoked
# (the globals section below runs before argument parsing, so the shown
# defaults are correct for -h/--help).
help() {
cat << EOF
RCCL Tests MPI run helper script
Usage: $(basename "$0") [OPTIONS]
OPTIONS:
-h, --help Show this help message and exit
-np Total number of processes (default: sum of per-node counts in --hosts)
-H, --hosts Comma-separated list of nodes with optional process count per node
Format: node01:8,node02:8
If count is omitted, falls back to auto-detected GPU count per node.
--tcp-iface TCP interface to use for communication (default: ${tcp_iface})
--ssh-port SSH port to use for remote connections (default: ${ssh_port})
EOF
}
# =================================================
# Global variables
# =================================================
np=                                        # total MPI processes; derived later if left empty
hosts_raw=                                 # raw --hosts argument, e.g. node01:8,node02:8
tcp_iface=p14p2                            # default TCP interface for MPI traffic
ssh_port=3333                              # default SSH port for remote nodes
rccltest_runscript="${PWD}/run_rccltest"   # per-rank launcher executed by mpirun
rccltest_args=()                           # arguments forwarded to the rccl-tests binary
mpi_bin=/opt/mpi/bin/mpirun
ompi_prefix=/opt/mpi
# Detect the number of GPUs per node (used as fallback when count is not specified in --hosts).
# BUGFIX: 'grep -c' exits non-zero when it finds no matches (it still prints "0"),
# and a missing hy-smi fails the pipeline the same way. Because the script runs
# under 'set -e', the command substitution's non-zero status used to abort the
# script here, so the "default to 8" fallback below could never run. The
# trailing '|| true' keeps the assignment's status zero in that case.
ngpu_per_node=$(hy-smi --showid 2>/dev/null | grep -ic "Device ID" || true)
if [[ -z "${ngpu_per_node}" || "${ngpu_per_node}" -eq 0 ]]; then
echo "[WRAPPER] Failed to get the number of GPUs per node via hy-smi. Defaulting to 8."
ngpu_per_node=8
else
echo "[WRAPPER] Detected ${ngpu_per_node} GPUs per node."
fi
# =================================================
# Parameter parsing
# =================================================
# Recognised flags are consumed here; everything else (and everything after
# a literal '--') is collected verbatim into rccltest_args for the test binary.
while [[ $# -gt 0 ]]; do
  opt=${1}
  case "${opt}" in
    -h|--help)
      help
      exit 0
      ;;
    -np)
      np=${2}
      shift 2
      ;;
    -H|--hosts)
      hosts_raw=${2}
      shift 2
      ;;
    --tcp-iface)
      tcp_iface=${2}
      shift 2
      ;;
    --ssh-port)
      ssh_port=${2}
      shift 2
      ;;
    --)
      # Explicit separator: forward the remainder untouched.
      shift
      rccltest_args+=("$@")
      break
      ;;
    *)
      rccltest_args+=("${opt}")
      shift
      ;;
  esac
done
# =================================================
# Parse hosts into parallel arrays: node_names[], node_slots[]
# Input format: node01:8,node02:8 (count optional, falls back to ngpu_per_node)
# =================================================
parse_hosts() {
  local -a items
  local item host count
  node_names=()
  node_slots=()
  IFS=',' read -ra items <<< "${hosts_raw}"
  for item in "${items[@]}"; do
    host=${item%%:*}
    if [[ "${item}" == *:* ]]; then
      count=${item##*:}
    else
      # No explicit count: fall back to the auto-detected per-node GPU count.
      count=${ngpu_per_node}
    fi
    node_names+=("${host}")
    node_slots+=("${count}")
  done
}
# =================================================
# Run rccl test script
# =================================================
# Single-node mode when --hosts is absent; otherwise build the -H host list,
# sync binaries/configs to the remote nodes, and launch across all of them.
if [ -z "${hosts_raw}" ]; then
# Run single-node test if --hosts is not set
echo "[WRAPPER] No compute nodes specified. Running in single-node mode."
# Default np to ngpu_per_node when not set
np="${np:-${ngpu_per_node}}"
echo "Using np=${np}"
# The $(env | grep ...) substitution forwards every NCCL_/RCCL_/UCX_/HSA_
# variable name from the current environment to the ranks via '-x NAME'.
${mpi_bin} --allow-run-as-root \
--bind-to none \
--mca pml ucx \
--mca osc ucx \
--mca btl ^vader,tcp,openib,uct \
--mca coll ^hcoll \
$(env | grep -E '^(NCCL|RCCL|UCX|HSA)_' | cut -d= -f1 | awk '{print "-x", $1}') \
-np ${np} \
${rccltest_runscript} "${rccltest_args[@]}"
else
# Multi-node mode
echo "[WRAPPER] Running in multi-node mode."
parse_hosts
# Build MPI -H string and auto-sum np
hosts_string=""
np_sum=0
for i in "${!node_names[@]}"; do
hosts_string+="${node_names[$i]}:${node_slots[$i]},"
(( np_sum += node_slots[$i] ))
done
# Strip the trailing comma left by the loop above.
hosts_string="${hosts_string%,}"
# -np overrides auto-sum if explicitly provided
np="${np:-${np_sum}}"
echo "[WRAPPER] MPI hosts: ${hosts_string}"
echo "[WRAPPER] Total processes (np): ${np}"
echo "[WRAPPER] Using TCP interface: ${tcp_iface}"
echo "[WRAPPER] Using SSH port: ${ssh_port}"
# Copy files to remote nodes (skip current node)
current_node=$(hostname)
copyto_hosts=()
for name in "${node_names[@]}"; do
if [[ "${name}" != "${current_node}" ]]; then
copyto_hosts+=("${name}")
fi
done
if [ ${#copyto_hosts[@]} -gt 0 ]; then
echo "[WRAPPER] Copying files to remote nodes in parallel: ${copyto_hosts[*]}"
# Fan out three rsync jobs per remote node in the background, then barrier.
# NOTE(review): unquoted ${PWD}/${NCCL_TOPO_FILE}/${NCCL_GRAPH_FILE} break on
# paths containing spaces, and an unset NCCL_*_FILE silently drops an argument.
# NOTE(review): a bare 'wait' does not propagate background-job failures even
# under 'set -e', so a failed rsync is not detected here — confirm acceptable.
for node in "${copyto_hosts[@]}"; do
rsync -az -e "ssh -p ${ssh_port}" ${PWD}/build ${PWD}/scripts ${rccltest_runscript} ${NCCL_TOPO_FILE} ${NCCL_GRAPH_FILE} "${node}:${PWD}/" &
rsync -az -e "ssh -p ${ssh_port}" /opt/dtk/rccl/lib ${node}:/opt/dtk/rccl/ &
rsync -az -e "ssh -p ${ssh_port}" /opt/mpi /opt/ucx ${node}:/opt/ &
done
wait
echo "[WRAPPER] Files synchronized successfully."
fi
# NOTE(review): unlike the single-node branch, this launch does not prepend
# ${rccltest_runscript} — the test binary in rccltest_args is run directly.
# Presumably intentional; verify against how the run scripts invoke this.
${mpi_bin} --allow-run-as-root \
--prefix ${ompi_prefix} \
--bind-to none \
--mca pml ucx \
--mca btl_tcp_if_include ${tcp_iface} \
--mca plm_rsh_args "-p ${ssh_port}" \
$(env | grep -E '^(NCCL|RCCL|UCX|HSA|HIP)_' | cut -d= -f1 | awk '{print "-x", $1}') \
-x PATH -x LD_LIBRARY_PATH \
-np ${np} \
-H ${hosts_string} \
"${rccltest_args[@]}"
fi
#!/bin/bash
# all_reduce benchmark driver for BW1000: local runs at 2/4/8 ranks, then
# 16- and 32-rank multi-node runs via the mpirun_rccltest wrapper.
set -e
SSH_PORT=3333
unset UCX_HOME
# export UCX_LOG_LEVEL=fatal
# Dump the topology/graph NCCL actually derives, for later inspection.
export NCCL_TOPO_DUMP_FILE=${PWD}/topo-generated.xml
export NCCL_GRAPH_DUMP_FILE=${PWD}/graph-generated.xml
# export NCCL_DEBUG=INFO
# export NCCL_DEBUG_SUBSYS=ALL
#export RCCL_SDMA_COPY_ENABLE=1
#export RCCL_SDMA_LINK_MODE=0
# PCIe mixed-link tuning (disabled)
# export NCCL_SIMPLE_CHANNELS=32
# export RCCL_P2P_XHCL_CHANNEL_NUM=31
# export RCCL_COLL_XHCL_CHANNEL_NUM=28
export HSA_FORCE_FINE_GRAIN_PCIE=1
export NCCL_SOCKET_IFNAME=p14p2
# Leading '=' requests exact HCA-name matching (per NCCL env var docs).
export NCCL_IB_HCA="=mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_7,mlx5_8,mlx5_9,mlx5_10"
export NCCL_NET_GDR_LEVEL=SYS
export NCCL_NET_GDR_READ=1
# export NCCL_ALGO=Ring
# export NCCL_PROTO=Simple
export NCCL_SIMPLE_CHANNELS=32
unset NCCL_NCHANNELS_PER_PEER
# Use the hand-written BW1000 topology instead of the auto-detected one.
export NCCL_TOPO_FILE=${PWD}/topo-gdr-bw1000.xml
# export NCCL_GRAPH_FILE=${PWD}/graph-16r-allreduce.xml
# Sweep message sizes 4B..16GiB (-f 2 doubles each step), 3 warmup iters, 1 GPU per rank.
./mpirun_rccltest -np 2 \
./build/all_reduce_perf -b 4 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 4 \
./build/all_reduce_perf -b 4 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 8 \
./build/all_reduce_perf -b 4 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 16 -H node01,node02 --ssh-port ${SSH_PORT} \
./build/all_reduce_perf -b 4 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 32 -H node01,node02,node03,node04 --ssh-port ${SSH_PORT} \
./build/all_reduce_perf -b 4 -e 16G -f 2 -w 3 -g 1
#!/bin/bash
# alltoall benchmark driver for BW1000: local runs at 2/4/8 ranks, then
# 16- and 32-rank multi-node runs. The -b start size scales with rank count.
set -e
SSH_PORT=3333
unset UCX_HOME
# export UCX_LOG_LEVEL=fatal
# Dump the topology/graph NCCL actually derives, for later inspection.
export NCCL_TOPO_DUMP_FILE=${PWD}/topo-generated.xml
export NCCL_GRAPH_DUMP_FILE=${PWD}/graph-generated.xml
# export NCCL_DEBUG=INFO
# export NCCL_DEBUG_SUBSYS=ALL
#export RCCL_SDMA_COPY_ENABLE=1
#export RCCL_SDMA_LINK_MODE=0
# PCIe mixed-link tuning (disabled)
# export NCCL_SIMPLE_CHANNELS=32
# export RCCL_P2P_XHCL_CHANNEL_NUM=31
# export RCCL_COLL_XHCL_CHANNEL_NUM=28
export HSA_FORCE_FINE_GRAIN_PCIE=1
export NCCL_SOCKET_IFNAME=p14p2
# Leading '=' requests exact HCA-name matching (per NCCL env var docs).
export NCCL_IB_HCA="=mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_7,mlx5_8,mlx5_9,mlx5_10"
# NOTE(review): unlike the all_reduce script this sets NCCL_P2P_LEVEL=SYS and
# GDR level PHB (not SYS) — presumably deliberate alltoall tuning; confirm.
export NCCL_P2P_LEVEL=SYS
export NCCL_NET_GDR_LEVEL=PHB
export NCCL_NET_GDR_READ=1
unset NCCL_NCHANNELS_PER_PEER
# Use the hand-written BW1000 topology instead of the auto-detected one.
export NCCL_TOPO_FILE=${PWD}/topo-gdr-bw1000.xml
./mpirun_rccltest -np 2 \
./build/alltoall_perf -b 32 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 4 \
./build/alltoall_perf -b 64 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 8 \
./build/alltoall_perf -b 128 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 16 -H node01,node02 --ssh-port ${SSH_PORT} \
./build/alltoall_perf -b 256 -e 16G -f 2 -w 3 -g 1
./mpirun_rccltest -np 32 -H node01,node02,node03,node04 --ssh-port ${SSH_PORT} \
./build/alltoall_perf -b 512 -e 16G -f 2 -w 3 -g 1
#!/bin/bash
# Per-GPU point-to-point check: for each GPU index, pin both ranks to that
# device via HIP_VISIBLE_DEVICES and run a 2-rank sendrecv between node01
# and node02 (one process per node) at a fixed 2 GiB message size.
set -e
SSH_PORT=3333
unset UCX_HOME
# Dump the topology/graph NCCL actually derives, for later inspection.
export NCCL_TOPO_DUMP_FILE=${PWD}/topo-generated.xml
export NCCL_GRAPH_DUMP_FILE=${PWD}/graph-generated.xml
# export NCCL_DEBUG=INFO
# export NCCL_DEBUG_SUBSYS=ALL
export HSA_FORCE_FINE_GRAIN_PCIE=1
export NCCL_SOCKET_IFNAME=p14p2
# Leading '=' requests exact HCA-name matching (per NCCL env var docs).
export NCCL_IB_HCA="=mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_7,mlx5_8,mlx5_9,mlx5_10"
export NCCL_P2P_LEVEL=SYS
export NCCL_NET_GDR_LEVEL=PHB
export NCCL_NET_GDR_READ=1
unset NCCL_NCHANNELS_PER_PEER
# Use the hand-written BW1000 topology instead of the auto-detected one.
export NCCL_TOPO_FILE=${PWD}/topo-gdr-bw1000.xml
for g in {0..7}; do # NOTE(review): hard-codes 8 GPUs per node — confirm on target hosts
echo
echo "Running with GPU ${g}"
export HIP_VISIBLE_DEVICES=${g}
./mpirun_rccltest -np 2 -H node01:1,node02:1 --ssh-port ${SSH_PORT} \
./build/sendrecv_perf -b 2G -e 2G -f 2 -w 3 -g 1
echo
done
<system version="2">
<cpu numaid="3">
<pci busid="0000:99:00.0" class="0x060400">
<pci busid="0000:9b:00.0" class="0x020700">
<nic>
<net name="mlx5_1" dev="0" gdr="1"/>
<net name="mlx5_2" dev="1" gdr="1"/>
</nic>
</pci>
<pci busid="0000:9d:00.0" class="0x060400">
<pci busid="0000:9f:00.0" class="0x0b4000">
<gpu dev="0" sm="80" gcn="gfx936" arch="169983" rank="0" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
<pci busid="0000:54:00.0" class="0x060400">
<pci busid="0000:56:00.0" class="0x0b4000">
<gpu dev="1" sm="80" gcn="gfx936" arch="169983" rank="1" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
</pci>
</cpu>
<cpu numaid="0">
<pci busid="0000:01:00.0" class="0x060400">
<pci busid="0000:06:00.0" class="0x020700">
<nic>
<net name="mlx5_3" dev="2" gdr="1"/>
<net name="mlx5_4" dev="3" gdr="1"/>
</nic>
</pci>
<pci busid="0000:03:00.0" class="0x060400">
<pci busid="0000:05:00.0" class="0x0b4000">
<gpu dev="3" sm="80" gcn="gfx936" arch="169983" rank="3" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
<pci busid="0000:5b:00.0" class="0x060400">
<pci busid="0000:5d:00.0" class="0x0b4000">
<gpu dev="2" sm="80" gcn="gfx936" arch="169983" rank="2" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
</pci>
</cpu>
<cpu numaid="7">
<pci busid="0000:e4:00.0" class="0x060400">
<pci busid="0000:e9:00.0" class="0x020700">
<nic>
<net name="mlx5_7" dev="4" gdr="1"/>
<net name="mlx5_8" dev="5" gdr="1"/>
</nic>
</pci>
<pci busid="0000:e6:00.0" class="0x060400">
<pci busid="0000:e8:00.0" class="0x0b4000">
<gpu dev="4" sm="80" gcn="gfx936" arch="169983" rank="4" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
<pci busid="0000:bf:00.0" class="0x060400">
<pci busid="0000:c1:00.0" class="0x0b4000">
<gpu dev="5" sm="80" gcn="gfx936" arch="169983" rank="5" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
</pci>
</cpu>
<cpu numaid="4">
<pci busid="0000:ab:00.0" class="0x060400">
<pci busid="0000:ad:00.0" class="0x020700">
<nic>
<net name="mlx5_9" dev="6" gdr="1"/>
<net name="mlx5_10" dev="7" gdr="1"/>
</nic>
</pci>
<pci busid="0000:af:00.0" class="0x060400">
<pci busid="0000:b1:00.0" class="0x0b4000">
<gpu dev="7" sm="80" gcn="gfx936" arch="169983" rank="7" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
<pci busid="0000:c8:00.0" class="0x060400">
<pci busid="0000:ca:00.0" class="0x0b4000">
<gpu dev="6" sm="80" gcn="gfx936" arch="169983" rank="6" gdr="1">
<xgmi target="0000:20:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:10:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:13:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:16:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:19:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1c:00.0" count="1" tclass="0x068000"/>
<xgmi target="0000:1f:00.0" count="1" tclass="0x068000"/>
</gpu>
</pci>
</pci>
</pci>
</cpu>
</system>
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment