Commit 07ac2dce authored by Gilbert Lee's avatar Gilbert Lee
Browse files

Initial commit

parents
# Changelog for TransferBench
## v1.01
### Added
- Adding USE_SINGLE_STREAM feature
- All Links that execute on the same GPU device are executed with a single kernel launch on a single stream
- Does not work with USE_HIP_CALL and forces USE_SINGLE_SYNC to collect timings
- Adding ability to request coherent / fine-grained host memory ('B')
### Changed
- Separating TransferBench from RCCL repo
- Peer-to-peer benchmark mode now works with OUTPUT_TO_CSV
- Topology display now works with OUTPUT_TO_CSV
- Moving documentation about config file into example.cfg
### Removed
- Removed config file generation
- Removed show pointer address environment variable (SHOW_ADDR)
/*
Copyright (c) 2021-2022 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef ENVVARS_HPP
#define ENVVARS_HPP
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
#include <hip/hip_runtime.h>
// This class manages environment variables that affect TransferBench
class EnvVars
{
public:
  // Default configuration values
  int const DEFAULT_NUM_WARMUPS = 3;
  int const DEFAULT_NUM_ITERATIONS = 10;
  int const DEFAULT_SAMPLING_FACTOR = 1;
  int const DEFAULT_NUM_CPU_PER_LINK = 4;

  // Environment variables
  int useHipCall;      // Use hipMemcpy/hipMemset instead of custom shader kernels
  int useMemset;       // Perform a memset instead of a copy (ignores source memory)
  int useSingleSync;   // Perform synchronization only once after all iterations instead of per iteration
  int useInteractive;  // Pause for user-input before starting transfer loop
  int combineTiming;   // Combines the timing with kernel launch
  int outputToCsv;     // Output in CSV format
  int byteOffset;      // Byte-offset for memory allocations
  int numWarmups;      // Number of un-timed warmup iterations to perform
  int numIterations;   // Number of timed iterations to perform
  int samplingFactor;  // Affects how many different values of N are generated (when N set to 0)
  int numCpuPerLink;   // Number of CPU child threads to use per CPU link
  int sharedMemBytes;  // Amount of shared memory to use per threadblock
  int blockBytes;      // Each CU, except the last, gets a multiple of this many bytes to copy
  int usePcieIndexing; // Base GPU indexing on PCIe address instead of HIP device
  int useSingleStream; // Use a single stream per device instead of per Link. Can not be used with USE_HIP_CALL
  std::vector<float> fillPattern; // Pattern of floats used to fill source data

  // Constructor that reads all environment variables, applying defaults for
  // any that are unset, then validates the resulting configuration.
  // Exits the process with an error message on any invalid setting.
  EnvVars()
  {
    // Query device 0's shared-memory capacity to bound SHARED_MEM_BYTES below
    int maxSharedMemBytes = 0;
    hipDeviceGetAttribute(&maxSharedMemBytes,
                          hipDeviceAttributeMaxSharedMemoryPerMultiprocessor, 0);

    useHipCall      = GetEnvVar("USE_HIP_CALL"     , 0);
    useMemset       = GetEnvVar("USE_MEMSET"       , 0);
    useSingleSync   = GetEnvVar("USE_SINGLE_SYNC"  , 1);
    useInteractive  = GetEnvVar("USE_INTERACTIVE"  , 0);
    combineTiming   = GetEnvVar("COMBINE_TIMING"   , 0);
    outputToCsv     = GetEnvVar("OUTPUT_TO_CSV"    , 0);
    byteOffset      = GetEnvVar("BYTE_OFFSET"      , 0);
    numWarmups      = GetEnvVar("NUM_WARMUPS"      , DEFAULT_NUM_WARMUPS);
    numIterations   = GetEnvVar("NUM_ITERATIONS"   , DEFAULT_NUM_ITERATIONS);
    samplingFactor  = GetEnvVar("SAMPLING_FACTOR"  , DEFAULT_SAMPLING_FACTOR);
    numCpuPerLink   = GetEnvVar("NUM_CPU_PER_LINK" , DEFAULT_NUM_CPU_PER_LINK);
    sharedMemBytes  = GetEnvVar("SHARED_MEM_BYTES" , maxSharedMemBytes / 2 + 1);
    blockBytes      = GetEnvVar("BLOCK_BYTES"      , 256);
    usePcieIndexing = GetEnvVar("USE_PCIE_INDEX"   , 0);
    useSingleStream = GetEnvVar("USE_SINGLE_STREAM", 0);

    // Check for fill pattern: a hex string, two hex characters per byte
    char* pattern = getenv("FILL_PATTERN");
    if (pattern != NULL)
    {
      int patternLen = strlen(pattern);
      // Two hex digits form one byte, so an odd-length string is malformed
      if (patternLen % 2)
      {
        printf("[ERROR] FILL_PATTERN must contain an even-number of hex digits\n");
        exit(1);
      }

      // Read in bytes: even positions supply the high nibble, odd the low nibble
      std::vector<unsigned char> bytes;
      unsigned char val = 0;
      for (int i = 0; i < patternLen; i++)
      {
        if ('0' <= pattern[i] && pattern[i] <= '9')
          val += (pattern[i] - '0');
        else if ('A' <= pattern[i] && pattern[i] <= 'F')
          val += (pattern[i] - 'A' + 10);
        else if ('a' <= pattern[i] && pattern[i] <= 'f')
          val += (pattern[i] - 'a' + 10);
        else
        {
          printf("[ERROR] FILL_PATTERN must contain an even-number of hex digits (0-9'/a-f/A-F). (not %c)\n", pattern[i]);
          exit(1);
        }
        if (i % 2 == 0)
          val <<= 4;            // First digit of a byte: shift into high nibble
        else
        {
          bytes.push_back(val); // Second digit completes the byte
          val = 0;
        }
      }

      // Reverse bytes (input is assumed to be given in big-endian)
      std::reverse(bytes.begin(), bytes.end());

      // Figure out how many copies of the pattern are necessary to fill a 4-byte float properly
      // (patternLen/2 bytes per copy; need a multiple of sizeof(float) bytes total)
      int copies;
      switch (patternLen % 8)
      {
      case 0:  copies = 1; break;
      case 4:  copies = 2; break;
      default: copies = 4; break;
      }

      // Fill floats by tiling the byte pattern across the float array
      int numFloats = copies * patternLen / 8;
      fillPattern.resize(numFloats);
      unsigned char* rawData = (unsigned char*) fillPattern.data();
      for (int i = 0; i < numFloats * 4; i++)
        rawData[i] = bytes[i % bytes.size()];
    }
    else fillPattern.clear();

    // Perform some basic validation
    if (byteOffset % sizeof(float))
    {
      printf("[ERROR] BYTE_OFFSET must be set to multiple of %lu\n", sizeof(float));
      exit(1);
    }
    if (numWarmups < 0)
    {
      printf("[ERROR] NUM_WARMUPS must be set to a non-negative number\n");
      exit(1);
    }
    if (numIterations <= 0)
    {
      printf("[ERROR] NUM_ITERATIONS must be set to a positive number\n");
      exit(1);
    }
    if (samplingFactor < 1)
    {
      printf("[ERROR] SAMPLING_FACTOR must be greater or equal to 1\n");
      exit(1);
    }
    if (numCpuPerLink < 1)
    {
      printf("[ERROR] NUM_CPU_PER_LINK must be greater or equal to 1\n");
      exit(1);
    }
    if (sharedMemBytes < 0 || sharedMemBytes > maxSharedMemBytes)
    {
      printf("[ERROR] SHARED_MEM_BYTES must be between 0 and %d\n", maxSharedMemBytes);
      exit(1);
    }
    if (blockBytes <= 0 || blockBytes % 4)
    {
      printf("[ERROR] BLOCK_BYTES must be a positive multiple of 4\n");
      exit(1);
    }
    // These two modes are mutually exclusive (see USE_SINGLE_STREAM docs)
    if (useSingleStream && useHipCall)
    {
      printf("[ERROR] Single stream mode cannot be used with HIP calls\n");
      exit(1);
    }
  }

  // Display info on the env vars that can be used (help text)
  static void DisplayUsage()
  {
    printf("Environment variables:\n");
    printf("======================\n");
    printf(" USE_HIP_CALL       - Use hipMemcpy/hipMemset instead of custom shader kernels for GPU-executed copies\n");
    printf(" USE_MEMSET         - Perform a memset instead of a copy (ignores source memory)\n");
    printf(" USE_SINGLE_SYNC    - Perform synchronization only once after all iterations instead of per iteration\n");
    printf(" USE_INTERACTIVE    - Pause for user-input before starting transfer loop\n");
    printf(" COMBINE_TIMING     - Combines timing with launch (potentially lower timing overhead)\n");
    printf(" OUTPUT_TO_CSV      - Outputs to CSV format if set\n");
    printf(" BYTE_OFFSET        - Initial byte-offset for memory allocations.  Must be multiple of 4. Defaults to 0\n");
    printf(" NUM_WARMUPS=W      - Perform W untimed warmup iteration(s) per test\n");
    printf(" NUM_ITERATIONS=I   - Perform I timed iteration(s) per test\n");
    printf(" SAMPLING_FACTOR=F  - Add F samples (when possible) between powers of 2 when auto-generating data sizes\n");
    printf(" NUM_CPU_PER_LINK=C - Use C threads per Link for CPU-executed copies\n");
    printf(" FILL_PATTERN=STR   - Fill input buffer with pattern specified in hex digits (0-9,a-f,A-F). Must be even number of digits, (byte-level big-endian)\n");
    printf(" SHARED_MEM_BYTES=X - Use X shared mem bytes per threadblock, potentially to avoid multiple threadblocks per CU\n");
    printf(" BLOCK_BYTES=B      - Each CU (except the last) receives a multiple of BLOCK_BYTES to copy\n");
    printf(" USE_PCIE_INDEX     - Index GPUs by PCIe address-ordering instead of HIP-provided indexing\n");
    printf(" USE_SINGLE_STREAM  - Use single stream per device instead of per link. Cannot be used with USE_HIP_CALL\n");
  }

  // Display the effective settings (console mode only; suppressed for CSV output)
  void DisplayEnvVars() const
  {
    if (!outputToCsv)
    {
      printf("Run configuration\n");
      printf("=====================================================\n");
      printf("%-20s = %12d : Using %s for GPU-executed copies\n", "USE_HIP_CALL", useHipCall,
             useHipCall ? "HIP functions" : "custom kernels");
      printf("%-20s = %12d : Performing %s\n", "USE_MEMSET", useMemset,
             useMemset ? "memset" : "memcopy");
      if (useHipCall && !useMemset)
      {
        // HSA_ENABLE_SDMA=0 routes hipMemcpy through blit kernels instead of DMA engines
        char* env = getenv("HSA_ENABLE_SDMA");
        printf("%-20s = %12s : %s\n", "HSA_ENABLE_SDMA", env,
               (env && !strcmp(env, "0")) ? "Using blit kernels for hipMemcpy" : "Using DMA copy engines");
      }
      printf("%-20s = %12d : %s\n", "USE_SINGLE_SYNC", useSingleSync,
             useSingleSync ? "Synchronizing only once, after all iterations" : "Synchronizing per iteration");
      printf("%-20s = %12d : Running in %s mode\n", "USE_INTERACTIVE", useInteractive,
             useInteractive ? "interactive" : "non-interactive");
      printf("%-20s = %12d : %s\n", "COMBINE_TIMING", combineTiming,
             combineTiming ? "Using combined timing+launch" : "Using separate timing / launch");
      printf("%-20s = %12d : Output to %s\n", "OUTPUT_TO_CSV", outputToCsv,
             outputToCsv ? "CSV" : "console");
      printf("%-20s = %12d : Using byte offset of %d\n", "BYTE_OFFSET", byteOffset, byteOffset);
      printf("%-20s = %12d : Running %d warmup iteration(s) per topology\n", "NUM_WARMUPS", numWarmups, numWarmups);
      printf("%-20s = %12d : Running %d timed iteration(s) per topology\n", "NUM_ITERATIONS", numIterations, numIterations);
      printf("%-20s = %12d : Using %d CPU thread(s) per CPU-based-copy Link\n", "NUM_CPU_PER_LINK", numCpuPerLink, numCpuPerLink);
      printf("%-20s = %12s : ", "FILL_PATTERN", getenv("FILL_PATTERN") ? "(specified)" : "(unset)");
      if (fillPattern.size())
      {
        printf("Pattern: %s", getenv("FILL_PATTERN"));
      }
      else
      {
        printf("Pseudo-random: (Element i = i modulo 383 + 31)");
      }
      printf("\n");
      printf("%-20s = %12s : Using %d shared mem per threadblock\n", "SHARED_MEM_BYTES",
             getenv("SHARED_MEM_BYTES") ? "(specified)" : "(unset)", sharedMemBytes);
      printf("%-20s = %12d : Each CU gets a multiple of %d bytes to copy\n", "BLOCK_BYTES", blockBytes, blockBytes);
      printf("%-20s = %12d : Using %s-based GPU indexing\n", "USE_PCIE_INDEX", usePcieIndexing, (usePcieIndexing ? "PCIe" : "HIP"));
      printf("%-20s = %12d : Using single stream per %s\n", "USE_SINGLE_STREAM", useSingleStream, (useSingleStream ? "device" : "Link"));
      printf("\n");
    }
  };

  // Helper function that parses an environment variable as an integer,
  // or returns defaultValue if the variable is unset
  static int GetEnvVar(std::string const varname, int defaultValue)
  {
    if (getenv(varname.c_str()))
      return atoi(getenv(varname.c_str()));
    return defaultValue;
  }
};
#endif
/*
Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Helper macro for checking HSA calls
// Evaluates cmd once; on any status other than HSA_STATUS_SUCCESS, prints the
// HSA status string plus file/line and terminates the process.
// Wrapped in do { } while (0) so it behaves as a single statement
// (safe inside unbraced if/else).
#define HSA_CHECK(cmd) \
do { \
  hsa_status_t error = (cmd); \
  if (error != HSA_STATUS_SUCCESS) { \
    const char* errString = NULL; \
    hsa_status_string(error, &errString); \
    std::cerr << "Encountered HSA error (" << errString << ") at line " \
              << __LINE__ << " in file " << __FILE__ << "\n"; \
    exit(-1); \
  } \
} while (0)
// Structure to hold HSA agent information
struct AgentData
{
  bool isInitialized;                  // True once agents have been enumerated (see GetAgentData)
  std::vector<hsa_agent_t> cpuAgents;  // All detected CPU agents
  std::vector<hsa_agent_t> gpuAgents;  // All detected GPU agents
  std::vector<int> closestNumaNode;    // Per-GPU: index of the closest CPU agent / NUMA node
};
// Simple callback function to return any memory pool for an agent
hsa_status_t MemPoolInfoCallback(hsa_amd_memory_pool_t pool, void *data)
{
  // Query the global flags for this memory pool
  uint32_t poolFlags;
  HSA_CHECK(hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &poolFlags));

  // Record only coarse-grained pools; continue iteration in every case
  if (poolFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_COARSE_GRAINED)
  {
    *reinterpret_cast<hsa_amd_memory_pool_t*>(data) = pool;
  }
  return HSA_STATUS_SUCCESS;
}
// Callback function to gather HSA agent information
hsa_status_t AgentInfoCallback(hsa_agent_t agent, void* data)
{
  AgentData& info = *reinterpret_cast<AgentData*>(data);

  // Determine what kind of device this agent represents
  hsa_device_type_t deviceType;
  HSA_CHECK(hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &deviceType));

  switch (deviceType)
  {
  case HSA_DEVICE_TYPE_CPU:
    info.cpuAgents.push_back(agent);
    break;
  case HSA_DEVICE_TYPE_GPU:
    // Placeholder NUMA node 0; the real value is computed after enumeration
    info.gpuAgents.push_back(agent);
    info.closestNumaNode.push_back(0);
    break;
  default:
    break;
  }
  return HSA_STATUS_SUCCESS;
}
// Returns the lazily-initialized singleton of detected HSA agents.
// On first call, enumerates all CPU/GPU agents and computes, for each GPU,
// the CPU agent with the smallest total NUMA distance to one of the GPU's
// coarse-grained memory pools (stored in closestNumaNode).
AgentData& GetAgentData()
{
  static AgentData agentData = {};
  if (!agentData.isInitialized)
  {
    agentData.isInitialized = true;
    // Add all detected agents to the list
    HSA_CHECK(hsa_iterate_agents(AgentInfoCallback, &agentData));
    // Loop over each GPU
    for (uint32_t i = 0; i < agentData.gpuAgents.size(); i++)
    {
      // Collect a coarse-grained memory pool for this GPU
      hsa_amd_memory_pool_t pool;
      HSA_CHECK(hsa_amd_agent_iterate_memory_pools(agentData.gpuAgents[i], MemPoolInfoCallback, &pool));
      // Loop over each CPU agent and check distance
      int bestDistance = -1;
      for (uint32_t j = 0; j < agentData.cpuAgents.size(); j++)
      {
        // Determine number of hops from GPU memory pool to CPU agent
        uint32_t hops = 0;
        HSA_CHECK(hsa_amd_agent_memory_pool_get_info(agentData.cpuAgents[j],
                                                     pool,
                                                     HSA_AMD_AGENT_MEMORY_POOL_INFO_NUM_LINK_HOPS,
                                                     &hops));
        // Gather per-hop link info (std::vector replaces unchecked malloc/free)
        std::vector<hsa_amd_memory_pool_link_info_t> linkInfo(hops);
        HSA_CHECK(hsa_amd_agent_memory_pool_get_info(agentData.cpuAgents[j],
                                                     pool,
                                                     HSA_AMD_AGENT_MEMORY_POOL_INFO_LINK_INFO,
                                                     linkInfo.data()));
        // Total NUMA distance is the sum over all hops
        int numaDist = 0;
        for (uint32_t k = 0; k < hops; k++)
        {
          numaDist += linkInfo[k].numa_distance;
        }
        // Track the CPU agent with the smallest total distance
        if (bestDistance == -1 || numaDist < bestDistance)
        {
          agentData.closestNumaNode[i] = j;
          bestDistance = numaDist;
        }
      }
    }
  }
  return agentData;
}
// Returns closest CPU NUMA node to provided GPU
// NOTE: This assumes HSA GPU indexing is similar to HIP GPU indexing
// Returns the closest CPU NUMA node for the GPU at index gpuIdx, exiting
// with an error message if the index is out of range.
// NOTE: This assumes HSA GPU indexing is similar to HIP GPU indexing
int GetClosestNumaNode(int gpuIdx)
{
  AgentData& agentData = GetAgentData();
  // Validate before indexing (cast avoids signed/unsigned comparison)
  if (gpuIdx < 0 || (size_t)gpuIdx >= agentData.closestNumaNode.size())
  {
    printf("[ERROR] GPU index %d is out of bounds\n", gpuIdx);
    exit(1);
  }
  return agentData.closestNumaNode[gpuIdx];
}
/*
Copyright (c) 2022 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#pragma once
#define WARP_SIZE 64  // Threads per wavefront (AMD hardware; kernels below use amdgcn builtins)
#define BLOCKSIZE 256 // Threads per threadblock used by the copy/memset kernels
// GPU copy kernel
// Copies blockParams[blockIdx.x].N floats from src to dst.  Each threadblock
// handles its own BlockParam entry; thread 0 records start/stop timestamps.
__global__ void __launch_bounds__(BLOCKSIZE)
GpuCopyKernel(BlockParam* blockParams)
{
// Copies move packed units of 4 floats (float4, 16 bytes) where possible
#define PackedFloat_t float4
#define FLOATS_PER_PACK (sizeof(PackedFloat_t) / sizeof(float))
  // Collect the arguments for this threadblock
  int Nrem = blockParams[blockIdx.x].N;
  float const* src = blockParams[blockIdx.x].src;
  float* dst = blockParams[blockIdx.x].dst;
  // Thread 0 records the start timestamp for this threadblock
  if (threadIdx.x == 0) blockParams[blockIdx.x].startCycle = __builtin_amdgcn_s_memrealtime();

  // Operate on wavefront granularity
  int numWaves = BLOCKSIZE / WARP_SIZE; // Number of wavefronts per threadblock
  int waveId   = threadIdx.x / WARP_SIZE; // Wavefront number
  int threadId = threadIdx.x % WARP_SIZE; // Thread index within wavefront

#define LOOP1_UNROLL 8
  // 1st loop - each wavefront operates on LOOP1_UNROLL x FLOATS_PER_PACK per thread per iteration
  // Determine the number of packed floats processed by the first loop
  // (Nrem rounded down to a whole number of unrolled wavefront iterations)
  int const loop1Npack = (Nrem / (FLOATS_PER_PACK * LOOP1_UNROLL * WARP_SIZE)) * (LOOP1_UNROLL * WARP_SIZE);
  int const loop1Nelem = loop1Npack * FLOATS_PER_PACK;
  int const loop1Inc = BLOCKSIZE * LOOP1_UNROLL;
  int loop1Offset = waveId * LOOP1_UNROLL * WARP_SIZE + threadId;
  PackedFloat_t const* packedSrc = (PackedFloat_t const*)(src) + loop1Offset;
  PackedFloat_t* packedDst = (PackedFloat_t *)(dst) + loop1Offset;
  while (loop1Offset < loop1Npack)
  {
    // Stage LOOP1_UNROLL packs in registers, then store them all
    PackedFloat_t vals[LOOP1_UNROLL];
#pragma unroll
    for (int u = 0; u < LOOP1_UNROLL; ++u)
      vals[u] = *(packedSrc + u * WARP_SIZE);
#pragma unroll
    for (int u = 0; u < LOOP1_UNROLL; ++u)
      *(packedDst + u * WARP_SIZE) = vals[u];
    packedSrc += loop1Inc;
    packedDst += loop1Inc;
    loop1Offset += loop1Inc;
  }
  Nrem -= loop1Nelem;

  if (Nrem > 0)
  {
    // 2nd loop - Each thread operates on FLOATS_PER_PACK per iteration
    int const loop2Npack = Nrem / FLOATS_PER_PACK;
    int const loop2Nelem = loop2Npack * FLOATS_PER_PACK;
    int const loop2Inc = BLOCKSIZE;
    int loop2Offset = threadIdx.x;
    packedSrc = (PackedFloat_t const*)(src + loop1Nelem);
    packedDst = (PackedFloat_t *)(dst + loop1Nelem);
    while (loop2Offset < loop2Npack)
    {
      packedDst[loop2Offset] = packedSrc[loop2Offset];
      loop2Offset += loop2Inc;
    }
    Nrem -= loop2Nelem;

    // Deal with leftovers (less than FLOATS_PER_PACK floats), one per thread
    if (threadIdx.x < Nrem)
    {
      int offset = loop1Nelem + loop2Nelem + threadIdx.x;
      dst[offset] = src[offset];
    }
  }

  // Make all writes visible system-wide before recording the stop timestamp
  __threadfence_system();
  if (threadIdx.x == 0)
    blockParams[blockIdx.x].stopCycle = __builtin_amdgcn_s_memrealtime();
}
#define MEMSET_UNROLL 8
// GPU memset kernel: each threadblock fills its BlockParam's destination
// buffer with a non-zero value (source memory is ignored)
__global__ void __launch_bounds__(BLOCKSIZE)
GpuMemsetKernel(BlockParam* blockParams)
{
  // Collect the arguments for this block
  int const numElements = blockParams[blockIdx.x].N;
  float* __restrict__ output = (float*)blockParams[blockIdx.x].dst;

  // Write a non-zero value, strided across the threadblock
#pragma unroll MEMSET_UNROLL
  for (int idx = threadIdx.x; idx < numElements; idx += BLOCKSIZE)
    output[idx] = 1234.0;
}
// CPU copy kernel
// CPU copy kernel: copies the Link's N floats from src to dst in one memcpy
void CpuCopyKernel(BlockParam const& blockParams)
{
  size_t const numBytes = blockParams.N * sizeof(float);
  memcpy(blockParams.dst, blockParams.src, numBytes);
}
// CPU memset kernel
// CPU memset kernel: fills the Link's destination with the same non-zero
// value used by the GPU memset kernel (source memory is ignored)
void CpuMemsetKernel(BlockParam const& blockParams)
{
  float* const output = blockParams.dst;
  int const count = blockParams.N;
  for (int idx = 0; idx < count; ++idx)
    output[idx] = 1234.0;
}
# Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
# Builds TransferBench with hipcc in a single compile step.
# Override ROCM_PATH if ROCm is installed somewhere other than /opt/rocm.
ROCM_PATH ?= /opt/rocm
HIPCC=$(ROCM_PATH)/bin/hipcc
EXE=TransferBench
CXXFLAGS = -O3 -I. -lnuma -L$(ROCM_PATH)/hsa/lib -lhsa-runtime64
all: $(EXE)
# Relink whenever the source or any header in the tree changes
$(EXE): $(EXE).cpp $(shell find -regex ".*\.\hpp")
	$(HIPCC) $(CXXFLAGS) $< -o $@
clean:
	rm -f *.o $(EXE)
# TransferBench
TransferBench is a simple utility capable of benchmarking simultaneous copies between user-specified devices (CPUs/GPUs).
## Requirements
1. ROCm stack installed on the system (HIP runtime)
2. libnuma installed on system
## Building
To build TransferBench:
* `make`
If ROCm is installed in a folder other than `/opt/rocm/`, set ROCM_PATH appropriately
This diff is collapsed.
/*
Copyright (c) 2019-2022 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <vector>
#include <sstream>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstdint>
#include <set>
#include <unistd.h>
#include <map>
#include <iostream>
#include <sstream>
#include <hip/hip_runtime.h>
#include <hip/hip_ext.h>
#include <hsa/hsa_ext_amd.h>
#include "EnvVars.hpp"
#define TB_VERSION "1.01"
// Helper macro for catching HIP errors
// Evaluates cmd once; on any result other than hipSuccess, prints the HIP
// error string plus file/line and terminates the process.
// Wrapped in do { } while (0) so it behaves as a single statement.
#define HIP_CALL(cmd) \
do { \
  hipError_t error = (cmd); \
  if (error != hipSuccess) \
  { \
    std::cerr << "Encountered HIP error (" << hipGetErrorString(error) << ") at line " \
              << __LINE__ << " in file " << __FILE__ << "\n"; \
    exit(-1); \
  } \
} while (0)
// Simple configuration parameters
size_t const DEFAULT_BYTES_PER_LINK = (1<<26); // Amount of data transferred per Link (64 MiB)

// Different src/dst memory types supported
typedef enum
{
  MEM_CPU      = 0, // Coarse-grained pinned CPU memory
  MEM_GPU      = 1, // Coarse-grained global GPU memory
  MEM_CPU_FINE = 2, // Fine-grained pinned CPU memory
  MEM_GPU_FINE = 3  // Fine-grained global GPU memory
} MemType;
// Config-file character for each MemType, indexed by enum value:
// 'C'=MEM_CPU, 'G'=MEM_GPU, 'B'=MEM_CPU_FINE, 'F'=MEM_GPU_FINE
char const MemTypeStr[5] = "CGBF";

// Whether source data is being written or verified (see CheckOrFill)
typedef enum
{
  MODE_FILL  = 0, // Fill data with pattern
  MODE_CHECK = 1  // Check data against pattern
} ModeType;
// Each threadblock copies N floats from src to dst
struct BlockParam
{
  int N;                // Number of floats this threadblock/CPU thread copies
  float* src;           // Source buffer (ignored in memset mode)
  float* dst;           // Destination buffer
  long long startCycle; // Timestamp written by GpuCopyKernel at kernel start
  long long stopCycle;  // Timestamp written by GpuCopyKernel at kernel end
};
// Each Link is a uni-directional operation from a src memory to dst memory
struct Link
{
  int linkIndex;       // Link identifier

  // Link config
  MemType exeMemType;  // Link executor type (CPU or GPU)
  int exeIndex;        // Executor index (NUMA node for CPU / device ID for GPU)
  MemType srcMemType;  // Source memory type
  int srcIndex;        // Source device index
  MemType dstMemType;  // Destination memory type
  int dstIndex;        // Destination device index
  int numBlocksToUse;  // Number of threadblocks to use for this Link

  // Memory
  float* srcMem;       // Source memory
  float* dstMem;       // Destination memory

  // How memory is split across threadblocks / CPU cores
  std::vector<BlockParam> blockParam; // Host-side per-block work assignments
  BlockParam* blockParamGpuPtr;       // Device-side copy of those assignments

  // Results
  double linkTime;     // Measured transfer time for this Link

  // Prepares src memory and how to divide N elements across threadblocks/threads
  void PrepareBlockParams(EnvVars const& ev, size_t const N);
};
// An executor is identified by (executor memory type, device index)
typedef std::pair<MemType, int> Executor;

// Per-executor bookkeeping: the Links it runs and their launch/timing state
struct ExecutorInfo
{
  std::vector<Link> links;   // Links to execute

  // For GPU-Executors
  int totalBlocks;           // Total number of CUs/CPU threads to use
  BlockParam* blockParamGpu; // Copy of block parameters in GPU device memory
  std::vector<hipStream_t> streams;     // HIP streams used to launch this executor's work
  std::vector<hipEvent_t> startEvents;  // Start-timing events (paired with stopEvents)
  std::vector<hipEvent_t> stopEvents;   // Stop-timing events

  // Results
  double totalTime;          // Measured total execution time for this executor
};
// Maps each executor to the work it has been assigned
typedef std::map<Executor, ExecutorInfo> LinkMap;
// Display usage instructions
void DisplayUsage(char const* cmdName);

// Display detected GPU topology / CPU numa nodes
void DisplayTopology(bool const outputToCsv);

// Build array of test sizes based on sampling factor
void PopulateTestSizes(size_t const numBytesPerLink, int const samplingFactor,
                       std::vector<size_t>& valuesofN);

// Parse a memory-location token (e.g. "G0") into its memory type and device index
void ParseMemType(std::string const& token, int const numCpus, int const numGpus,
                  MemType* memType, int* memIndex);

// Parse one config-file line into the set of Links to run in parallel
void ParseLinks(char* line, int numCpus, int numGpus,
                LinkMap& linkMap);

// Enable GPU peer-to-peer access between the two given devices
void EnablePeerAccess(int const deviceId, int const peerDeviceId);

// Allocate / release numBytes of the given memory type on the given device or NUMA node
void AllocateMemory(MemType memType, int devIndex, size_t numBytes, void** memPtr);
void DeallocateMemory(MemType memType, void* memPtr);

// NOTE(review): presumably verifies the pages of byteArray reside on NUMA node
// targetId -- confirm against the definition
void CheckPages(char* byteArray, size_t numBytes, int targetId);

// Fill ptr with the reference pattern (MODE_FILL) or validate it (MODE_CHECK)
void CheckOrFill(ModeType mode, int N, bool isMemset, bool isHipCall, std::vector<float> const& fillPattern, float* ptr);

// Execute one Link of the given executor for the given iteration
void RunLink(EnvVars const& ev, size_t const N, int const iteration, ExecutorInfo& exeInfo, int const linkIdx);

// Run the preset peer-to-peer benchmark mode across device pairs
void RunPeerToPeerBenchmarks(EnvVars const& ev, size_t N, int numBlocksToUse, int readMode, int skipCpu);

// Return the maximum bandwidth measured for given (src/dst) pair
double GetPeakBandwidth(EnvVars const& ev,
                        size_t const N,
                        int const isBidirectional,
                        int const readMode,
                        int const numBlocksToUse,
                        MemType const srcMemType,
                        int const srcIndex,
                        MemType const dstMemType,
                        int const dstIndex);

// Human-readable descriptions for link types, (src,dst) pairs, and Links
std::string GetLinkTypeDesc(uint32_t linkType, uint32_t hopCount);
std::string GetDesc(MemType srcMemType, int srcIndex,
                    MemType dstMemType, int dstIndex);
std::string GetLinkDesc(Link const& link);

// Map a user-visible device index to the HIP device index (honors USE_PCIE_INDEX)
int RemappedIndex(int const origIdx, MemType const memType);

// NOTE(review): presumably returns the device wallclock frequency used to
// convert kernel timestamps to seconds -- confirm against the definition
int GetWallClockRate(int deviceId);
# ConfigFile Format:
# ==================
# A Link is defined as a uni-directional transfer from src memory location to dst memory location
# executed by either CPU or GPU
# Each single line in the configuration file defines a set of Links to run in parallel
# There are two ways to specify the configuration file:
# 1) Basic
# The basic specification assumes the same number of threadblocks/CUs used per GPU-executed Link
# A positive number of Links is specified followed by that number of triplets describing each Link
# #Links #CUs (srcMem1->Executor1->dstMem1) ... (srcMemL->ExecutorL->dstMemL)
# 2) Advanced
# The advanced specification allows different number of threadblocks/CUs used per GPU-executed Link
# A negative number of links is specified, followed by quadruples describing each Link
# -#Links (srcMem1->Executor1->dstMem1 #CUs1) ... (srcMemL->ExecutorL->dstMemL #CUsL)
# Argument Details:
# #Links : Number of Links to be run in parallel
# #CUs : Number of threadblocks/CUs to use for a GPU-executed Link
# srcMemL : Source memory location (Where the data is to be read from). Ignored in memset mode
# Executor: Executor are specified by a character indicating executor type, followed by device index (0-indexed)
# - C: CPU-executed (Indexed from 0 to 1)
# - G: GPU-executed (Indexed from 0 to 3)
# dstMemL : Destination memory location (Where the data is to be written to)
# Memory locations are specified by a character indicating memory type,
# followed by device index (0-indexed)
# Supported memory locations are:
# - C: Pinned host memory (on NUMA node, indexed from 0 to [# NUMA nodes-1])
# - B: Fine-grain host memory (on NUMA node, indexed from 0 to [# NUMA nodes-1])
# - G: Global device memory (on GPU device indexed from 0 to [# GPUs - 1])
# - F: Fine-grain device memory (on GPU device indexed from 0 to [# GPUs - 1])
# Examples:
# 1 4 (G0->G0->G1) Single link using 4 CUs on GPU0 to copy from GPU0 to GPU1
# 1 4 (C1->G2->G0) Single link using 4 CUs on GPU2 to copy from CPU1 to GPU0
# 2 4 G0->G0->G1 G1->G1->G0 Runs 2 Links in parallel. GPU0 to GPU1, and GPU1 to GPU0, each with 4 CUs
# -2 (G0 G0 G1 4) (G1 G1 G0 2) Runs 2 Links in parallel. GPU0 to GPU1 with 4 CUs, and GPU1 to GPU0 with 2 CUs
# Round brackets and arrows '->' may be included for human clarity, but will be ignored and are unnecessary
# Lines starting with # will be ignored. Lines starting with ## will be echoed to output
# Single GPU-executed link between GPUs 0 and 1 using 4 CUs
1 4 (G0->G0->G1)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment