Unverified Commit 95575579 authored by Bruno's avatar Bruno Committed by GitHub
Browse files

Use parentheses around min and max to enable the Windows build (#449)



* Towards Windows build

* formatting
Co-authored-by: default avatarBruno Cabral <bruno@potelo.com.br>
Co-authored-by: default avatarJeff Rasley <jerasley@microsoft.com>
Co-authored-by: default avatarOlatunji Ruwase <olruwase@microsoft.com>
parent 7b8be2a7
......@@ -33,8 +33,8 @@
// Compute the CUDA launch grid size for N elements.
//
// Returns ceil(N / DS_CUDA_NUM_THREADS), clamped to at most
// DS_MAXIMUM_NUM_BLOCKS and to at least 1 block, since CUDA does not
// allow launching an empty grid.
//
// NOTE: std::min / std::max are wrapped in parentheses so that the
// function-style min/max macros defined by <windows.h> cannot expand
// them, which would otherwise break the Windows (MSVC) build.
inline int DS_GET_BLOCKS(const int N)
{
    return (std::max)(
        (std::min)((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS),
        // Use at least 1 block, since CUDA does not allow empty block
        1);
}
......
......@@ -97,7 +97,7 @@ public:
template <typename Func>
int Run(int loops, Func f)
{
float fast_latency = std::numeric_limits<float>::max();
float fast_latency = (std::numeric_limits<float>::max)();
int fast_algo = 0;
for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
......@@ -252,7 +252,7 @@ public:
template <typename Func>
int Run(int loops, Func f)
{
float fast_latency = std::numeric_limits<float>::max();
float fast_latency = (std::numeric_limits<float>::max)();
int fast_algo = 0;
for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
......
......@@ -27,8 +27,8 @@ size_t get_workspace_size(int maxBatchSize,
{
size_t workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size);
if (training) {
workSpacesize += (std::max((size_t(maxBatchSize) * seq_len * intermediate_size),
2 * (size_t(maxBatchSize) * heads * seq_len * seq_len)));
workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size),
2 * (size_t(maxBatchSize) * heads * seq_len * seq_len)));
if (gelu_checkpoint) workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size);
}
return workSpacesize * sizeof(T);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment