"git@developer.sourcefind.cn:modelzoo/resnet50_tensorflow.git" did not exist on "f52606d62fa72e5599b1c317803552a3ff5d821d"
Commit 3e70d01b authored by Shucai Xiao

code backup

parent 22500e6c
@@ -53,8 +53,9 @@ void logsoftmax(hipStream_t stream, const argument& result, const argument& arg,
         __syncthreads();
         auto item_num = (remaining_item_num > block_size) ? block_size : remaining_item_num;
-        reduce_max(lds_data, block_size, thr_idx, item_num, max_block_size);
+        // reduce_max(lds_data, block_size, thr_idx, item_num, max_block_size);
+        block_reduce<type, max_op<type>>(lds_data, max_op<type>{}, block_size, thr_idx, item_num, max_block_size);
         remaining_item_num -= block_size;
     }
@@ -75,7 +76,8 @@ void logsoftmax(hipStream_t stream, const argument& result, const argument& arg,
         __syncthreads();
         auto item_num = (remaining_item_num > block_size) ? block_size : remaining_item_num;
-        reduce_sum(lds_data, block_size, thr_idx, item_num, max_block_size);
+        // reduce_sum(lds_data, block_size, thr_idx, item_num, max_block_size);
+        block_reduce<type, sum_op<type>>(lds_data, sum_op<type>{}, block_size, thr_idx, item_num, max_block_size);
         remaining_item_num -= block_size;
     }
...
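For reference, the max reduction followed by the sum reduction in this kernel corresponds to the usual numerically stable formulation of log-softmax, where the row maximum is subtracted before exponentiating (standard background, not part of the diff):

\[
\log\mathrm{softmax}(x_i) = x_i - m - \log\sum_j e^{x_j - m}, \qquad m = \max_j x_j
\]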
@@ -55,6 +55,7 @@ void softmax(hipStream_t stream, const argument& result, const argument& arg, in
         auto item_num = (remaining_item_num > block_size) ? block_size : remaining_item_num;
         reduce_max(lds_data, block_size, thr_idx, item_num, max_block_size);
+        reduce_max(lds_data, block_size, thr_idx, item_num, max_block_size);
         remaining_item_num -= block_size;
     }
...
@@ -10,6 +10,59 @@ inline namespace MIGRAPHX_INLINE_NS {
 namespace gpu {
 namespace device {
 
+template <class T>
+struct max_op
+{
+    T operator()(T x, T y) { return (x > y) ? x : y; }
+};
+
+template <class T>
+struct min_op
+{
+    T operator()(T x, T y) { return (x < y) ? x : y; }
+};
+
+template <class T>
+struct sum_op
+{
+    T operator()(T x, T y) { return x + y; }
+};
+
+template <class T, class Op>
+inline __device__ void block_reduce(T* data_ptr,
+                                    Op op,
+                                    std::size_t block_size,
+                                    std::size_t thr_idx,
+                                    std::size_t item_num,
+                                    std::size_t max_index)
+{
+    while(true)
+    {
+        auto stride = (item_num + 1) / 2;
+        auto size   = item_num / 2;
+        for(std::size_t i = thr_idx; i < size; i += block_size)
+        {
+            // data_ptr[i] = ::max(to_hip_type(data_ptr[i]), to_hip_type(data_ptr[i + stride]));
+            data_ptr[i] = op(data_ptr[i], data_ptr[i + stride]);
+        }
+        __syncthreads();
+
+        item_num = stride;
+        if(item_num == 1)
+            break;
+    }
+
+    if(thr_idx == 0)
+    {
+        // data_ptr[max_index] =
+        //     (data_ptr[0] < data_ptr[max_index]) ? data_ptr[max_index] : data_ptr[0];
+        data_ptr[max_index] = op(data_ptr[max_index], data_ptr[0]);
+    }
+    __syncthreads();
+}
+
 template <class T>
 inline __device__ void reduce_max(T* data_ptr,
                                   std::size_t block_size,
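The generic block_reduce added above is what the logsoftmax hunks switch to: one pairwise tree reduction parameterized by a binary functor instead of separate reduce_max/reduce_sum bodies. The following host-side model mirrors that loop structure (stride = (item_num + 1) / 2, size = item_num / 2); block_reduce_model and the functor names are illustrative only, not the MIGraphX device API, and the snippet compiles and runs on its own.

#include <cstddef>
#include <iostream>
#include <vector>

template <class T>
struct max_op
{
    T operator()(T x, T y) const { return (x > y) ? x : y; }
};

template <class T>
struct sum_op
{
    T operator()(T x, T y) const { return x + y; }
};

// Sequential model of the device-side loop: each pass folds element i with
// element i + stride, halving item_num until a single element remains.
template <class T, class Op>
T block_reduce_model(std::vector<T> data, Op op)
{
    std::size_t item_num = data.size();
    while(item_num > 1)
    {
        std::size_t stride = (item_num + 1) / 2;
        std::size_t size   = item_num / 2;
        for(std::size_t i = 0; i < size; ++i)
            data[i] = op(data[i], data[i + stride]);
        item_num = stride;
    }
    return data.front();
}

int main()
{
    std::vector<float> v = {3.0f, -1.0f, 7.5f, 2.0f, 0.5f};
    std::cout << "max: " << block_reduce_model(v, max_op<float>{}) << "\n"; // prints 7.5
    std::cout << "sum: " << block_reduce_model(v, sum_op<float>{}) << "\n"; // prints 12
    return 0;
}

For this sample vector the output matches std::max_element and std::accumulate, which is the point of the refactor: the reduction skeleton stays fixed and only the functor changes.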
@@ -72,6 +125,84 @@ inline __device__ void reduce_min(T* data_ptr,
     __syncthreads();
 }
 
+template <class T>
+inline __device__ void reduce_argmax(T* data_ptr,
+                                     int64_t* index_ptr,
+                                     std::size_t block_size,
+                                     std::size_t thr_idx,
+                                     std::size_t item_num,
+                                     std::size_t max_index)
+{
+    while(true)
+    {
+        auto stride = (item_num + 1) / 2;
+        auto size   = item_num / 2;
+        for(std::size_t i = thr_idx; i < size; i += block_size)
+        {
+            if(data_ptr[i] < data_ptr[i + stride])
+            {
+                data_ptr[i]  = data_ptr[i + stride];
+                index_ptr[i] = index_ptr[i + stride];
+            }
+        }
+        __syncthreads();
+
+        item_num = stride;
+        if(item_num == 1)
+            break;
+    }
+
+    if(thr_idx == 0)
+    {
+        if(data_ptr[max_index] < data_ptr[0])
+        {
+            data_ptr[max_index]  = data_ptr[0];
+            index_ptr[max_index] = index_ptr[0];
+        }
+    }
+    __syncthreads();
+}
+
+template <class T>
+inline __device__ void reduce_argmin(T* data_ptr,
+                                     int64_t* index_ptr,
+                                     std::size_t block_size,
+                                     std::size_t thr_idx,
+                                     std::size_t item_num,
+                                     std::size_t min_index)
+{
+    while(true)
+    {
+        auto stride = (item_num + 1) / 2;
+        auto size   = item_num / 2;
+        for(std::size_t i = thr_idx; i < size; i += block_size)
+        {
+            if(data_ptr[i] > data_ptr[i + stride])
+            {
+                data_ptr[i]  = data_ptr[i + stride];
+                index_ptr[i] = index_ptr[i + stride];
+            }
+        }
+        __syncthreads();
+
+        item_num = stride;
+        if(item_num == 1)
+            break;
+    }
+
+    if(thr_idx == 0)
+    {
+        if(data_ptr[min_index] > data_ptr[0])
+        {
+            data_ptr[min_index]  = data_ptr[0];
+            index_ptr[min_index] = index_ptr[0];
+        }
+    }
+    __syncthreads();
+}
+
 template <class T>
 inline __device__ void reduce_sum(T* data_ptr,
                                   std::size_t block_size,
...
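The reduce_argmax and reduce_argmin additions use the same halving scheme but carry an index array alongside the values, so the surviving value keeps track of where it came from. A minimal host-side sketch of the argmax variant follows; argmax_model is a made-up name for illustration and is not part of the diff.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

// Sequential model of reduce_argmax: values and their original indices are
// folded together, and whichever value wins a comparison brings its index along.
std::pair<float, std::int64_t> argmax_model(std::vector<float> data)
{
    std::vector<std::int64_t> index(data.size());
    std::iota(index.begin(), index.end(), 0);

    std::size_t item_num = data.size();
    while(item_num > 1)
    {
        std::size_t stride = (item_num + 1) / 2;
        std::size_t size   = item_num / 2;
        for(std::size_t i = 0; i < size; ++i)
        {
            if(data[i] < data[i + stride])
            {
                data[i]  = data[i + stride];
                index[i] = index[i + stride];
            }
        }
        item_num = stride;
    }
    return {data.front(), index.front()};
}

int main()
{
    auto [value, idx] = argmax_model({3.0f, -1.0f, 7.5f, 2.0f, 0.5f});
    std::cout << "max " << value << " at index " << idx << "\n"; // max 7.5 at index 2
    return 0;
}

The argmin variant in the diff is identical except that the comparison flips from < to >.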