gaoqiong / MIGraphX · Commit 0dac827c

Authored Jun 21, 2019 by Shucai Xiao

some preliminary optimization for the logsoftmax function.

Parent: 66bae091
Showing 2 changed files with 61 additions and 28 deletions (+61 -28):

src/targets/gpu/device/include/migraphx/gpu/device/types.hpp (+2 -2)
src/targets/gpu/device/logsoftmax.cpp (+59 -26)
src/targets/gpu/device/include/migraphx/gpu/device/types.hpp
...
@@ -76,13 +76,13 @@ device_type<T>* device_cast(T* x)
 }

 template <class T>
-T to_hip_type(T x)
+__device__ __host__ T to_hip_type(T x)
 {
     return x;
 }

 // Hip doens't support __fp16
-inline float to_hip_type(gpu_half x) { return x; }
+inline __device__ __host__ float to_hip_type(gpu_half x) { return x; }

 } // namespace device
 } // namespace gpu
...
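The only change in types.hpp is the added __device__ __host__ qualifiers, which let to_hip_type be called from inside HIP kernels (such as the device lambda introduced in logsoftmax.cpp below) as well as from host code. As a rough illustration of what the qualifiers do, here is a minimal, self-contained HIP sketch; scale_value and scale_kernel are hypothetical names and are not part of MIGraphX:

#include <hip/hip_runtime.h>

// A function marked __device__ __host__ is compiled for both the CPU and the GPU,
// so the same helper can be used in host code and inside kernels.
__device__ __host__ float scale_value(float x) { return x * 0.5f; } // hypothetical helper

__global__ void scale_kernel(const float* in, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n)
        out[i] = scale_value(in[i]); // legal on the device because of __device__
}

Without the __device__ qualifier, a call to to_hip_type from the new device lambda in logsoftmax.cpp would not compile.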
src/targets/gpu/device/logsoftmax.cpp
...
@@ -30,41 +30,74 @@ argument logsoftmax(hipStream_t stream,
     hip_tensor_descriptor<n_dim> desc_batch(batch_shape);
     hip_tensor_descriptor<n_dim> desc_data(output_shape);

-    // each thread is for one item in the batch
-    gs_launch(stream, batch_shape.elements())([=](auto i) {
-        auto batch_idx = desc_batch.multi(i);
-        auto data_idx  = batch_idx;
-        // get max
-        auto batch_max = input_ptr[desc_data.linear(batch_idx)];
-        for(std::size_t j = 1; j < num_in_batch; ++j)
-        {
-            data_idx[axis] = j;
-            size_t idx = desc_data.linear(data_idx);
-            batch_max  = std::max(to_hip_type(batch_max), to_hip_type(input_ptr[idx]));
-        }
-
-        for(std::size_t j = 0; j < num_in_batch; ++j)
-        {
-            data_idx[axis] = j;
-            size_t idx      = desc_data.linear(data_idx);
-            output_ptr[idx] = input_ptr[idx] - batch_max;
-        }
-
-        auto batch_sum = ::exp(to_hip_type(output_ptr[desc_data.linear(batch_idx)]));
-        for(std::size_t j = 1; j < num_in_batch; ++j)
-        {
-            data_idx[axis] = j;
-            size_t idx = desc_data.linear(data_idx);
-            batch_sum += ::exp(to_hip_type(output_ptr[idx]));
-        }
-        batch_sum = ::log(to_hip_type(batch_sum));
-
-        for(std::size_t j = 0; j < num_in_batch; ++j)
-        {
-            data_idx[axis] = j;
-            size_t idx = desc_data.linear(data_idx);
-            output_ptr[idx] -= batch_sum;
-        }
-    });
+    // use one block for items in one batch.
+    // opt 1, load all data to lds then use the same approach as
+    // the current optimization
+    const size_t block_size = 1024;
+    launch(stream, batch_shape.elements() * block_size, block_size)(
+        [=](auto idx) __device__ {
+            size_t thr_idx = idx.local;
+            size_t blk_idx = idx.group;
+            // using type = typename decltype(input)::value_type;
+            using type = device_type<std::remove_cv_t<typename decltype(output)::value_type>>;
+
+            // all data can be loaded to the lds once, so all operations are
+            // done in lds
+            MIGRAPHX_DEVICE_SHARED type lds_data[block_size + 2];
+            auto batch_idx = desc_batch.multi(blk_idx);
+            auto data_idx  = batch_idx;
+
+            // load data to lds and compute the batch max
+            size_t item_num      = num_in_batch;
+            lds_data[block_size] = input_ptr[0];
+            for(size_t i = thr_idx; i < num_in_batch; i += block_size)
+            {
+                data_idx[axis] = i;
+                lds_data[i]    = input_ptr[desc_data.linear(data_idx)];
+
+                __syncthreads();
+
+                // use thread 0 for batch_max
+                if(thr_idx == 0)
+                {
+                    auto size = (item_num > block_size) ? block_size : item_num;
+                    for(size_t j = 0; j < size; j++)
+                    {
+                        lds_data[block_size] =
+                            ::max(to_hip_type(lds_data[block_size]), to_hip_type(lds_data[j]));
+                    }
+                    item_num -= block_size;
+                }
+                __syncthreads();
+            }
+
+            const size_t block_size1 = block_size + 1;
+            lds_data[block_size1]    = 0;
+            item_num                 = num_in_batch;
+            for(size_t i = thr_idx; i < num_in_batch; i += block_size)
+            {
+                data_idx[axis] = i;
+                lds_data[i]    = input_ptr[desc_data.linear(data_idx)];
+
+                __syncthreads();
+                // use thread 0 for batch_max
+                if(thr_idx == 0)
+                {
+                    auto size = (item_num > block_size) ? block_size : item_num;
+                    for(size_t j = 0; j < size; j++)
+                    {
+                        lds_data[block_size1] +=
+                            ::exp(to_hip_type(lds_data[j] - lds_data[block_size]));
+                    }
+                    item_num -= block_size;
+                }
+                __syncthreads();
+            }
+
+            auto log_batch_sum =
+                ::log(to_hip_type(lds_data[block_size1])) + lds_data[block_size];
+            item_num = num_in_batch;
+            for(size_t i = thr_idx; i < num_in_batch; i += block_size)
+            {
+                data_idx[axis]    = i;
+                size_t index      = desc_data.linear(data_idx);
+                output_ptr[index] = input_ptr[index] - log_batch_sum;
+            }
+        });
 });
...
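For reference, the kernel above computes the usual numerically stable log-softmax along one axis: each block finds the per-batch maximum in LDS, accumulates the sum of exp of the shifted values, and subtracts log(sum) + max from every input. Below is a minimal host-side C++ sketch of that computation for a single 1-D slice; it is illustrative only, not code from this commit, and the name log_softmax_reference is hypothetical:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Numerically stable log-softmax over one slice, mirroring what the kernel
// keeps in lds_data[block_size] (the max) and lds_data[block_size + 1] (the sum).
std::vector<float> log_softmax_reference(const std::vector<float>& x)
{
    float batch_max = *std::max_element(x.begin(), x.end());
    float batch_sum = 0.0f;
    for(float v : x)
        batch_sum += std::exp(v - batch_max);
    float log_batch_sum = std::log(batch_sum) + batch_max;
    std::vector<float> out(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
        out[i] = x[i] - log_batch_sum;
    return out;
}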