gaoqiong/MIGraphX, commit cfaf5be6 (unverified)

Merge branch 'develop' into tf_pb

Authored Mar 04, 2019 by Paul Fultz II; committed via GitHub on Mar 04, 2019.
Parents: c984fd24, c1fec2c4

Showing 14 changed files with 552 additions and 0 deletions (+552 -0)
Changed files:

src/include/migraphx/operators.hpp                           +16  -0
src/onnx/onnx.cpp                                            +15  -0
src/targets/cpu/lowering.cpp                                 +70  -0
src/targets/gpu/CMakeLists.txt                               +2   -0
src/targets/gpu/device/logsoftmax.cpp                        +70  -0
src/targets/gpu/include/migraphx/gpu/device/logsoftmax.hpp   +23  -0
src/targets/gpu/include/migraphx/gpu/logsoftmax.hpp          +39  -0
src/targets/gpu/logsoftmax.cpp                               +27  -0
src/targets/gpu/lowering.cpp                                 +2   -0
test/cpu_ops_test.cpp                                        +170 -0
test/gpu/miopen.cpp                                          +35  -0
test/onnx/logsoftmax_test.onnx                               +17  -0
test/onnx/onnx_test.cpp                                      +11  -0
test/op_shape_test.cpp                                       +55  -0
src/include/migraphx/operators.hpp (+16 -0)

@@ -950,6 +950,22 @@ struct softmax
    }
};

struct logsoftmax
{
    int axis = 1;
    std::string name() const { return "logsoftmax"; }
    shape compute_shape(std::vector<shape> inputs) const
    {
        check_shapes{inputs}.has(1);
        if(axis < 0 || axis > inputs[0].lens().size())
        {
            MIGRAPHX_THROW("LogSoftMax: input axis value " + std::to_string(axis) +
                           " is out of range");
        }

        return inputs.at(0);
    }
};

struct flatten
{
    uint64_t axis = 0;
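Note on the operator itself: compute_shape only validates that axis lies within [0, rank] and returns the input shape untouched, so logsoftmax is shape-preserving. Numerically it produces log(exp(x_i) / sum_j exp(x_j)), with the sum taken over the dimensions from axis onward. The following standalone sketch (not part of the commit, plain C++ with no MIGraphX dependency) shows the stable formulation the CPU and GPU implementations below both follow: subtract the row maximum, exponentiate, sum, take the log, subtract again.

// Standalone sketch: numerically stable log-softmax over a single flat row,
// mirroring the max-subtract / exp-sum / log steps used by this commit.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<float> log_softmax_row(const std::vector<float>& x)
{
    float m = *std::max_element(x.begin(), x.end()); // subtract max for stability
    float sum = 0.0f;
    for(float v : x)
        sum += std::exp(v - m);
    float log_sum = std::log(sum);
    std::vector<float> y(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
        y[i] = x[i] - m - log_sum; // log(exp(x_i) / sum_j exp(x_j))
    return y;
}

int main()
{
    for(float v : log_softmax_row({1.0f, 2.0f, 3.0f}))
        std::printf("%f\n", v);
}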
src/onnx/onnx.cpp (+15 -0)

@@ -79,6 +79,7 @@ struct onnx_parser
        add_mem_op("Gemm", &onnx_parser::parse_gemm);
        add_mem_op("BatchNormalization", &onnx_parser::parse_batchnorm);
        add_mem_op("Softmax", &onnx_parser::parse_softmax);
        add_mem_op("LogSoftmax", &onnx_parser::parse_logsoftmax);
        add_mem_op("Squeeze", &onnx_parser::parse_squeeze);
        add_mem_op("Unsqueeze", &onnx_parser::parse_unsqueeze);
        add_mem_op("Slice", &onnx_parser::parse_slice);

@@ -228,6 +229,19 @@ struct onnx_parser
        return prog.add_instruction(op::reshape{{long(dims[0]), long(dims[1])}}, s);
    }

    instruction_ref parse_logsoftmax(const std::string&,
                                     const attribute_map& attributes,
                                     std::vector<instruction_ref> args)
    {
        int axis = 1;
        if(contains(attributes, "axis"))
        {
            axis = parse_value(attributes.at("axis")).at<int>();
        }

        return prog.add_instruction(op::logsoftmax{axis}, std::move(args));
    }

    instruction_ref
    parse_conv(const std::string&, attribute_map attributes, std::vector<instruction_ref> args)
    {

@@ -496,6 +510,7 @@ struct onnx_parser
                return add_broadcastable_binary_op(l3, l4, op::add{});
            }
        }
        return prog.add_instruction(op::dot{alpha, beta}, l1, l2);
    }
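The parser registers LogSoftmax and defaults the axis attribute to 1 when it is absent, which matches the ONNX LogSoftmax default. As a usage sketch, the check below mirrors the new TEST_CASE(logsoftmax) in test/onnx/onnx_test.cpp further down; the include paths are assumptions on my part, while the API calls are exactly the ones appearing in this diff.

// Hedged usage sketch based on test/onnx/onnx_test.cpp from this commit.
#include <migraphx/onnx.hpp>       // assumed location of migraphx::parse_onnx
#include <migraphx/operators.hpp>  // op::logsoftmax
#include <migraphx/program.hpp>    // assumed location of migraphx::program

int main()
{
    migraphx::program p;
    auto x = p.add_parameter(
        "x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
    p.add_instruction(migraphx::op::logsoftmax{1}, x); // ONNX axis default is 1
    auto prog = migraphx::parse_onnx("logsoftmax_test.onnx");
    return (p == prog) ? 0 : 1; // 0 when the parsed program matches the hand-built one
}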
src/targets/cpu/lowering.cpp (+70 -0)

@@ -613,6 +613,75 @@ struct softmax2d
    }
};

struct cpu_logsoftmax
{
    op::logsoftmax op;

    std::string name() const { return "cpu::logsoftmax"; }
    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }

    template <typename T>
    std::size_t compute_batch_index(const T& idx, shape& batch_shape, int axis) const
    {
        if(axis == 0)
        {
            return 0;
        }
        else
        {
            std::vector<std::size_t> batch_idx(idx.begin(), idx.begin() + axis);
            return batch_shape.index(batch_idx.begin(), batch_idx.end());
        }
    }

    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
        auto lens = output_shape.lens();
        std::vector<std::size_t> batch_lens{};
        if(op.axis == 0)
        {
            batch_lens.push_back(1);
        }
        else
        {
            batch_lens.insert(batch_lens.begin(), lens.begin(), lens.begin() + op.axis);
        }
        shape batch_shape{migraphx::shape::uint32_type, batch_lens};

        visit_all(result, args[0])([&](auto output, auto input) {
            using value_type = typename decltype(input)::value_type;
            std::vector<value_type> batch_max(batch_shape.elements(),
                                              std::numeric_limits<value_type>::lowest());
            shape_for_each(output_shape, [&](auto idx) {
                auto index       = this->compute_batch_index(idx, batch_shape, op.axis);
                batch_max[index] = std::max(batch_max[index], input(idx.begin(), idx.end()));
            });

            shape_for_each(output_shape, [&](auto idx) {
                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
                output(idx.begin(), idx.end()) = input(idx.begin(), idx.end()) - batch_max[index];
            });

            std::vector<value_type> batch_sum(batch_shape.elements(), value_type(0));
            shape_for_each(output_shape, [&](auto idx) {
                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
                batch_sum[index] += std::exp(output(idx.begin(), idx.end()));
            });

            for(std::size_t i = 0; i < batch_sum.size(); ++i)
            {
                batch_sum[i] = std::log(batch_sum[i]);
            }

            shape_for_each(output_shape, [&](auto idx) {
                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
                output(idx.begin(), idx.end()) -= batch_sum[index];
            });
        });

        return result;
    }
};

struct add_op
{
    std::string name() const { return "add"; }

@@ -723,6 +792,7 @@ struct cpu_apply
        apply_map["pad"]        = extend_op<cpu_pad, op::pad>();
        apply_map["concat"]     = extend_op<cpu_concat, op::concat>();
        apply_map["gather"]     = extend_op<cpu_gather, op::gather>();
        apply_map["logsoftmax"] = extend_op<cpu_logsoftmax, op::logsoftmax>();
        apply_map["leaky_relu"] = extend_op<cpu_unary<leaky_relu_op>, op::leaky_relu>();
        apply_map["elu"]        = extend_op<cpu_unary<elu_op>, op::elu>();
        apply_map["identity"]   = simple_op<cpu_unary<identity_op>>();
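cpu_logsoftmax splits every element index into a batch part (the dimensions before op.axis) and a reduced part (everything from the axis onward): batch_max and batch_sum are accumulated once per batch entry and then subtracted from every element of that entry. A minimal sketch of that index split, using a hypothetical batch_index helper in plain C++ rather than MIGraphX's shape::index:

// Sketch of the index split used by cpu_logsoftmax: the leading `axis`
// dimensions are flattened (row-major) into a single batch index.
#include <cstddef>
#include <cstdio>
#include <vector>

std::size_t batch_index(const std::vector<std::size_t>& idx,
                        const std::vector<std::size_t>& lens,
                        int axis)
{
    if(axis == 0)
        return 0; // one batch entry covering the whole tensor
    std::size_t flat = 0;
    for(int d = 0; d < axis; ++d)
        flat = flat * lens[d] + idx[d]; // row-major flattening of leading dims
    return flat;
}

int main()
{
    std::vector<std::size_t> lens = {2, 3, 3, 3};
    std::printf("%zu\n", batch_index({1, 2, 0, 1}, lens, 2)); // prints 5
}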
src/targets/gpu/CMakeLists.txt (+2 -0)

@@ -26,6 +26,7 @@ add_library(migraphx_device
    device/atan.cpp
    device/add_relu.cpp
    device/contiguous.cpp
    device/logsoftmax.cpp
    device/mul.cpp
    device/concat.cpp
    device/pad.cpp

@@ -48,6 +49,7 @@ add_library(migraphx_gpu
    pooling.cpp
    convolution.cpp
    softmax.cpp
    logsoftmax.cpp
    contiguous.cpp
    concat.cpp
    relu.cpp
src/targets/gpu/device/logsoftmax.cpp (new file, mode 100644, +70 -0)

#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/device/logsoftmax.hpp>
#include <migraphx/gpu/device/tensor.hpp>
#include <migraphx/gpu/device/launch.hpp>
#include <migraphx/gpu/device/types.hpp>
#include <migraphx/gpu/hip.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument logsoftmax(hipStream_t stream,
                    const migraphx::shape& output_shape,
                    std::vector<migraphx::argument> args,
                    int axis)
{
    auto lens = output_shape.lens();
    std::size_t batch_size = std::accumulate(
        lens.begin(), lens.begin() + axis, std::size_t{1}, std::multiplies<std::size_t>());
    std::size_t n_dims = std::accumulate(
        lens.begin() + axis, lens.end(), std::size_t{1}, std::multiplies<std::size_t>());
    migraphx::shape comp_shape{output_shape.type(), {batch_size, n_dims}};

    visit_all(args.back(), args.front())([&](auto output, auto input) {
        const auto* input_ptr = device_cast(input.data());
        auto* output_ptr      = device_cast(output.data());
        // each thread is for one item in the batch
        gs_launch(stream, batch_size)([=](auto i) {
            std::size_t row_start = i * n_dims;
            // get max
            auto batch_max = input_ptr[row_start];
            for(std::size_t j = 1; j < n_dims; ++j)
            {
                auto ind  = row_start + j;
                batch_max = std::max(to_hip_type(batch_max), to_hip_type(input_ptr[ind]));
            }

            for(std::size_t j = 0; j < n_dims; ++j)
            {
                auto ind        = row_start + j;
                output_ptr[ind] = input_ptr[ind] - batch_max;
            }

            auto batch_sum = ::exp(to_hip_type(output_ptr[row_start]));
            for(std::size_t j = 1; j < n_dims; ++j)
            {
                auto ind = row_start + j;
                batch_sum += ::exp(to_hip_type(output_ptr[ind]));
            }
            batch_sum = ::log(to_hip_type(batch_sum));

            for(std::size_t j = 0; j < n_dims; ++j)
            {
                auto ind = row_start + j;
                output_ptr[ind] -= batch_sum;
            }
        });
    });

    return args.back();
}

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
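The device kernel views the tensor as a batch_size x n_dims matrix: the dimensions before axis multiply into batch_size, the remaining ones into n_dims, and gs_launch assigns one thread per row (the "each thread is for one item in the batch" comment). A quick standalone check of that split, plain C++ with no HIP required:

// Sketch of the flattening performed above: lens = {3, 4, 5, 6} and axis = 2
// give 12 rows of 30 elements, one row per launched work item.
#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    std::vector<std::size_t> lens = {3, 4, 5, 6};
    int axis = 2;
    std::size_t batch_size = std::accumulate(
        lens.begin(), lens.begin() + axis, std::size_t{1}, std::multiplies<std::size_t>());
    std::size_t n_dims = std::accumulate(
        lens.begin() + axis, lens.end(), std::size_t{1}, std::multiplies<std::size_t>());
    std::printf("batch_size=%zu n_dims=%zu\n", batch_size, n_dims); // 12 and 30
}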
src/targets/gpu/include/migraphx/gpu/device/logsoftmax.hpp (new file, mode 100644, +23 -0)

#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGSOFTMAX_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_LOGSOFTMAX_HPP

#include <migraphx/argument.hpp>
#include <migraphx/config.hpp>
#include <hip/hip_runtime_api.h>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {

argument logsoftmax(hipStream_t stream,
                    const migraphx::shape& output_shape,
                    std::vector<migraphx::argument> args,
                    int axis);

} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/include/migraphx/gpu/logsoftmax.hpp (new file, mode 100644, +39 -0)

#ifndef MIGRAPHX_GUARD_RTGLIB_LOGSOFTMAX_HPP
#define MIGRAPHX_GUARD_RTGLIB_LOGSOFTMAX_HPP

#include <migraphx/gpu/lowering.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/shape_for_each.hpp>
#include <migraphx/config.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/dfor.hpp>
#include <migraphx/gpu/device/contiguous.hpp>
#include <migraphx/gpu/device/add.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/context.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct hip_logsoftmax
{
    op::logsoftmax op;
    std::string name() const { return "gpu::logsoftmax"; }
    shape compute_shape(const std::vector<shape>& inputs) const;
    argument
    compute(context& ctx, const shape& output_shape, const std::vector<argument>& args) const;
    int output_alias(const std::vector<shape>& shapes) const { return shapes.size() - 1; }
};

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx

#endif
src/targets/gpu/logsoftmax.cpp (new file, mode 100644, +27 -0)

#include <migraphx/gpu/logsoftmax.hpp>
#include <migraphx/gpu/device/logsoftmax.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/gpu/miopen.hpp>
#include <utility>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

shape hip_logsoftmax::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).standard();
    return op.compute_shape({inputs.at(0)});
}

argument hip_logsoftmax::compute(context& ctx,
                                 const shape& output_shape,
                                 const std::vector<argument>& args) const
{
    return device::logsoftmax(ctx.get_stream().get(), output_shape, args, op.axis);
}

} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
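hip_logsoftmax::compute_shape expects two inputs because the lowered GPU instruction receives its input tensor plus a preallocated output buffer; device::logsoftmax writes into args.back() and returns it, and output_alias reports that last argument, presumably so the buffer can be reused rather than copied. A toy illustration of that convention (fake_op is hypothetical, not MIGraphX code):

// Toy sketch of the last-argument-is-output convention seen above.
#include <cstdio>
#include <vector>

struct fake_op
{
    // mirrors hip_logsoftmax::output_alias: the result aliases the last argument
    int output_alias(const std::vector<int>& shapes) const
    {
        return static_cast<int>(shapes.size()) - 1;
    }
};

int main()
{
    fake_op op;
    std::vector<int> shapes = {0, 1}; // input shape, output-buffer shape
    std::printf("output argument index: %d\n", op.output_alias(shapes)); // 1
}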
src/targets/gpu/lowering.cpp (+2 -0)

@@ -21,6 +21,7 @@
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/elu.hpp>
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/logsoftmax.hpp>
#include <migraphx/gpu/add.hpp>
#include <migraphx/gpu/sub.hpp>
#include <migraphx/gpu/exp.hpp>

@@ -97,6 +98,7 @@ struct miopen_apply
        add_extend_op<miopen_contiguous, op::contiguous>("contiguous");
        add_extend_op<hip_concat, op::concat>("concat");
        add_extend_op<miopen_softmax, op::softmax>("softmax");
        add_extend_op<hip_logsoftmax, op::logsoftmax>("logsoftmax");
        add_extend_op<hip_gather, op::gather>("gather");
        add_extend_op<hip_pad, op::pad>("pad");
test/cpu_ops_test.cpp (+170 -0)

@@ -1223,6 +1223,176 @@ TEST_CASE(softmax_test)
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(logsoftmax_test_axis_0)
{
    migraphx::program p;
    std::vector<float> a = {
        1.93885877,  -1.20006269, 0.90960855,  0.42108916,  -1.50797544, -1.31047913,
        1.07816336,  -1.13288733, -0.86411064, 0.97800238,  0.76631385,  2.07962834,
        -0.8940665,  -1.62855592, -0.53763057, -1.48165117, -0.64154112, 0.42486547,
        0.89330917,  -2.42022666, 0.192611,    -0.01257413, -1.5326607,  0.53137897,
        -1.52383859, 0.46994381,  0.00453619,  0.0066996,   1.58394908,  0.84216752,
        -0.04137941, -0.88580789, 1.44055158,  -0.17621241, -1.98917923, -0.08610038,
        0.79020567,  -0.67714548, 0.42774631,  0.1376574,   2.23569227,  1.16681234,
        -1.21191456, -0.28411502, -0.18688975, 1.67552548,  2.48357974,  0.95891282,
        -0.06616535, -0.99628491, 1.04314606,  -1.22943315, 0.76930403,  0.31106618};

    std::vector<float> s = {
        -2.71138556, -5.85030702, -3.74063578, -4.22915517, -6.15821977, -5.96072346,
        -3.57208097, -5.78313166, -5.51435497, -3.67224195, -3.88393048, -2.57061599,
        -5.54431083, -6.27880025, -5.1878749,  -6.1318955,  -5.29178545, -4.22537886,
        -3.75693516, -7.07047099, -4.45763333, -4.66281846, -6.18290503, -4.11886536,
        -6.17408292, -4.18030052, -4.64570814, -4.64354473, -3.06629525, -3.80807681,
        -4.69162374, -5.53605222, -3.20969275, -4.82645674, -6.63942356, -4.73634471,
        -3.86003866, -5.32738981, -4.22249802, -4.51258693, -2.41455206, -3.48343199,
        -5.86215889, -4.93435935, -4.83713408, -2.97471885, -2.16666459, -3.69133151,
        -4.71640968, -5.64652924, -3.60709827, -5.87967748, -3.8809403,  -4.33917815};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al  = p.add_literal(migraphx::literal{a_shape, a});
    int axis = 0;
    p.add_instruction(migraphx::op::logsoftmax{axis}, al);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(logsoftmax_test_axis_1)
{
    migraphx::program p;
    std::vector<float> a = {
        1.93885877,  -1.20006269, 0.90960855,  0.42108916,  -1.50797544, -1.31047913,
        1.07816336,  -1.13288733, -0.86411064, 0.97800238,  0.76631385,  2.07962834,
        -0.8940665,  -1.62855592, -0.53763057, -1.48165117, -0.64154112, 0.42486547,
        0.89330917,  -2.42022666, 0.192611,    -0.01257413, -1.5326607,  0.53137897,
        -1.52383859, 0.46994381,  0.00453619,  0.0066996,   1.58394908,  0.84216752,
        -0.04137941, -0.88580789, 1.44055158,  -0.17621241, -1.98917923, -0.08610038,
        0.79020567,  -0.67714548, 0.42774631,  0.1376574,   2.23569227,  1.16681234,
        -1.21191456, -0.28411502, -0.18688975, 1.67552548,  2.48357974,  0.95891282,
        -0.06616535, -0.99628491, 1.04314606,  -1.22943315, 0.76930403,  0.31106618};

    std::vector<float> s = {
        -1.77931988, -4.91824134, -2.80857010, -3.29708949, -5.22615409, -5.02865778,
        -2.64001529, -4.85106598, -4.58228929, -2.74017627, -2.95186480, -1.63855031,
        -4.61224515, -5.34673457, -4.25580922, -5.19982982, -4.35971977, -3.29331318,
        -2.82486948, -6.13840531, -3.52556765, -3.73075278, -5.25083935, -3.18679968,
        -5.24201724, -3.24823484, -3.71364246, -4.14309917, -2.56584969, -3.30763125,
        -4.19117818, -5.03560666, -2.70924719, -4.32601118, -6.13897800, -4.23589915,
        -3.35959310, -4.82694425, -3.72205246, -4.01214137, -1.91410650, -2.98298643,
        -5.36171333, -4.43391379, -4.33668852, -2.47427329, -1.66621903, -3.19088595,
        -4.21596412, -5.14608368, -3.10665271, -5.37923192, -3.38049474, -3.83873259};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al  = p.add_literal(migraphx::literal{a_shape, a});
    int axis = 1;
    p.add_instruction(migraphx::op::logsoftmax{axis}, al);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(logsoftmax_test_axis_2)
{
    migraphx::program p;
    std::vector<float> a = {
        1.93885877,  -1.20006269, 0.90960855,  0.42108916,  -1.50797544, -1.31047913,
        1.07816336,  -1.13288733, -0.86411064, 0.97800238,  0.76631385,  2.07962834,
        -0.8940665,  -1.62855592, -0.53763057, -1.48165117, -0.64154112, 0.42486547,
        0.89330917,  -2.42022666, 0.192611,    -0.01257413, -1.5326607,  0.53137897,
        -1.52383859, 0.46994381,  0.00453619,  0.0066996,   1.58394908,  0.84216752,
        -0.04137941, -0.88580789, 1.44055158,  -0.17621241, -1.98917923, -0.08610038,
        0.79020567,  -0.67714548, 0.42774631,  0.1376574,   2.23569227,  1.16681234,
        -1.21191456, -0.28411502, -0.18688975, 1.67552548,  2.48357974,  0.95891282,
        -0.06616535, -0.99628491, 1.04314606,  -1.22943315, 0.76930403,  0.31106618};

    std::vector<float> s = {
        -0.79763715, -3.93655861, -1.82688737, -2.31540676, -4.24447136, -4.04697505,
        -1.65833256, -3.86938325, -3.60060656, -1.81223672, -2.02392525, -0.71061076,
        -3.68430560, -4.41879502, -3.32786967, -4.27189027, -3.43178022, -2.36537363,
        -1.35498658, -4.66852241, -2.05568475, -2.26086988, -3.78095645, -1.71691678,
        -3.77213434, -1.77835194, -2.24375956, -2.74631770, -1.16906822, -1.91084978,
        -2.79439671, -3.63882519, -1.31246572, -2.92922971, -4.74219653, -2.83911768,
        -2.19738500, -3.66473615, -2.55984436, -2.84993327, -0.75189840, -1.82077833,
        -4.19950523, -3.27170569, -3.17448042, -1.65286841, -0.84481415, -2.36948107,
        -3.39455924, -4.32467880, -2.28524783, -4.55782704, -2.55908986, -3.01732771};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al  = p.add_literal(migraphx::literal{a_shape, a});
    int axis = 2;
    p.add_instruction(migraphx::op::logsoftmax{axis}, al);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(logsoftmax_test_axis_3)
{
    migraphx::program p;
    std::vector<float> a = {
        1.93885877,  -1.20006269, 0.90960855,  0.42108916,  -1.50797544, -1.31047913,
        1.07816336,  -1.13288733, -0.86411064, 0.97800238,  0.76631385,  2.07962834,
        -0.8940665,  -1.62855592, -0.53763057, -1.48165117, -0.64154112, 0.42486547,
        0.89330917,  -2.42022666, 0.192611,    -0.01257413, -1.5326607,  0.53137897,
        -1.52383859, 0.46994381,  0.00453619,  0.0066996,   1.58394908,  0.84216752,
        -0.04137941, -0.88580789, 1.44055158,  -0.17621241, -1.98917923, -0.08610038,
        0.79020567,  -0.67714548, 0.42774631,  0.1376574,   2.23569227,  1.16681234,
        -1.21191456, -0.28411502, -0.18688975, 1.67552548,  2.48357974,  0.95891282,
        -0.06616535, -0.99628491, 1.04314606,  -1.22943315, 0.76930403,  0.31106618};

    std::vector<float> s = {
        -0.33690375, -3.47582521, -1.36615397, -0.27936556, -2.20843016, -2.01093385,
        -0.22551114, -2.43656183, -2.16778514, -1.57241522, -1.78410375, -0.47078926,
        -1.06745881, -1.80194823, -0.71102288, -2.30719726, -1.46708721, -0.40068062,
        -0.42698261, -3.74051844, -1.12768078, -1.07891856, -2.59900513, -0.53496546,
        -2.56139951, -0.56761711, -1.03302473, -2.09771276, -0.52046328, -1.26224484,
        -1.76322959, -2.60765807, -0.28129860, -0.81424303, -2.62720985, -0.72413100,
        -0.65570381, -2.12305496, -1.01816317, -2.48063402, -0.38259915, -1.45147908,
        -1.84310238, -0.91530284, -0.81807757, -1.31692881, -0.50887455, -2.03354147,
        -1.48767160, -2.41779116, -0.37836019, -2.56853147, -0.56979429, -1.02803214};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al  = p.add_literal(migraphx::literal{a_shape, a});
    int axis = 3;
    p.add_instruction(migraphx::op::logsoftmax{axis}, al);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(logsoftmax_test_axis_4)
{
    migraphx::program p;
    std::vector<float> a = {
        1.93885877,  -1.20006269, 0.90960855,  0.42108916,  -1.50797544, -1.31047913,
        1.07816336,  -1.13288733, -0.86411064, 0.97800238,  0.76631385,  2.07962834,
        -0.8940665,  -1.62855592, -0.53763057, -1.48165117, -0.64154112, 0.42486547,
        0.89330917,  -2.42022666, 0.192611,    -0.01257413, -1.5326607,  0.53137897,
        -1.52383859, 0.46994381,  0.00453619,  0.0066996,   1.58394908,  0.84216752,
        -0.04137941, -0.88580789, 1.44055158,  -0.17621241, -1.98917923, -0.08610038,
        0.79020567,  -0.67714548, 0.42774631,  0.1376574,   2.23569227,  1.16681234,
        -1.21191456, -0.28411502, -0.18688975, 1.67552548,  2.48357974,  0.95891282,
        -0.06616535, -0.99628491, 1.04314606,  -1.22943315, 0.76930403,  0.31106618};

    std::vector<float> s = {
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
        0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000};

    migraphx::shape a_shape{migraphx::shape::float_type, {2, 3, 3, 3}};
    auto al  = p.add_literal(migraphx::literal{a_shape, a});
    int axis = 4;
    p.add_instruction(migraphx::op::logsoftmax{axis}, al);
    p.compile(migraphx::cpu::target{});
    auto result = p.eval({});
    std::vector<float> results_vector;
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, s));
}

TEST_CASE(conv2d_test)
{
    migraphx::program p;
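A note on logsoftmax_test_axis_4: with a 4-dimensional input and axis == 4, every reduced row holds exactly one element, and the log-softmax of a single element is always zero, which is why the expected vector is all zeros. A minimal check, independent of MIGraphX:

// Log-softmax of a one-element row: x - max = 0, and -log(exp(0)) = 0.
#include <cmath>
#include <cstdio>

int main()
{
    float x = 1.93885877f;
    float y = (x - x) - std::log(std::exp(x - x)); // always 0 for a single element
    std::printf("%f\n", y);
}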
test/gpu/miopen.cpp (+35 -0)

@@ -2957,6 +2957,34 @@ struct test_lstm_bidirct_default_actv2
    }
};

template <int Axis>
struct test_logsoftmax
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {3, 4, 5, 6}};
        auto param = p.add_parameter("0", s);
        p.add_instruction(migraphx::op::logsoftmax{Axis}, param);

        return p;
    }
};

template <int Axis>
struct test_logsoftmax_1
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        migraphx::shape s{migraphx::shape::float_type, {3}};
        auto param = p.add_parameter("0", s);
        p.add_instruction(migraphx::op::logsoftmax{Axis}, param);

        return p;
    }
};

int main()
{
    verify_program<test_relu_lrn>();

@@ -3074,4 +3102,11 @@ int main()
    verify_program<test_lstm_bidirct_default_actv>();
    verify_program<test_lstm_bidirct_default_actv1>();
    verify_program<test_lstm_bidirct_default_actv2>();
    verify_program<test_logsoftmax<0>>();
    verify_program<test_logsoftmax<1>>();
    verify_program<test_logsoftmax<2>>();
    verify_program<test_logsoftmax<3>>();
    verify_program<test_logsoftmax<4>>();
    verify_program<test_logsoftmax_1<0>>();
    verify_program<test_logsoftmax_1<1>>();
}
test/onnx/logsoftmax_test.onnx (new file, mode 100644, +17 -0)

Binary ONNX protobuf (rendered as garbled text on the original page). From the
recoverable fragments, it appears to define a graph named test_logsoftmax with a
single LogSoftmax node, input x, output y, and an axis attribute. No newline at
end of file.
test/onnx/onnx_test.cpp (+11 -0)

@@ -672,4 +672,15 @@ TEST_CASE(add_fp16_test)
    EXPECT(p == prog);
}

TEST_CASE(logsoftmax)
{
    migraphx::program p;
    auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
    int axis = 1;
    p.add_instruction(migraphx::op::logsoftmax{axis}, l0);
    auto prog = migraphx::parse_onnx("logsoftmax_test.onnx");

    EXPECT(p == prog);
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/op_shape_test.cpp (+55 -0)

@@ -316,6 +316,61 @@ TEST_CASE(gather)
    }
}

TEST_CASE(logsoftmax)
{
    {
        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
        int axis = 0;
        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
                     migraphx::op::logsoftmax{axis},
                     input);
    }

    {
        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
        int axis = 1;
        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
                     migraphx::op::logsoftmax{axis},
                     input);
    }

    {
        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
        int axis = 2;
        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
                     migraphx::op::logsoftmax{axis},
                     input);
    }

    {
        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
        int axis = 3;
        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
                     migraphx::op::logsoftmax{axis},
                     input);
    }

    {
        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
        int axis = 4;
        expect_shape(migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}},
                     migraphx::op::logsoftmax{axis},
                     input);
    }

    {
        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
        int axis = 5;
        throws_shape(migraphx::op::logsoftmax{axis}, input);
    }

    {
        migraphx::shape input{migraphx::shape::float_type, {2, 3, 4, 5}};
        int axis = -1;
        throws_shape(migraphx::op::logsoftmax{axis}, input);
    }
}

TEST_CASE(dot)
{
    {