gaoqiong/MIGraphX · Commit 2af03c23 (unverified)
Authored Nov 29, 2022 by Charlie Lin; committed by GitHub on Nov 29, 2022

Merge branch 'develop' into dyn_gemm

Parents: 50604cd3, 32b08891
Showing 14 changed files, with 305 additions and 114 deletions (+305, -114):
.github/workflows/performance.yaml           (+4, -4)
examples/migraphx/migraphx_driver/README.md  (+1, -0)
src/eliminate_contiguous.cpp                 (+17, -4)
src/fuse_pointwise.cpp                       (+10, -1)
src/include/migraphx/op/contiguous.hpp       (+18, -9)
src/include/migraphx/op/transpose.hpp        (+31, -15)
src/onnx/parse_transpose.cpp                 (+1, -1)
src/targets/gpu/CMakeLists.txt               (+5, -2)
test/fuse_pointwise.cpp                      (+29, -0)
test/onnx/gen_onnx.py                        (+15, -0)
test/onnx/onnx_test.cpp                      (+18, -0)
test/onnx/transpose_dyn_test.onnx            (+0, -0)
test/op_shape_test.cpp                       (+28, -0)
test/ref_ops_test.cpp                        (+128, -78)
.github/workflows/performance.yaml

@@ -5,14 +5,14 @@ on:
     branches: [ develop ]
     types: [ opened, synchronize, closed ]
   schedule:
-    - cron: "0 5 * * 1-6"
+    - cron: "0 6 * * 1-6"
   workflow_dispatch:
     inputs:
       rocm_release:
         description: ROCm Version
         required: true
-        default: '5.2'
+        default: '5.3'
       performance_reports_repo:
         description: Result repository
         required: true
@@ -30,9 +30,9 @@ concurrency: "perftest-${{ github.head_ref || github.base_ref || 'schedule' }}"
 jobs:
   release:
-    uses: rocmsoftwareplatform/migraphx-benchmark/.github/workflows/perf-test.yml@main
+    uses: ROCmSoftwarePlatform/migraphx-benchmark/.github/workflows/perf-test.yml@main
     with:
-      rocm_release: ${{ github.event.inputs.rocm_release || '5.2' }}
+      rocm_release: ${{ github.event.inputs.rocm_release || '5.3' }}
       result_number: ${{ github.event.inputs.result_number || '10' }}
       flags: ${{ github.event.inputs.flags || '-s' }}
       performance_reports_repo: ${{ github.event.inputs.performance_reports_repo || 'ROCmSoftwarePlatform/migraphx-reports' }}
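Net effect of the workflow changes: the scheduled perf run moves from 05:00 to 06:00 UTC (the "1-6" day-of-week field keeps it on Monday through Saturday), the default ROCm release is bumped from 5.2 to 5.3, and the reusable-workflow path is corrected to the proper ROCmSoftwarePlatform capitalization.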
examples/migraphx/migraphx_driver/README.md

@@ -29,6 +29,7 @@ See below for a comprehensive list of commands and option arguments, as well as
 | --tf | Load file as a tensorflow graph |
 | --migraphx | Load file as a migraphx graph |
 | --migraphx-json | Load file as a migraphx JSON graph |
+| --batch | Set batch size for the model |
 | --nhwc | Treat tensorflow format as nhwc |
 | --nchw | Treat tensorflow format as nchw |
 | --skip-unknown-operators | Skip unknown operators when parsing and continue to parse |
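The new --batch row documents an option that sets the batch size used for the model. As a purely hypothetical invocation (the actual binary name and command syntax are described elsewhere in this README, not in this hunk): migraphx-driver perf resnet50.onnx --batch 32.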
src/eliminate_contiguous.cpp

@@ -42,6 +42,13 @@ static bool try_compute_shape(instruction_ref ins,
     try
     {
         shape new_shape = ins->get_operator().compute_shape(inputs, mods);
+        // Cannot tell if a dynamic shape will need to be made contiguous
+        if(new_shape.dynamic())
+        {
+            return false;
+        }
         // If the output shape is a standard shape, no need to try its output
         if(new_shape.standard())
         {
@@ -133,14 +140,20 @@ static void remove_contiguous(const std::string& op_name, module& m, F f)
         }
     }

-    // Perform evaluations in parallel
+    // Perform static contiguous evaluations in parallel
     std::vector<argument> literals(const_instructions.size());
     par_for(const_instructions.size(), 1, [&](const auto i) {
         auto c    = op::contiguous{};
         auto prev = const_instructions[i]->inputs().front();
-        literals[i] = c.compute(c.compute_shape({prev->get_shape()}), {prev->eval()});
+        // compute the output contiguous shape from the previous instruction shape
+        shape computed_shape                   = c.compute_shape({prev->get_shape()});
+        const std::vector<argument>& prev_eval = {prev->eval()};
+        // prev_eval should not be used in make_compute_output_shape() as computed_shape is static
+        auto co_shape = make_compute_output_shape(pack(c, computed_shape, prev_eval));
+        literals[i]   = c.compute(co_shape, prev_eval);
     });

+    // Replace static contiguous operations with a literal
     for(size_t i = 0; i < const_instructions.size(); i++)
     {
         auto l = m.add_literal(literals[i].get_shape(), literals[i].data());
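Two related changes for dynamic shapes here: try_compute_shape now returns false as soon as the recomputed shape is dynamic, since whether a dynamic output will actually need to be made contiguous cannot be decided at compile time; and the constant-folding path builds the operator's output shape explicitly via make_compute_output_shape(pack(...)), so that the updated contiguous::compute, which now takes a dyn_output (see contiguous.hpp below), can still be called on the static shapes this pass folds.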
src/fuse_pointwise.cpp

@@ -45,7 +45,16 @@ static literal get_scalar(instruction_ref ins)
         return {};
     auto e = ins->eval();
     literal r{};
-    e.visit_at([&](auto x) { r = literal{x}; });
+    // needed for bool as visit_at invokes as() which promotes bool to int8
+    // Without this we'll break type checks for logical ops that are fused.
+    if(e.get_shape().type() == shape::bool_type)
+    {
+        r = literal{e.at<bool>()};
+    }
+    else
+    {
+        e.visit_at([&](auto x) { r = literal{x}; });
+    }
     return r;
 }
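The bool special case exists because visit_at goes through as(), which promotes bool to int8 (as the new comment says). Reading the value with e.at<bool>() keeps the scalar literal typed as bool_type, so fused logical operators such as logical_xor still pass their type checks; the new contiguous_boolean_input test below exercises this path.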
src/include/migraphx/op/contiguous.hpp

@@ -28,6 +28,7 @@
 #include <migraphx/argument.hpp>
 #include <migraphx/shape_for_each.hpp>
 #include <migraphx/config.hpp>
+#include <migraphx/dyn_output.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -42,19 +43,27 @@ namespace op {
 struct contiguous
 {
     std::string name() const { return "contiguous"; }
     shape compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
-        if(inputs.front().standard())
-            return inputs.front();
-        auto lens = inputs.at(0).lens();
-        auto t    = inputs.at(0).type();
-        return {t, lens};
+        check_shapes{inputs, *this, true}.has(1);
+        auto s0 = inputs.front();
+        if(s0.dynamic() or s0.standard())
+        {
+            return s0;
+        }
+        else
+        {
+            const auto& lens = s0.lens();
+            auto t           = s0.type();
+            return {t, lens};
+        }
     }

-    argument compute(const shape& output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        assert(output_shape.standard());
-        argument result{output_shape};
+        assert(dyn_out.computed_shape.standard());
+        argument result{dyn_out.computed_shape};
         visit_all(result, args[0])([&](auto output, auto input) {
             shape_for_each(output.get_shape(), [&](const auto& idx) {
                 output(idx.begin(), idx.end()) = input(idx.begin(), idx.end());
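In short, compute_shape now opts into dynamic input shapes (the extra true argument to check_shapes) and passes a dynamic or standard shape through unchanged, while compute takes its output shape from the dyn_output wrapper resolved at evaluation time. A minimal sketch of the shape-level behavior, mirroring the contiguous_dyn_shape test added below; it assumes the internal MIGraphX headers used by the test suite and reads each dynamic_dimension triple as {min, max, optimal}:

#include <cassert>
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/shape.hpp>

int main()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // first dimension is dynamic (1 to 4), second is effectively fixed at 2
    migraphx::shape s0{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 2}}};
    auto x = mm->add_parameter("x", s0);
    // contiguous now accepts the dynamic shape and returns it unchanged
    auto c = mm->add_instruction(migraphx::make_op("contiguous"), x);
    assert(c->get_shape() == s0);
    return 0;
}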
src/include/migraphx/op/transpose.hpp

@@ -29,6 +29,7 @@
 #include <migraphx/config.hpp>
 #include <migraphx/value.hpp>
 #include <migraphx/op/normalize_attribute.hpp>
+#include <migraphx/dyn_output.hpp>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -45,17 +46,15 @@ struct transpose
     }
     std::string name() const { return "transpose"; }
     shape compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
+        check_shapes{inputs, *this, true}.has(1);
         auto input = inputs.at(0);
-        auto input_lens    = input.lens();
-        auto input_strides = input.strides();
-        auto t             = input.type();
-        if(dims.size() != input_lens.size())
+        if(dims.size() != input.ndim())
         {
-            MIGRAPHX_THROW("Permutation has wrong number of axes");
+            MIGRAPHX_THROW("TRANSPOSE: Permutation has wrong number of axes");
         }
         std::vector<int64_t> axes(dims.size());
         std::iota(axes.begin(), axes.end(), 0);
@@ -63,19 +62,36 @@ struct transpose
         {
             MIGRAPHX_THROW("TRANSPOSE: Invalid permutation");
         }
-        std::vector<size_t> output_lens(input_lens.size());
-        std::vector<size_t> output_strides(input_lens.size());
-        for(std::size_t i = 0; i < output_lens.size(); i++)
+        if(input.dynamic())
         {
-            output_lens[i]    = input_lens[dims[i]];
-            output_strides[i] = input_strides[dims[i]];
+            std::vector<shape::dynamic_dimension> output_dyn_dims(input.ndim());
+            std::transform(dims.cbegin(), dims.cend(), output_dyn_dims.begin(), [&](auto dim) {
+                return input.dyn_dims()[dim];
+            });
+            return {input.type(), output_dyn_dims};
+        }
+        else
+        {
+            auto input_lens    = input.lens();
+            auto input_strides = input.strides();
+            std::vector<size_t> output_lens(input.ndim());
+            std::vector<size_t> output_strides(input.ndim());
+            for(std::size_t i = 0; i < input.ndim(); i++)
+            {
+                output_lens[i]    = input_lens[dims[i]];
+                output_strides[i] = input_strides[dims[i]];
+            }
+            return {input.type(), output_lens, output_strides};
         }
-        return {t, output_lens, output_strides};
     }

-    argument compute(shape output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        return args[0].reshape(output_shape);
+        return args[0].reshape(dyn_out.computed_shape);
     }
     std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 0; }
 };
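For a dynamic input, the permutation is applied directly to the shape's dyn_dims(), so the {min, max, optimal} ranges are reordered rather than concrete lengths and strides; the static branch keeps the old lens/strides logic, now written in terms of ndim(). A small sketch mirroring the new transpose_dyn_shape0 test (same assumptions as the contiguous sketch above):

#include <cassert>
#include <migraphx/make_op.hpp>
#include <migraphx/program.hpp>
#include <migraphx/shape.hpp>

int main()
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape in{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}}};
    migraphx::shape expected{migraphx::shape::float_type, {{2, 2, 0}, {1, 4, 0}}};
    auto x = mm->add_parameter("x", in);
    // reversing the axes reorders the dynamic_dimension entries
    auto t = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), x);
    assert(t->get_shape() == expected);
    return 0;
}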
src/onnx/parse_transpose.cpp

@@ -47,7 +47,7 @@ struct parse_transpose : op_parser<parse_transpose>
         }

         // if perm is empty, use the default value
-        auto n_dim = args.front()->get_shape().lens().size();
+        auto n_dim = args.front()->get_shape().ndim();
         if(perm.empty())
         {
             perm.resize(n_dim);
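ndim() is defined for both static and dynamic shapes, whereas lens() assumes a static shape, so this one-line change lets the parser size the default permutation for dynamically shaped inputs as well.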
src/targets/gpu/CMakeLists.txt

@@ -233,11 +233,14 @@ get_target_property(MIOPEN_LOCATION MIOpen LOCATION)
 check_library_exists(MIOpen "miopenHiddenSetConvolutionFindMode" "${MIOPEN_LOCATION}" HAS_FIND_MODE_API)
 check_library_exists(MIOpen "miopenFindSolutions" "${MIOPEN_LOCATION}" HAS_FIND_2_API)
-if(HAS_FIND_2_API)
+# TODO: Set default to HAS_FIND_2_API
+set(MIGRAPHX_USE_FIND_2_API OFF CACHE BOOL "")
+if(MIGRAPHX_USE_FIND_2_API)
     target_compile_definitions(migraphx_gpu PUBLIC -DMIGRAPHX_HAS_FIND_2_API)
     message(STATUS "MIGraphx is using Find-2.0 API of MIOpen")
 else()
-    message(STATUS "MIOpen does not have Find-2.0 API")
+    message(STATUS "MIGraphx is using legacy Find API in MIOpen")
 endif()
 if(HAS_FIND_MODE_API)
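With this change, merely detecting miopenFindSolutions no longer switches MIGraphX to MIOpen's Find-2.0 path: the path is now gated behind the MIGRAPHX_USE_FIND_2_API cache option, which defaults to OFF (opt in with -DMIGRAPHX_USE_FIND_2_API=ON at configure time). The TODO records the intent to eventually default the option to the detected HAS_FIND_2_API value, and the else-branch message now reports which Find API is actually in use rather than what MIOpen lacks.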
test/fuse_pointwise.cpp

@@ -272,6 +272,35 @@ TEST_CASE(contiguous_input)
     EXPECT(p1 == p2);
 }

+TEST_CASE(contiguous_boolean_input)
+{
+    migraphx::shape s{migraphx::shape::bool_type, {2, 3}};
+    migraphx::shape s_lit{migraphx::shape::bool_type, {1}, {0}};
+    migraphx::program p1;
+    {
+        auto* mm = p1.get_main_module();
+        auto x   = mm->add_parameter("x", s);
+        auto one = mm->add_literal(migraphx::literal(s_lit, {1.0}));
+        auto yb =
+            mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s.lens()}}), one);
+        auto y    = mm->add_instruction(migraphx::make_op("contiguous"), yb);
+        auto xor1 = mm->add_instruction(migraphx::make_op("logical_xor"), x, y);
+        mm->add_return({xor1});
+    }
+    run_pass(p1);
+
+    migraphx::program p2;
+    {
+        auto* mm = p2.get_main_module();
+        auto x   = mm->add_parameter("x", s);
+        auto xor1 =
+            add_pointwise(p2, "main:pointwise0", {x}, [=](auto* pm, const auto& inputs) {
+                auto y = pm->add_literal(migraphx::literal(s_lit, {1}));
+                return pm->add_instruction(migraphx::make_op("logical_xor"), inputs[0], y);
+            });
+        mm->add_return({xor1});
+    }
+}
+
 TEST_CASE(all_scalar_input)
 {
     migraphx::shape s{migraphx::shape::float_type};
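The literal in this test is deliberately a broadcast bool scalar: without the get_scalar fix in src/fuse_pointwise.cpp above, fusing this graph would have turned it into an int8 literal inside the pointwise module and broken the logical_xor type check.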
test/onnx/gen_onnx.py

@@ -6277,6 +6277,21 @@ def transpose_test():
     return ([node], [x], [y])


+@onnx_test
+def transpose_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 2, 3])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 2, 2])
+
+    node = onnx.helper.make_node(
+        'Transpose',
+        perm=[0, 3, 1, 2],
+        inputs=['0'],
+        outputs=['1'],
+    )
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def transpose_gather_test():
     x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
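Passing None as a dimension to helper.make_tensor_value_info leaves that dimension symbolic in the generated ONNX file; that unnamed batch dimension is what MIGraphX treats as dynamic when it parses transpose_dyn_test.onnx.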
test/onnx/onnx_test.cpp

@@ -5973,6 +5973,24 @@ TEST_CASE(transpose_test)
     EXPECT(p == prog);
 }

+TEST_CASE(transpose_dyn_test)
+{
+    migraphx::program p;
+    auto* mm   = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
+    std::vector<int64_t> perm{0, 3, 1, 2};
+    auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
+    mm->add_return({t0});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog                     = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(topk_attrk_test)
 {
     migraphx::program p;
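options.default_dyn_dim_value = {1, 4, 0} tells the parser how to materialize the ONNX file's unspecified batch dimension: it becomes the dynamic range {min 1, max 4} (reading the triple as {min, max, optimal}, with 0 meaning no optimal hint), which is why the expected parameter shape leads with {1, 4, 0}.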
test/onnx/transpose_dyn_test.onnx  (new file, mode 0 → 100644)

File added (generated by transpose_dyn_test in gen_onnx.py).
test/op_shape_test.cpp

@@ -365,6 +365,12 @@ TEST_CASE(contiguous_shape)
     expect_shape(single, migraphx::make_op("contiguous"), single);
 }

+TEST_CASE(contiguous_dyn_shape)
+{
+    migraphx::shape s0{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 2}}};
+    expect_shape(s0, migraphx::make_op("contiguous"), s0);
+}
+
 TEST_CASE(contiguous_shape_scalar)
 {
     migraphx::shape output{migraphx::shape::float_type};
@@ -2273,6 +2279,28 @@ TEST_CASE(transpose_shape)
     throws_shape(migraphx::make_op("transpose", {{"permutation", {1, 2}}}), input);
 }

+TEST_CASE(transpose_dyn_shape0)
+{
+    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}}};
+    migraphx::shape output{migraphx::shape::float_type, {{2, 2, 0}, {1, 4, 0}}};
+    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1}}}), input);
+    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {1, 0}}}), input);
+}
+
+TEST_CASE(transpose_dyn_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}};
+    migraphx::shape output{migraphx::shape::float_type, {{4, 4, 0}, {4, 4, 0}, {1, 4, 0}}};
+    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1, 2}}}), input);
+    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {2, 1, 0}}}), input);
+}
+
+TEST_CASE(transpose_axes_error)
+{
+    migraphx::shape input{migraphx::shape::float_type, {2, 2}};
+    throws_shape(migraphx::make_op("transpose", {{"permutation", {1}}}), input);
+}
+
 TEST_CASE(step_test)
 {
     migraphx::shape s1{migraphx::shape::float_type, {1, 2, 4}};
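Together these shape tests pin down the new semantics: contiguous and transpose both accept dynamic shapes; an identity permutation returns the input's dynamic dimensions unchanged while a reversing permutation reorders them; and a permutation with the wrong number of axes still throws, now with the TRANSPOSE: prefix added in transpose.hpp.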
test/ref_ops_test.cpp  (+128, -78)

This diff is collapsed and not shown in this view.