gaoqiong / MIGraphX · Commits

Commit 9686cb33
Authored Feb 16, 2023 by charlie

Merge branch 'select_module_op' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_batch_pass

Parents: 84725d72, dd74a89a
Showing 15 changed files with 1344 additions and 1025 deletions.
src/include/migraphx/module.hpp            +4    -0
src/include/migraphx/op/select_module.hpp  +30   -71
src/include/migraphx/operation.hpp         +2    -11
src/include/migraphx/shape.hpp             +3    -0
src/program.cpp                            +1    -1
src/replace_allocate.cpp                   +7    -9
src/shape.cpp                              +11   -0
src/targets/gpu/lowering.cpp               +28   -0
test/op_shape_test.cpp                     +5    -15
test/ref_ops_test.cpp                      +1078 -907
test/shape_test.cpp                        +24   -0
test/verify/run_verify.cpp                 +10   -1
test/verify/test_select_module_add.cpp     +71   -0  (new file)
test/verify/test_select_module_reduce.cpp  +69   -0  (new file)
tools/include/operation.hpp                +1    -10
src/include/migraphx/module.hpp

@@ -54,6 +54,10 @@ using ins_dep_map = std::unordered_map<instruction_ref, std::unordered_set<ins
  */
 struct module
 {
+    // used by the replace_allocate pass:
+    // allocate memory in this module rather than using output parameters
+    bool use_local_alloc = false;
+
     module(const std::string& name = "");

     // move constructor
src/include/migraphx/op/select_module.hpp

@@ -26,8 +26,6 @@
 #include <migraphx/check_shapes.hpp>
 #include <migraphx/module.hpp>
-#include <migraphx/dyn_output.hpp>
-#include <set>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {

@@ -35,98 +33,59 @@ namespace op {
 struct select_module
 {
-    // output shape of the dynamic model
-    shape output_dyn_shape;
-    int input_batch_index  = -1;
-    int output_batch_index = -1;
-    std::string dyn_batch_param_name;
+    shape output_dyn_shapes;

     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
-        return pack(f(self.output_dyn_shape, "output_dyn_shape"),
-                    f(self.input_batch_index, "input_batch_index"),
-                    f(self.output_batch_index, "output_batch_index"),
-                    f(self.dyn_batch_param_name, "dyn_batch_param_name"));
+        return pack(f(self.output_dyn_shapes, "output_dyn_shapes"));
     }

     std::string name() const { return "select_module"; }

-    // runs once during model compilation with dynamic shape input
-    // may run on each model evaluation with static shape input
-    shape compute_shape(std::vector<shape> inputs) const
+    shape compute_shape(const std::vector<shape>&, std::vector<module_ref>) const
     {
-        check_shapes{inputs, *this, true}.has(1);
-        auto s0 = inputs.at(0);
-        if(s0.dynamic())
-        {
-            // should we check that the submodules have the same parameters here?
-            // check that no more than one parameter is non-fixed?
-            // would need to use version of compute_shape with the parameter list
-            return shape{output_dyn_shape};
-        }
-        else
-        {
-            auto batch_size            = s0.lens().at(input_batch_index);
-            auto dds                   = output_dyn_shape.dyn_dims();
-            dds.at(output_batch_index) = {batch_size, batch_size};
-            std::vector<std::size_t> dims;
-            if(std::all_of(dds.begin(), dds.end(), [](auto dd) { return dd.is_fixed(); }))
-            {
-                std::transform(
-                    dds.begin(), dds.end(), std::back_inserter(dims), [](auto d) { return d.max; });
-                return {output_dyn_shape.type(), dims};
-            }
-            else
-            {
-                MIGRAPHX_THROW("SELECT_MODULE: more than one input dimension was non-fixed");
-            }
-        }
+        return shape{output_dyn_shapes};
     }

-    argument compute(const dyn_output& dyn_out,
+    argument compute(const shape&,
                      const std::vector<argument>& args,
                      const std::vector<module_ref>& submodule_list,
                      const std::function<std::vector<argument>(
                          module_ref&, const std::unordered_map<std::string, argument>&)>& run) const
     {
-        std::vector<module_ref> modules_to_run;
-        for(const auto& mod : submodule_list)
-        {
-            // find submodule with the same parameter shape as the input data
-            auto p_shape = mod->get_parameter_shape(dyn_batch_param_name);
-            if(p_shape == args.at(0).get_shape())
-            {
-                modules_to_run.push_back(mod);
-                break;
-            }
-        }
-        // TODO if an exact match is not found, assemble module list from binary base
-
-        if(modules_to_run.empty())
-        {
-            MIGRAPHX_THROW("SELECT_MODULE: no compatible submodules found for input shape: " +
-                           migraphx::to_string(args.at(0).get_shape()));
-        }
-
-        std::set<std::string> pnames;
-        for(const auto& mod : modules_to_run)
-        {
-            // TODO If all the modules have the same parameters, this would only need to run once
-            auto names = mod->get_parameter_names();
-            pnames.insert(names.begin(), names.end());
-        }
-        assert(pnames.size() <= args.size());
+        // find submodule with input parameter shapes exactly the same as the input arguments
+        // assuming arguments are in the same order as the input parameters
+        auto module_iter =
+            std::find_if(submodule_list.cbegin(), submodule_list.cend(), [&](module_ref mr) {
+                auto param_names = mr->get_parameter_names();
+                assert(param_names.size() <= args.size());
+                return std::equal(param_names.cbegin(),
+                                  param_names.cend(),
+                                  args.cbegin(),
+                                  [&](auto p_name, auto a) {
+                                      return a.get_shape() == mr->get_parameter_shape(p_name);
+                                  });
+            });
+
+        if(module_iter == submodule_list.end())
+        {
+            MIGRAPHX_THROW("SELECT_MODULE: no compatible submodules found for given input shapes");
+        }
+
+        auto module_to_run = *module_iter;

         std::unordered_map<std::string, argument> params;
-        std::transform(pnames.begin(),
-                       pnames.end(),
+
+        // add input parameters
+        auto param_names = module_to_run->get_parameter_names();
+        assert(param_names.size() <= args.size());
+        std::transform(param_names.begin(),
+                       param_names.end(),
                        args.begin(),
                        std::inserter(params, params.end()),
-                       [](auto&& name, auto&& arg) { return std::make_pair(name, arg); });
+                       [](auto&& name, auto&& a) { return std::make_pair(name, a); });

-        // TODO run multiple modules and split the parameter data to each batch size
-        auto results = run(modules_to_run.at(0), params);
-        return results.at(0);
+        auto results = run(module_to_run, params);
+        return argument{results};
     }
 };
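With this change the operator carries a single output_dyn_shapes attribute (a tuple shape listing the possible output shapes), and submodule selection happens entirely in compute(): the chosen submodule is the one whose parameter shapes all equal the shapes of the incoming arguments, compared in parameter order. A minimal evaluation-time sketch, assuming a compiled program p built like the new select_module_add_test in test/ref_ops_test.cpp below (batch submodules batch_1 .. batch_4, each with a static "data" parameter):

    // feed a fixed {2, 4} argument; select_module routes it to "batch_2", whose
    // "data" parameter shape matches exactly; the operator's result is a tuple,
    // hence the get_tuple_elem instruction in the test programs
    migraphx::parameter_map params;
    migraphx::shape fixed_shape{migraphx::shape::float_type, {2, 4}};
    std::vector<float> input_data{-4, 8, -1, 4, -1, 8, 8, -4};
    params["data"] = migraphx::argument(fixed_shape, input_data.data());
    auto result    = p.eval(params).back();

If no submodule matches, compute() now throws the generic "no compatible submodules found for given input shapes" error rather than reporting the first input's shape.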
src/include/migraphx/operation.hpp

@@ -140,9 +140,9 @@ template <class T>
 auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
     -> decltype(x.normalize_compute_shape(inputs))
 {
-    dependent_type<operation, T> y = x;
     if(inputs.empty())
         MIGRAPHX_THROW("At least one input is required for " + x.name());
+    dependent_type<operation, T> y = x;
     normalize_attributes(y, inputs[0].max_lens());
     return any_cast<T>(y).normalize_compute_shape(inputs);
 }

@@ -168,7 +168,7 @@ shape compute_shape_op(const T& x, const std::vector<shape>& inputs)
 }

 template <class T>
-auto mod_compute_shape_op(rank<2>,
+auto mod_compute_shape_op(rank<1>,
                           const T& x,
                           const std::vector<shape>& inputs,
                           const std::vector<module_ref>& mod_args)

@@ -177,15 +177,6 @@ auto mod_compute_shape_op(rank<2>,
     return x.compute_shape(inputs, mod_args);
 }

-template <class T>
-auto mod_compute_shape_op(rank<1>,
-                          const T& x,
-                          const std::vector<shape>& inputs,
-                          const std::vector<module_ref>&) -> decltype(x.compute_shape(inputs))
-{
-    return x.compute_shape(inputs);
-}
-
 template <class T>
 shape mod_compute_shape_op(rank<0>,
                            const T& x,
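For orientation, the rank<N> parameters form an overload-priority ladder: rank<2> converts to rank<1> and rank<0>, so the highest-numbered overload whose return-type expression is well-formed wins. Deleting the old rank<1> overload and renumbering the module-aware one from rank<2> to rank<1> removes the fallback that silently ignored the module arguments. A standalone sketch of the idiom (hypothetical names, not MIGraphX code):

    #include <iostream>

    template <int N>
    struct rank : rank<N - 1>
    {
    };
    template <>
    struct rank<0>
    {
    };

    // preferred overload: only viable when T has compute(int)
    template <class T>
    auto dispatch(rank<1>, const T& x) -> decltype(x.compute(0))
    {
        return x.compute(0);
    }

    // lowest-priority fallback, chosen when the overload above is SFINAE'd out
    template <class T>
    int dispatch(rank<0>, const T&)
    {
        return -1;
    }

    struct has_compute
    {
        int compute(int i) const { return i + 42; }
    };
    struct no_compute
    {
    };

    int main()
    {
        std::cout << dispatch(rank<1>{}, has_compute{}) << '\n'; // prints 42
        std::cout << dispatch(rank<1>{}, no_compute{}) << '\n';  // prints -1
    }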
src/include/migraphx/shape.hpp

@@ -243,6 +243,9 @@ struct shape
     /// Return true if the shape is dynamic
     bool dynamic() const;

+    /// Return true if this shape or any of the sub_shapes are dynamic
+    bool any_of_dynamic() const;
+
     shape normalize_standard() const;

     shape with_lens(type_t t, const std::vector<std::size_t>& l) const;
src/program.cpp

@@ -379,7 +379,7 @@ std::vector<argument> generic_eval(const module* mod,
         }));
     }
     assert(results.find(ins) != results.end());
-    if(not ins->get_shape().dynamic())
+    if(not ins->get_shape().any_of_dynamic())
     {
         assert(results.at(ins).get_shape() == ins->get_shape());
     }
src/replace_allocate.cpp

@@ -104,19 +104,17 @@ void replace_allocate::apply(module& m) const
             continue;

         auto s = ins->get_shape();
-        if(not main_offload_copy and model.needs_out_params() and
-           contains(mod_output_names, ins))
+        if(not main_offload_copy and not(m.use_local_alloc) and model.needs_out_params() and
+           contains(mod_output_names, ins))
         {
             auto out_param = m.add_parameter(mod_output_names[ins], s);
             m.replace_instruction(ins, out_param);
             continue;
         }
-        m.replace_instruction(
-            ins,
-            m.insert_instruction(
-                ins, make_op(model.name(), migraphx::value{{"shape", to_value(s)}})));
+        else
+        {
+            m.replace_instruction(
+                ins, make_op(model.name(), migraphx::value{{"shape", to_value(s)}}));
+        }
     }
 }
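Net effect of the rewritten branch: an allocation becomes a module output parameter only when offload copy is off, the module is not allowed to allocate locally, the target model needs output parameters, and the instruction feeds a module output; every other allocation now goes through the else branch and is lowered directly to the target's allocate op (no longer via insert_instruction). A compressed restatement of the condition, as a hypothetical helper for illustration only:

    // mirrors: not main_offload_copy and not(m.use_local_alloc) and
    //          model.needs_out_params() and contains(mod_output_names, ins)
    bool lower_allocate_to_out_param(bool main_offload_copy,
                                     bool use_local_alloc,
                                     bool needs_out_params,
                                     bool is_module_output)
    {
        return not main_offload_copy and not use_local_alloc and needs_out_params and
               is_module_output;
    }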
src/shape.cpp

@@ -483,6 +483,17 @@ std::string shape::type_string() const { return name(this->type()); }

 bool shape::dynamic() const { return not impl->m_dyn_dims.empty(); }

+bool shape::any_of_dynamic() const
+{
+    if(this->dynamic())
+    {
+        return true;
+    }
+    return std::any_of(this->sub_shapes().cbegin(),
+                       this->sub_shapes().cend(),
+                       [](auto s) { return s.any_of_dynamic(); });
+}
+
 const std::vector<shape::dynamic_dimension>& shape::dyn_dims() const { return impl->m_dyn_dims; }

 std::vector<std::size_t> shape::min_lens() const
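The distinction matters for tuple shapes: a shape built from sub-shapes has no top-level dyn_dims(), so dynamic() reports false even when an element shape is dynamic, which is exactly the situation for the new select_module output (and why generic_eval above now checks any_of_dynamic()). A short usage sketch, mirroring the any_of_dynamic_* cases added to test/shape_test.cpp below (assumes <cassert>):

    std::vector<migraphx::shape> subs;
    subs.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}}); // dynamic element
    subs.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});        // static element
    migraphx::shape tuple_shape{subs};
    assert(not tuple_shape.dynamic());    // no top-level dynamic dimensions
    assert(tuple_shape.any_of_dynamic()); // but one sub-shape is dynamic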
src/targets/gpu/lowering.cpp

@@ -111,6 +111,7 @@ struct miopen_apply
         add_loop_op();
         add_neg_op();
         add_nms_op();
+        add_select_module_op();
     }

     void copy_params() const

@@ -358,6 +359,33 @@ struct miopen_apply
             return mod->replace_instruction(ins, gpu_out);
         });
     }

+    /**
+     * Turns on use_local_alloc in the select_module submodules.
+     * Changes the submodule returns to a hip::sync_stream.
+     */
+    void add_select_module_op()
+    {
+        apply_map.emplace("select_module", [=](instruction_ref ins) {
+            std::vector<instruction_ref> inputs = ins->inputs();
+            auto mod_args                       = ins->module_inputs();
+            for(auto smod : mod_args)
+            {
+                smod->use_local_alloc = true;
+                auto last_ins         = std::prev(smod->end());
+                if(last_ins->name() == "@return")
+                {
+                    for(auto out_ins : last_ins->inputs())
+                    {
+                        auto sync_out = smod->insert_instruction(
+                            last_ins, make_op("hip::sync_stream"), out_ins);
+                        smod->replace_return({sync_out});
+                    }
+                }
+            }
+            return ins;
+        });
+    }
 };

 void lowering::apply(module& m) const { miopen_apply{&m, this}.apply(); }
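For context, apply_map is the dispatch table miopen_apply uses during lowering: one handler per operator name, looked up and invoked for each matching instruction (the diff registers the "select_module" handler through apply_map.emplace). A standalone sketch of that pattern with simplified, hypothetical types, not the real MIGraphX interfaces:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct instruction
    {
        std::string name;
    };

    int main()
    {
        using handler = std::function<void(instruction&)>;
        std::unordered_map<std::string, handler> apply_map;

        // registration, analogous to add_select_module_op() above
        apply_map.emplace("select_module", [](instruction& ins) {
            std::cout << "lowering " << ins.name
                      << ": mark submodules use_local_alloc, sync their returns\n";
        });

        // only instructions with a registered handler get rewritten
        std::vector<instruction> mod = {{"add"}, {"select_module"}, {"relu"}};
        for(auto& ins : mod)
        {
            auto it = apply_map.find(ins.name);
            if(it != apply_map.end())
                it->second(ins);
        }
    }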
test/op_shape_test.cpp

@@ -2364,22 +2364,12 @@ TEST_CASE(rnn)

 TEST_CASE(select_module_dyn)
 {
     migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3}, {255, 255}, {255, 255}}};
-    migraphx::shape out_attr = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}};
-    expect_shape(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}},
-                 migraphx::make_op("select_module", {{"output_dyn_shape", migraphx::to_value(out_attr)}}),
-                 input);
-}
-
-TEST_CASE(select_module_static)
-{
-    migraphx::shape input{migraphx::shape::float_type, {3, 3, 255, 255}};
-    migraphx::shape out_attr = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}};
-    expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 1000}},
-                 migraphx::make_op("select_module",
-                                   {{"output_dyn_shape", migraphx::to_value(out_attr)},
-                                    {"output_batch_index", 0},
-                                    {"input_batch_index", 0}}),
-                 input);
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}});
+    migraphx::shape out_attr = migraphx::shape{sub_shapes};
+    expect_shape(out_attr,
+                 migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
+                 input);
 }
test/ref_ops_test.cpp
...
...
@@ -7277,7 +7277,54 @@ TEST_CASE(scatternd_reduction_test)
}
}
TEST_CASE(select_module_test)
TEST_CASE(select_module_add_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto broadcast_lit =
submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
auto add_ins = submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
submod->add_return({add_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-4, 8, -1, 4, -1, 8, 8, -4};
migraphx::parameter_map params;
migraphx::shape input_fixed_shape{migraphx::shape::float_type, {2, 4}};
params["data"] = migraphx::argument(input_fixed_shape, input_data.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{2, 14, 5, 10, 5, 14, 14, 2};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(select_module_reduce_test0)
{
migraphx::program p;
...
...
@@ -7288,25 +7335,28 @@ TEST_CASE(select_module_test)
auto sm_input = submod->add_parameter("data", sm_shape);
auto reduce_ins =
submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
auto squeeze_ins = submod->add_instruction(migraphx::make_op("squeeze"), reduce_ins);
auto squeeze_ins =
submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
submod->add_return({squeeze_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
auto input = mm->add_parameter("data", s);
migraphx::shape out_attr = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}};
mm->add_instruction(migraphx::make_op("select_module",
{{"output_dyn_shape", migraphx::to_value(out_attr)},
{"output_batch_index", 0},
{"input_batch_index", 0},
{"dyn_batch_param_name", "data"}}),
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch4});
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-4, 8, -1, 4, -1, 8, 8, -4};
...
...
@@ -7317,9 +7367,56 @@ TEST_CASE(select_module_test)
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{-5, 12, 7, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(scatternd_reduction_dyn_test)
{
TEST_CASE(select_module_reduce_test1)
{
migraphx::program p;
// create batch submodules
auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
auto* submod = p.create_module(module_name);
migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
auto sm_input = submod->add_parameter("data", sm_shape);
auto reduce_ins =
submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
auto squeeze_ins =
submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
submod->add_return({squeeze_ins});
return submod;
};
auto* batch1 = create_submodule(1, "batch_1");
auto* batch2 = create_submodule(2, "batch_2");
auto* batch3 = create_submodule(3, "batch_3");
auto* batch4 = create_submodule(4, "batch_4");
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
auto input = mm->add_parameter("data", s);
std::vector<migraphx::shape> sub_shapes = {};
sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
migraphx::shape out_attr = migraphx::shape{sub_shapes};
auto sm_ins = mm->add_instruction(
migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
{input},
{batch1, batch2, batch3, batch4});
auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
mm->add_return({ret});
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-4, 8, -1, 4, -1, 8, 8, -4, -4, 8, -1, 4, -1, 8, 8, -4};
migraphx::parameter_map params;
migraphx::shape input_fixed_shape{migraphx::shape::float_type, {4, 2, 2}};
params["data"] = migraphx::argument(input_fixed_shape, input_data.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{-5, 12, 7, 4, -5, 12, 7, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(scatternd_reduction_dyn_test)
{
// reduction = add, with dynamic input shapes
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -7341,10 +7438,89 @@ TEST_CASE(select_module_test)
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {4, 4, 4}}; // data
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6,
7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4,
5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8};
std::vector<uint64_t> input_index{0, 2};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {2, 4, 4}}; // updates
std::vector<float> input_updates{5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4};
params["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
params["I"] = migraphx::argument(is, input_index.data());
params["U"] = migraphx::argument(input_fixed_shape1, input_updates.data());
auto result = p.eval(params).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{6, 7, 8, 9, 11, 12, 13, 14, 15, 14, 13, 12, 12, 11, 10, 9,
                        1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1,
                        9, 8, 7, 6, 6, 5, 4, 3, 4, 5, 6, 7, 9, 10, 11, 12,
                        8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sigmoid_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
auto l = mm->add_literal(migraphx::literal{s, {-1, 2, -3, 4}});
mm->add_instruction(migraphx::make_op("sigmoid"), l);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(sigmoid_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{2, 4, 0}, {2, 2, 0}}};
auto input = mm->add_parameter("X", s);
mm->add_instruction(migraphx::make_op("sigmoid"), input);
p.compile(migraphx::ref::target{});
std::vector<float> input_data{-1, 2, -3, 4};
migraphx::parameter_map params0;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {2, 2}};
params0["X"] = migraphx::argument(input_fixed_shape0, input_data.data());
auto result = p.eval(params0).back();
std::vector<float> results_vector(4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(scatternd_reduction_dyn_test)
{
// reduction = add, with dynamic input shapes
migraphx::program p;
auto* mm = p.get_main_module();
auto dtype = migraphx::shape::float_type;
auto itype = migraphx::shape::int64_type;
migraphx::shape::dynamic_dimension dd{3, 6, 0};
migraphx::shape ds{migraphx::shape::float_type, {dd, dd, dd}};
migraphx::shape is{itype, {2, 1}};
migraphx::shape us{dtype, {{2, 2, 0}, dd, dd}};
auto xdata = mm->add_parameter("X", ds);
auto xindex = mm->add_parameter("I", is);
auto xupdates = mm->add_parameter("U", us);
auto scatternd_add_op = migraphx::make_op("scatternd_add");
auto scatternd = mm->add_instruction(scatternd_add_op, xdata, xindex, xupdates);
mm->add_return({scatternd});
p.compile(migraphx::ref::target{});
migraphx::parameter_map params;
migraphx::shape input_fixed_shape0{migraphx::shape::float_type, {4, 4, 4}}; // data
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6,
7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4,
5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8};
std::vector<uint64_t> input_index{0, 2};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {2, 4, 4}}; // updates
std::vector<float> input_updates{5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
...
...
@@ -7362,10 +7538,10 @@ TEST_CASE(select_module_test)
9, 8, 7, 6, 6, 5, 4, 3, 4, 5, 6, 7, 9, 10, 11, 12,
8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sigmoid_test)
{
TEST_CASE(sigmoid_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
...
...
@@ -7377,10 +7553,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sigmoid_dyn_test)
{
TEST_CASE(sigmoid_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{2, 4, 0}, {2, 2, 0}}};
...
...
@@ -7397,10 +7573,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{sigmoid(-1), sigmoid(2), sigmoid(-3), sigmoid(4)};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sign_test)
{
TEST_CASE(sign_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {5}};
...
...
@@ -7413,10 +7589,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 1.0, -1.0, -1.0, 0.0};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sign_dyn_test)
{
TEST_CASE(sign_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{3, 8, 0};
...
...
@@ -7434,10 +7610,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.0, 1.0, -1.0, -1.0, 0.0};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sin_test)
{
TEST_CASE(sin_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3}};
...
...
@@ -7452,10 +7628,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return sinf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sin_dyn_test)
{
TEST_CASE(sin_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{3, 8, 0};
...
...
@@ -7475,10 +7651,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return sinf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sinh_test)
{
TEST_CASE(sinh_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
...
...
@@ -7493,10 +7669,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return sinhf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sinh_dynamic_test)
{
TEST_CASE(sinh_dynamic_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {{2, 4, 0}, {2, 4, 0}}};
...
...
@@ -7515,10 +7691,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return sinhf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(slice_test)
{
TEST_CASE(slice_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -7547,8 +7723,8 @@ TEST_CASE(select_module_test)
migraphx::shape s{migraphx::shape::int32_type, {2, 2, 3}};
auto l0 = mm->add_literal(migraphx::literal{s, data});
mm->add_instruction(
migraphx::make_op(
"slice",
{{"axes", {0, 1, 2}}, {"starts", {0, 0, 0}}, {"ends", {2, 2, 2}}}),
migraphx::make_op(
"slice",
{{"axes", {0, 1, 2}}, {"starts", {0, 0, 0}}, {"ends", {2, 2, 2}}}),
l0);
migraphx::shape s2{migraphx::shape::int32_type, {2, 2, 2}, {6, 3, 1}};
EXPECT(p.get_output_shapes().back() == s2);
...
...
@@ -7561,10 +7737,10 @@ TEST_CASE(select_module_test)
EXPECT(migraphx::verify_range(results_vector, gold));
EXPECT(result.get_shape() == sresult);
}
}
}
TEST_CASE(slice_dyn_test0)
{
TEST_CASE(slice_dyn_test0)
{
// Slice a single dynamic dimension. ax1 slice limits are smaller than min; ax2 "ends" is
// too large
migraphx::program p;
...
...
@@ -7572,8 +7748,7 @@ TEST_CASE(select_module_test)
migraphx::shape s{migraphx::shape::int32_type, {{2, 3, 0}, {2, 2, 0}, {3, 3, 0}}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(
migraphx::make_op("slice", {{"axes", {1, 2}}, {"starts", {0, 1}}, {"ends", {1, 6}}}),
x);
migraphx::make_op("slice", {{"axes", {1, 2}}, {"starts", {0, 1}}, {"ends", {1, 6}}}), x);
migraphx::shape s2{migraphx::shape::int32_type, {{2, 3, 0}, {1, 1, 0}, {2, 2, 0}}};
EXPECT(p.get_output_shapes().back() == s2);
p.compile(migraphx::ref::target{});
...
...
@@ -7594,10 +7769,10 @@ TEST_CASE(select_module_test)
EXPECT(migraphx::verify_range(results_vector, gold));
EXPECT(result.get_shape() == sresult);
}
}
TEST_CASE(slice_dyn_test1)
{
TEST_CASE(slice_dyn_test1)
{
// Slice all three dynamic dimensions
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -7625,10 +7800,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
EXPECT(result.get_shape() == sresult);
}
}
TEST_CASE(softmax_simple_test)
{
TEST_CASE(softmax_simple_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<float> a = {0.25, 0.75};
...
...
@@ -7641,10 +7816,10 @@ TEST_CASE(select_module_test)
std::vector<float> results_vector(2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
}
}
TEST_CASE(softmax_test)
{
TEST_CASE(softmax_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<float> a = {
...
...
@@ -7701,10 +7876,10 @@ TEST_CASE(select_module_test)
std::vector<float> results_vector(120);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, s));
}
}
TEST_CASE(softmax_dyn_test)
{
TEST_CASE(softmax_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape a_shape{migraphx::shape::float_type,
...
...
@@ -7764,10 +7939,10 @@ TEST_CASE(select_module_test)
0.17377149, 0.76075399, 0.20071237, 0.32632929, 0.36892858, 0.09416146, 0.26656723,
0.42914796};
EXPECT(migraphx::verify_range(results_vector, s));
}
}
TEST_CASE(sqdiff_test)
{
TEST_CASE(sqdiff_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3}};
...
...
@@ -7780,10 +7955,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {4, 4, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sqdiff_dyn_test)
{
TEST_CASE(sqdiff_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<migraphx::shape::dynamic_dimension> dd{{2, 6, 0}};
...
...
@@ -7804,10 +7979,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {4, 4, 4};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sqrt_test)
{
TEST_CASE(sqrt_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {5}};
...
...
@@ -7822,10 +7997,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return sqrtf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sqrt_dynamic_test)
{
TEST_CASE(sqrt_dynamic_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{3, 8, 0};
...
...
@@ -7845,10 +8020,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return sqrtf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(squeeze_test)
{
TEST_CASE(squeeze_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -7886,10 +8061,10 @@ TEST_CASE(select_module_test)
auto result = p.eval({}).back();
EXPECT(result.get_shape() == s2);
}
}
}
TEST_CASE(squeeze_dyn_test)
{
TEST_CASE(squeeze_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s1{migraphx::shape::float_type,
...
...
@@ -7905,10 +8080,10 @@ TEST_CASE(select_module_test)
auto result = p.eval(params0).back();
migraphx::shape s2{migraphx::shape::float_type, {4, 3, 1, 3}};
EXPECT(result.get_shape() == s2);
}
}
TEST_CASE(step_test)
{
TEST_CASE(step_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -7942,10 +8117,10 @@ TEST_CASE(select_module_test)
migraphx::shape s2{migraphx::shape::float_type, {1, 2, 2, 1}};
EXPECT(result.get_shape() == s2);
}
}
}
TEST_CASE(sub_test)
{
TEST_CASE(sub_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3}};
...
...
@@ -7958,10 +8133,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-2, -2, -2};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(sub_dyn_test)
{
TEST_CASE(sub_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<migraphx::shape::dynamic_dimension> dd{{2, 6, 0}};
...
...
@@ -7982,10 +8157,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {-2, -2, -2};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(tan_test)
{
TEST_CASE(tan_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3}};
...
...
@@ -8000,10 +8175,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return tanf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(tan_dynamic_test)
{
TEST_CASE(tan_dynamic_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{3, 8, 0};
...
...
@@ -8023,10 +8198,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return tanf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(tanh_test)
{
TEST_CASE(tanh_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 2}};
...
...
@@ -8041,10 +8216,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return tanhf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(tanh_dynamic_test)
{
TEST_CASE(tanh_dynamic_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape::dynamic_dimension dd{3, 8, 0};
...
...
@@ -8064,10 +8239,10 @@ TEST_CASE(select_module_test)
std::transform(
gold.begin(), gold.end(), gold.begin(), [](float n) -> float { return tanhf(n); });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(topk_test)
{
TEST_CASE(topk_test)
{
auto create_program = [](int64_t k, int64_t axis, int largest) {
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -8116,10 +8291,10 @@ TEST_CASE(select_module_test)
std::vector<int64_t> gold_ind = {4, 2, 0, 1, 3, 1, 4, 0, 3, 0, 4, 2};
EXPECT(results.second == gold_ind);
}
}
}
TEST_CASE(transpose_test)
{
TEST_CASE(transpose_test)
{
migraphx::shape a_shape{migraphx::shape::float_type, {1, 2, 2, 3}};
std::vector<float> data(12);
std::iota(data.begin(), data.end(), 0);
...
...
@@ -8145,19 +8320,17 @@ TEST_CASE(select_module_test)
auto result2 = p.eval({}).back();
std::vector<float> results_vector(12);
result2.visit(
[&](auto output) { results_vector.assign(output.begin(), output.end()); });
result2.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
}
TEST_CASE(transpose_dyn_test)
{
TEST_CASE(transpose_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type,
{{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}};
migraphx::shape s{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}};
auto l = mm->add_parameter("X", s);
std::vector<int64_t> perm = {0, 3, 1, 2};
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), l);
...
...
@@ -8177,10 +8350,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(unsqueeze_test)
{
TEST_CASE(unsqueeze_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -8205,10 +8378,10 @@ TEST_CASE(select_module_test)
auto result = p.eval({}).back();
EXPECT(result.get_shape() == s2);
}
}
}
TEST_CASE(unsqueeze_dyn_test)
{
TEST_CASE(unsqueeze_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
...
...
@@ -8224,10 +8397,10 @@ TEST_CASE(select_module_test)
auto result = p.eval(params0).back();
migraphx::shape s2{migraphx::shape::float_type, {4, 1, 3, 3}};
EXPECT(result.get_shape() == s2);
}
}
TEST_CASE(where_test)
{
TEST_CASE(where_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::bool_type, {3, 3}};
...
...
@@ -8251,10 +8424,10 @@ TEST_CASE(select_module_test)
gold[i] = b[i] ? x[i] : y[i];
EXPECT(migraphx::verify_range(result_vec, gold));
}
}
TEST_CASE(where_dyn_test)
{
TEST_CASE(where_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::bool_type, {{2, 3, 0}, {2, 3, 0}}};
...
...
@@ -8282,10 +8455,10 @@ TEST_CASE(select_module_test)
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1, 1, 1, 2, 2, 2, 1, 2, 1};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(where_broadcasted_inputs_test)
{
TEST_CASE(where_broadcasted_inputs_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sb{migraphx::shape::bool_type, {3, 3}};
...
...
@@ -8295,10 +8468,8 @@ TEST_CASE(select_module_test)
auto lb = mm->add_literal(migraphx::literal{sb, b});
auto lx = mm->add_literal(migraphx::literal(1.0f));
auto ly = mm->add_literal(migraphx::literal(2.0f));
auto mbx =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 3}}}), lx);
auto mby =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 3}}}), ly);
auto mbx = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 3}}}), lx);
auto mby = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 3}}}), ly);
auto w = mm->add_instruction(migraphx::make_op("where"), lb, mbx, mby);
mm->add_return({w});
p.compile(migraphx::ref::target{});
...
...
@@ -8312,6 +8483,6 @@ TEST_CASE(select_module_test)
gold[i] = b[i] ? x[i] : y[i];
EXPECT(migraphx::verify_range(result_vec, gold));
}
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/shape_test.cpp

@@ -238,6 +238,30 @@ TEST_CASE(test_shape_dynamic_serialize)
     EXPECT(s3 != s4);
 }

+TEST_CASE(any_of_dynamic_true)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s0{sub_shapes};
+    EXPECT(s0.any_of_dynamic());
+
+    sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 1}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s1{sub_shapes};
+    EXPECT(s1.any_of_dynamic());
+}
+
+TEST_CASE(any_of_dynamic_false)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {1, 4}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s{sub_shapes};
+    EXPECT(not s.any_of_dynamic());
+}
+
 TEST_CASE(test_shape_packed)
 {
     migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
test/verify/run_verify.cpp

@@ -184,9 +184,18 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
     std::vector<std::pair<std::string, result_future>> results;

     migraphx::parameter_map m;
     for(auto&& x : p.get_parameter_shapes())
     {
-        m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+        if(x.second.dynamic())
+        {
+            // create static shape using maximum dimensions
+            migraphx::shape static_shape{x.second.type(), x.second.max_lens()};
+            m[x.first] = migraphx::generate_argument(static_shape, get_hash(x.first));
+        }
+        else
+        {
+            m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+        }
     }
     auto gold_f = detach_async([=] { return run_ref(p, m); });
     for(const auto& tname : target_names)
test/verify/test_select_module_add.cpp  (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_add : verify_program<test_select_module_add>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
        auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
            auto sm_input = submod->add_parameter("data", sm_shape);
            auto broadcast_lit =
                submod->add_instruction(migraphx::make_op("multibroadcast"), literal_ins, sm_input);
            auto add_ins =
                submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
            submod->add_return({add_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
test/verify/test_select_module_reduce.cpp  (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_reduce : verify_program<test_select_module_reduce>
{
    migraphx::program create_program() const
    {
        migraphx::program p;

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
            auto submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
            auto sm_input = submod->add_parameter("data", sm_shape);
            auto reduce_ins =
                submod->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
            auto squeeze_ins =
                submod->add_instruction(migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
            submod->add_return({squeeze_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins = mm->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
tools/include/operation.hpp

@@ -168,7 +168,7 @@ shape compute_shape_op(const T& x, const std::vector<shape>& inputs)
 }

 template <class T>
-auto mod_compute_shape_op(rank<2>,
+auto mod_compute_shape_op(rank<1>,
                           const T& x,
                           const std::vector<shape>& inputs,
                           const std::vector<module_ref>& mod_args)

@@ -177,15 +177,6 @@ auto mod_compute_shape_op(rank<2>,
     return x.compute_shape(inputs, mod_args);
 }

-template <class T>
-auto mod_compute_shape_op(rank<1>,
-                          const T& x,
-                          const std::vector<shape>& inputs,
-                          const std::vector<module_ref>&) -> decltype(x.compute_shape(inputs))
-{
-    return x.compute_shape(inputs);
-}
-
 template <class T>
 shape mod_compute_shape_op(rank<0>,
                            const T& x,