Commit 9686cb33, authored Feb 16, 2023 by charlie

Merge branch 'select_module_op' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_batch_pass

Parents: 84725d72, dd74a89a

Showing 15 changed files with 1344 additions and 1025 deletions (+1344 -1025)
src/include/migraphx/module.hpp            +4     -0
src/include/migraphx/op/select_module.hpp  +30    -71
src/include/migraphx/operation.hpp         +2     -11
src/include/migraphx/shape.hpp             +3     -0
src/program.cpp                            +1     -1
src/replace_allocate.cpp                   +7     -9
src/shape.cpp                              +11    -0
src/targets/gpu/lowering.cpp               +28    -0
test/op_shape_test.cpp                     +5     -15
test/ref_ops_test.cpp                      +1078  -907
test/shape_test.cpp                        +24    -0
test/verify/run_verify.cpp                 +10    -1
test/verify/test_select_module_add.cpp     +71    -0
test/verify/test_select_module_reduce.cpp  +69    -0
tools/include/operation.hpp                +1     -10
src/include/migraphx/module.hpp

@@ -54,6 +54,10 @@ using ins_dep_map = std::unordered_map<instruction_ref, std::unordered_set<ins
  */
 struct module
 {
+    // used by the replace_allocate pass:
+    // allocate memory in this module rather than using output parameters
+    bool use_local_alloc = false;
+
     module(const std::string& name = "");

     // move constructor
src/include/migraphx/op/select_module.hpp

@@ -26,8 +26,6 @@
 #include <migraphx/check_shapes.hpp>
 #include <migraphx/module.hpp>
-#include <migraphx/dyn_output.hpp>
-#include <set>

 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {

@@ -35,98 +33,59 @@ namespace op {
 struct select_module
 {
-    shape output_dyn_shape;
-    int input_batch_index  = -1;
-    int output_batch_index = -1;
-    std::string dyn_batch_param_name;
+    // output shape of the dynamic model
+    shape output_dyn_shapes;

     template <class Self, class F>
     static auto reflect(Self& self, F f)
     {
-        return pack(f(self.output_dyn_shape, "output_dyn_shape"),
-                    f(self.input_batch_index, "input_batch_index"),
-                    f(self.output_batch_index, "output_batch_index"),
-                    f(self.dyn_batch_param_name, "dyn_batch_param_name"));
+        return pack(f(self.output_dyn_shapes, "output_dyn_shapes"));
     }

     std::string name() const { return "select_module"; }

-    // may run on each model evaluation with static shape input
-    shape compute_shape(std::vector<shape> inputs) const
+    // runs once during model compilation with dynamic shape input
+    shape compute_shape(const std::vector<shape>&, std::vector<module_ref>) const
     {
-        check_shapes{inputs, *this, true}.has(1);
-        auto s0 = inputs.at(0);
-        if(s0.dynamic())
-        {
-            // should we check that the submodules have the same parameters here?
-            // check that no more than one parameter is non-fixed?
-            // would need to use version of compute_shape with the parameter list
-            return shape{output_dyn_shape};
-        }
-        else
-        {
-            auto batch_size            = s0.lens().at(input_batch_index);
-            auto dds                   = output_dyn_shape.dyn_dims();
-            dds.at(output_batch_index) = {batch_size, batch_size};
-            std::vector<std::size_t> dims;
-            if(std::all_of(dds.begin(), dds.end(), [](auto dd) { return dd.is_fixed(); }))
-            {
-                std::transform(
-                    dds.begin(), dds.end(), std::back_inserter(dims), [](auto d) { return d.max; });
-                return {output_dyn_shape.type(), dims};
-            }
-            else
-            {
-                MIGRAPHX_THROW("SELECT_MODULE: more than one input dimension was non-fixed");
-            }
-        }
+        return shape{output_dyn_shapes};
     }

-    argument compute(const dyn_output& dyn_out,
+    argument compute(const shape&,
                      const std::vector<argument>& args,
                      const std::vector<module_ref>& submodule_list,
                      const std::function<std::vector<argument>(
                          module_ref&, const std::unordered_map<std::string, argument>&)>& run) const
     {
-        std::vector<module_ref> modules_to_run;
-        // find submodule with the same parameter shape as the input data
-        for(const auto& mod : submodule_list)
-        {
-            auto p_shape = mod->get_parameter_shape(dyn_batch_param_name);
-            if(p_shape == args.at(0).get_shape())
-            {
-                modules_to_run.push_back(mod);
-                break;
-            }
-        }
-        // TODO if an exact match is not found, assemble module list from binary base
-        if(modules_to_run.empty())
-        {
-            MIGRAPHX_THROW("SELECT_MODULE: no compatible submodules found for input shape: " +
-                           migraphx::to_string(args.at(0).get_shape()));
-        }
-        std::set<std::string> pnames;
-        for(const auto& mod : modules_to_run)
-        {
-            // TODO If all the modules have the same parameters, this would only need to run once
-            auto names = mod->get_parameter_names();
-            pnames.insert(names.begin(), names.end());
-        }
-        assert(pnames.size() <= args.size());
+        // find submodule with input parameter shapes exactly the same as the input arguments
+        // assuming arguments are in the same order as the input parameters
+        auto module_iter =
+            std::find_if(submodule_list.cbegin(), submodule_list.cend(), [&](module_ref mr) {
+                auto param_names = mr->get_parameter_names();
+                assert(param_names.size() <= args.size());
+                return std::equal(param_names.cbegin(),
+                                  param_names.cend(),
+                                  args.cbegin(),
+                                  [&](auto p_name, auto a) {
+                                      return a.get_shape() == mr->get_parameter_shape(p_name);
+                                  });
+            });
+        if(module_iter == submodule_list.end())
+        {
+            MIGRAPHX_THROW("SELECT_MODULE: no compatible submodules found for given input shapes");
+        }
+        auto module_to_run = *module_iter;

         std::unordered_map<std::string, argument> params;
-        std::transform(pnames.begin(),
-                       pnames.end(),
+        // add input parameters
+        auto param_names = module_to_run->get_parameter_names();
+        assert(param_names.size() <= args.size());
+        std::transform(param_names.begin(),
+                       param_names.end(),
                        args.begin(),
                        std::inserter(params, params.end()),
-                       [](auto&& name, auto&& arg) { return std::make_pair(name, arg); });
+                       [](auto&& name, auto&& a) { return std::make_pair(name, a); });

-        // TODO run multiple modules and split the parameter data to each batch size
-        auto results = run(modules_to_run.at(0), params);
-        return results.at(0);
+        auto results = run(module_to_run, params);
+        return argument{results};
     }
 };
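The new compute above boils down to a linear search that pairs each submodule's parameter names with the incoming arguments and compares their shapes element-wise. A minimal, self-contained sketch of that matching step, using plain STL containers in place of migraphx modules and arguments (fake_module and find_matching_module are made-up names for illustration, not MIGraphX API):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Stand-in for a compiled submodule: a name plus its parameter shapes
// (a shape here is just a vector of dimensions).
struct fake_module
{
    std::string name;
    std::map<std::string, std::vector<std::size_t>> param_shapes;
};

// Pick the first module whose parameter shapes match the argument shapes,
// assuming arguments arrive in the same order as the (sorted) parameter names.
const fake_module* find_matching_module(const std::vector<fake_module>& mods,
                                        const std::vector<std::vector<std::size_t>>& arg_shapes)
{
    auto it = std::find_if(mods.begin(), mods.end(), [&](const fake_module& m) {
        if(m.param_shapes.size() > arg_shapes.size())
            return false;
        return std::equal(m.param_shapes.begin(),
                          m.param_shapes.end(),
                          arg_shapes.begin(),
                          [](const auto& p, const auto& a) { return p.second == a; });
    });
    return it == mods.end() ? nullptr : &*it;
}

int main()
{
    std::vector<fake_module> mods = {{"batch_2", {{"data", {2, 4}}}},
                                     {"batch_4", {{"data", {4, 4}}}}};
    const fake_module* m = find_matching_module(mods, {{4, 4}});
    assert(m != nullptr and m->name == "batch_4");
    return 0;
}

The operator itself does the same walk over module_ref objects and raises MIGRAPHX_THROW when no submodule matches, rather than returning a null pointer.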
src/include/migraphx/operation.hpp

@@ -140,9 +140,9 @@ template <class T>
 auto compute_shape_op(rank<2>, const T& x, const std::vector<shape>& inputs)
     -> decltype(x.normalize_compute_shape(inputs))
 {
-    dependent_type<operation, T> y = x;
     if(inputs.empty())
         MIGRAPHX_THROW("At least one input is required for " + x.name());
+    dependent_type<operation, T> y = x;
     normalize_attributes(y, inputs[0].max_lens());
     return any_cast<T>(y).normalize_compute_shape(inputs);
 }

@@ -168,7 +168,7 @@ shape compute_shape_op(const T& x, const std::vector<shape>& inputs)
 }

 template <class T>
-auto mod_compute_shape_op(rank<2>,
+auto mod_compute_shape_op(rank<1>,
                           const T& x,
                           const std::vector<shape>& inputs,
                           const std::vector<module_ref>& mod_args)

@@ -177,15 +177,6 @@ auto mod_compute_shape_op(rank<2>,
     return x.compute_shape(inputs, mod_args);
 }

-template <class T>
-auto mod_compute_shape_op(rank<1>,
-                          const T& x,
-                          const std::vector<shape>& inputs,
-                          const std::vector<module_ref>&) -> decltype(x.compute_shape(inputs))
-{
-    return x.compute_shape(inputs);
-}
-
 template <class T>
 shape mod_compute_shape_op(rank<0>,
                           const T& x,
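The mod_compute_shape_op overloads rely on the rank<N> tag-dispatch idiom: rank<2> derives from rank<1>, which derives from rank<0>, so overload resolution prefers the highest-ranked overload whose trailing decltype constraint compiles. That is why deleting the middle overload only requires renumbering the surviving one from rank<2> to rank<1>. A small self-contained illustration of the idiom (toy names, not MIGraphX code):

#include <iostream>
#include <vector>

// rank<N> derives from rank<N-1>, so a rank<1> argument can also bind to a
// rank<0> parameter, but overload resolution prefers the most-derived match.
template <int N>
struct rank : rank<N - 1>
{
};
template <>
struct rank<0>
{
};

// Preferred overload: only viable if T has a member `shape_with_mods`.
template <class T>
auto do_compute(rank<1>, const T& x, const std::vector<int>& mods)
    -> decltype(x.shape_with_mods(mods))
{
    return x.shape_with_mods(mods);
}

// Fallback overload: used when the constrained one drops out of the set.
template <class T>
int do_compute(rank<0>, const T& x, const std::vector<int>&)
{
    return x.plain_shape();
}

struct with_mods
{
    int shape_with_mods(const std::vector<int>& mods) const
    {
        return static_cast<int>(mods.size());
    }
};
struct without_mods
{
    int plain_shape() const { return -1; }
};

int main()
{
    std::cout << do_compute(rank<1>{}, with_mods{}, {1, 2, 3}) << "\n";    // prints 3
    std::cout << do_compute(rank<1>{}, without_mods{}, {1, 2, 3}) << "\n"; // prints -1
    return 0;
}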
src/include/migraphx/shape.hpp

@@ -243,6 +243,9 @@ struct shape
     /// Return true if the shape is dynamic
     bool dynamic() const;

+    /// Return true if this shape or any of the sub_shapes are dynamic
+    bool any_of_dynamic() const;
+
     shape normalize_standard() const;

     shape with_lens(type_t t, const std::vector<std::size_t>& l) const;
src/program.cpp

@@ -379,7 +379,7 @@ std::vector<argument> generic_eval(const module* mod,
             }));
         }
         assert(results.find(ins) != results.end());
-        if(not ins->get_shape().dynamic())
+        if(not ins->get_shape().any_of_dynamic())
         {
             assert(results.at(ins).get_shape() == ins->get_shape());
         }
src/replace_allocate.cpp

@@ -104,19 +104,17 @@ void replace_allocate::apply(module& m) const
             continue;

         auto s = ins->get_shape();
-        if(not main_offload_copy and model.needs_out_params() and
+        if(not main_offload_copy and not(m.use_local_alloc) and model.needs_out_params() and
            contains(mod_output_names, ins))
         {
             auto out_param = m.add_parameter(mod_output_names[ins], s);
             m.replace_instruction(ins, out_param);
+            continue;
         }
-        else
-        {
-            m.replace_instruction(ins,
-                                  make_op(model.name(), migraphx::value{{"shape", to_value(s)}}));
-        }
+        m.replace_instruction(
+            ins,
+            m.insert_instruction(ins, make_op(model.name(), migraphx::value{{"shape", to_value(s)}})));
     }
 }
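The use_local_alloc flag lets replace_allocate skip the output-parameter path for modules that should keep their allocations local, as the new comment in module.hpp describes. A rough standalone sketch of that decision, with toy types and made-up names rather than the actual pass:

#include <iostream>
#include <string>

// Toy stand-in for a module and its allocation policy.
struct toy_module
{
    // When true, keep allocations inside the module instead of promoting
    // them to output parameters (mirrors module::use_local_alloc).
    bool use_local_alloc = false;
};

// Decide how a single allocation should be materialized.
std::string lower_allocation(const toy_module& m, bool main_offload_copy, bool needs_out_params)
{
    if(not main_offload_copy and not m.use_local_alloc and needs_out_params)
        return "output parameter";
    return "local allocation";
}

int main()
{
    toy_module main_mod;           // ordinary module: promote to an output parameter
    toy_module submod;             // select_module submodule
    submod.use_local_alloc = true; // set by the GPU lowering pass in this commit
    std::cout << lower_allocation(main_mod, false, true) << "\n"; // output parameter
    std::cout << lower_allocation(submod, false, true) << "\n";   // local allocation
    return 0;
}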
src/shape.cpp

@@ -483,6 +483,17 @@ std::string shape::type_string() const { return name(this->type()); }

 bool shape::dynamic() const { return not impl->m_dyn_dims.empty(); }

+bool shape::any_of_dynamic() const
+{
+    if(this->dynamic())
+    {
+        return true;
+    }
+    return std::any_of(this->sub_shapes().cbegin(),
+                       this->sub_shapes().cend(),
+                       [](auto s) { return s.any_of_dynamic(); });
+}
+
 const std::vector<shape::dynamic_dimension>& shape::dyn_dims() const { return impl->m_dyn_dims; }

 std::vector<std::size_t> shape::min_lens() const
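any_of_dynamic is a short recursion: a shape reports true if it is itself dynamic or if any sub-shape of a tuple shape is. The same pattern on a toy nested-shape type, as a self-contained sketch (not the migraphx::shape class):

#include <algorithm>
#include <cassert>
#include <vector>

// Toy shape: either a leaf (possibly dynamic) or a tuple of sub-shapes.
struct toy_shape
{
    bool is_dynamic = false;
    std::vector<toy_shape> sub_shapes;

    // True if this shape, or any nested sub-shape, is dynamic.
    bool any_of_dynamic() const
    {
        if(is_dynamic)
            return true;
        return std::any_of(sub_shapes.cbegin(), sub_shapes.cend(), [](const toy_shape& s) {
            return s.any_of_dynamic();
        });
    }
};

int main()
{
    toy_shape static_leaf{};
    toy_shape dynamic_leaf{true, {}};
    toy_shape tuple_with_dynamic{false, {static_leaf, dynamic_leaf}};
    toy_shape tuple_all_static{false, {static_leaf, static_leaf}};
    assert(tuple_with_dynamic.any_of_dynamic());
    assert(not tuple_all_static.any_of_dynamic());
    return 0;
}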
src/targets/gpu/lowering.cpp

@@ -111,6 +111,7 @@ struct miopen_apply
         add_loop_op();
         add_neg_op();
         add_nms_op();
+        add_select_module_op();
     }

     void copy_params() const

@@ -358,6 +359,33 @@ struct miopen_apply
             return mod->replace_instruction(ins, gpu_out);
         });
     }

+    /**
+     * Turns on use_local_alloc in the select_module submodules.
+     * Changes the submodule returns to a hip::sync_stream.
+     */
+    void add_select_module_op()
+    {
+        apply_map.emplace("select_module", [=](instruction_ref ins) {
+            std::vector<instruction_ref> inputs = ins->inputs();
+            auto mod_args                       = ins->module_inputs();
+            for(auto smod : mod_args)
+            {
+                smod->use_local_alloc = true;
+                auto last_ins         = std::prev(smod->end());
+                if(last_ins->name() == "@return")
+                {
+                    for(auto out_ins : last_ins->inputs())
+                    {
+                        auto sync_out = smod->insert_instruction(
+                            last_ins, make_op("hip::sync_stream"), out_ins);
+                        smod->replace_return({sync_out});
+                    }
+                }
+            }
+            return ins;
+        });
+    }
 };

 void lowering::apply(module& m) const { miopen_apply{&m, this}.apply(); }
test/op_shape_test.cpp

@@ -2364,25 +2364,15 @@ TEST_CASE(rnn)
 TEST_CASE(select_module_dyn)
 {
     migraphx::shape input{migraphx::shape::float_type, {{1, 4}, {3, 3}, {255, 255}, {255, 255}}};
-    migraphx::shape out_attr = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}};
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}});
+    migraphx::shape out_attr = migraphx::shape{sub_shapes};
     expect_shape(
-        migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}},
-        migraphx::make_op("select_module", {{"output_dyn_shape", migraphx::to_value(out_attr)}}),
+        out_attr,
+        migraphx::make_op("select_module", {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
         input);
 }

-TEST_CASE(select_module_static)
-{
-    migraphx::shape input{migraphx::shape::float_type, {3, 3, 255, 255}};
-    migraphx::shape out_attr = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {1000, 1000}}};
-    expect_shape(migraphx::shape{migraphx::shape::float_type, {3, 1000}},
-                 migraphx::make_op("select_module",
-                                   {{"output_dyn_shape", migraphx::to_value(out_attr)},
-                                    {"output_batch_index", 0},
-                                    {"input_batch_index", 0}}),
-                 input);
-}
-
 TEST_CASE(slice_shape)
 {
     migraphx::shape input{migraphx::shape::int32_type, {2, 2, 3}};
test/ref_ops_test.cpp

This diff is collapsed.
test/shape_test.cpp

@@ -238,6 +238,30 @@ TEST_CASE(test_shape_dynamic_serialize)
     EXPECT(s3 != s4);
 }

+TEST_CASE(any_of_dynamic_true)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s0{sub_shapes};
+    EXPECT(s0.any_of_dynamic());
+
+    sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 1}, {4, 4}}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s1{sub_shapes};
+    EXPECT(s1.any_of_dynamic());
+}
+
+TEST_CASE(any_of_dynamic_false)
+{
+    std::vector<migraphx::shape> sub_shapes = {};
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {1, 4}});
+    sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
+    migraphx::shape s{sub_shapes};
+    EXPECT(not s.any_of_dynamic());
+}
+
 TEST_CASE(test_shape_packed)
 {
     migraphx::shape s{migraphx::shape::float_type, {2, 2}, {2, 1}};
test/verify/run_verify.cpp

@@ -185,7 +185,16 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
     migraphx::parameter_map m;
     for(auto&& x : p.get_parameter_shapes())
     {
-        m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+        if(x.second.dynamic())
+        {
+            // create static shape using maximum dimensions
+            migraphx::shape static_shape{x.second.type(), x.second.max_lens()};
+            m[x.first] = migraphx::generate_argument(static_shape, get_hash(x.first));
+        }
+        else
+        {
+            m[x.first] = migraphx::generate_argument(x.second, get_hash(x.first));
+        }
     }
     auto gold_f = detach_async([=] { return run_ref(p, m); });
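For a dynamic parameter, the verifier now builds a static shape from the upper bound of every dynamic dimension (max_lens) before generating input data. A small sketch of that collapse step, with {min, max} pairs standing in for migraphx dynamic dimensions (the free function max_lens below is a hypothetical helper, not the shape member used above):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// A dynamic dimension as a {min, max} range; picking the upper bound of every
// dimension yields a concrete (static) set of lengths.
std::vector<std::size_t> max_lens(const std::vector<std::pair<std::size_t, std::size_t>>& dyn_dims)
{
    std::vector<std::size_t> lens;
    lens.reserve(dyn_dims.size());
    for(const auto& dd : dyn_dims)
        lens.push_back(dd.second);
    return lens;
}

int main()
{
    // e.g. a {1..4} x 3 x 224 x 224 dynamic input becomes a 4 x 3 x 224 x 224 static one
    std::vector<std::pair<std::size_t, std::size_t>> dyn_dims = {
        {1, 4}, {3, 3}, {224, 224}, {224, 224}};
    assert((max_lens(dyn_dims) == std::vector<std::size_t>{4, 3, 224, 224}));
    return 0;
}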
test/verify/test_select_module_add.cpp  (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_add : verify_program<test_select_module_add>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();

        migraphx::shape lit_s{migraphx::shape{migraphx::shape::float_type, {1}}};
        auto literal_ins = mm->add_literal(migraphx::literal{lit_s, {6}});

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
            auto* submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 4}};
            auto sm_input      = submod->add_parameter("data", sm_shape);
            auto broadcast_lit = submod->add_instruction(
                migraphx::make_op("multibroadcast"), literal_ins, sm_input);
            auto add_ins =
                submod->add_instruction(migraphx::make_op("add"), sm_input, broadcast_lit);
            submod->add_return({add_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {4, 4}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins              = mm->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
test/verify/test_select_module_reduce.cpp  (new file, 0 → 100644)
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>

struct test_select_module_reduce : verify_program<test_select_module_reduce>
{
    migraphx::program create_program() const
    {
        migraphx::program p;

        // create batch submodules
        auto create_submodule = [&](std::size_t batch_size, std::string module_name) {
            auto submod = p.create_module(module_name);
            migraphx::shape sm_shape{migraphx::shape::float_type, {batch_size, 2, 2}};
            auto sm_input   = submod->add_parameter("data", sm_shape);
            auto reduce_ins = submod->add_instruction(
                migraphx::make_op("reduce_sum", {{"axes", {1}}}), sm_input);
            auto squeeze_ins = submod->add_instruction(
                migraphx::make_op("squeeze", {{"axes", {1}}}), reduce_ins);
            submod->add_return({squeeze_ins});
            return submod;
        };
        auto* batch1 = create_submodule(1, "batch_1");
        auto* batch2 = create_submodule(2, "batch_2");
        auto* batch3 = create_submodule(3, "batch_3");
        auto* batch4 = create_submodule(4, "batch_4");

        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {{1, 4}, {2, 2}, {2, 2}}};
        auto input = mm->add_parameter("data", s);
        std::vector<migraphx::shape> sub_shapes = {};
        sub_shapes.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}}});
        migraphx::shape out_attr = migraphx::shape{sub_shapes};
        auto sm_ins              = mm->add_instruction(
            migraphx::make_op("select_module",
                              {{"output_dyn_shapes", migraphx::to_value(out_attr)}}),
            {input},
            {batch1, batch2, batch3, batch4});
        auto ret = mm->add_instruction(migraphx::make_op("get_tuple_elem", {{"index", 0}}), sm_ins);
        mm->add_return({ret});
        return p;
    }
};
tools/include/operation.hpp

@@ -168,7 +168,7 @@ shape compute_shape_op(const T& x, const std::vector<shape>& inputs)
 }

 template <class T>
-auto mod_compute_shape_op(rank<2>,
+auto mod_compute_shape_op(rank<1>,
                           const T& x,
                           const std::vector<shape>& inputs,
                           const std::vector<module_ref>& mod_args)

@@ -177,15 +177,6 @@ auto mod_compute_shape_op(rank<2>,
     return x.compute_shape(inputs, mod_args);
 }

-template <class T>
-auto mod_compute_shape_op(rank<1>,
-                          const T& x,
-                          const std::vector<shape>& inputs,
-                          const std::vector<module_ref>&) -> decltype(x.compute_shape(inputs))
-{
-    return x.compute_shape(inputs);
-}
-
 template <class T>
 shape mod_compute_shape_op(rank<0>,
                           const T& x,