gaoqiong / MIGraphX · Commit ad16770f

Merge branch 'jit-layernorm' into bert-opt2

Authored Jun 21, 2022 by Paul
Parents: c06d254a, b3955af4
Changes: 30 files. Showing 10 changed files with 280 additions and 17 deletions (+280 -17); the remaining files are on the following page.
src/tf/parse_relu6.cpp              +3   -8
src/tf/tf_parser.cpp                +7   -1
test/gpu/adjust_allocation.cpp      +10  -6
test/gpu/pack_int8_args.cpp         +4   -1
test/module_test.cpp                +14  -0
test/replace_allocate.cpp           +195 -0
test/tf/gen_tf_pb.py                +10  -0
test/tf/relu6_mismatch_test.pb      +8   -0
test/tf/tf_test.cpp                 +25  -0
tools/include/allocation_model.hpp  +4   -1
src/tf/parse_relu6.cpp

@@ -18,15 +18,10 @@ struct parse_relu6 : op_parser<parse_relu6>
                           const tf_parser::node_info& info,
                           std::vector<instruction_ref> args) const
     {
-        auto input_lens = args[0]->get_shape().lens();
-        auto min_val    = info.add_literal(0.0f);
-        auto max_val    = info.add_literal(6.0f);
+        auto min_val = info.add_literal(0.0f);
+        auto max_val = info.add_literal(6.0f);
-        min_val =
-            info.add_instruction(make_op("multibroadcast", {{"out_lens", input_lens}}), min_val);
-        max_val =
-            info.add_instruction(make_op("multibroadcast", {{"out_lens", input_lens}}), max_val);
-        return info.add_instruction(make_op("clip"), args.front(), min_val, max_val);
+        return info.add_common_op("clip", args[0], min_val, max_val);
     }
 };
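The removed broadcasts are not lost: add_common_op inserts them implicitly, and when the input type differs from the literal bounds it also inserts a convert to the common type, which is the case the new relu6_mismatch_test below exercises. A hedged sketch of what the old code spelled out by hand, in the parser's own unqualified style; clip_spelled_out is an illustrative name, not part of the commit:

// Sketch, not library code: the broadcasting that add_common_op now
// performs implicitly for a same-type input.
instruction_ref clip_spelled_out(const tf_parser::node_info& info,
                                 instruction_ref x,
                                 instruction_ref lo,
                                 instruction_ref hi)
{
    auto out_lens = x->get_shape().lens();
    // Broadcast the scalar bounds up to the input dimensions.
    lo = info.add_instruction(make_op("multibroadcast", {{"out_lens", out_lens}}), lo);
    hi = info.add_instruction(make_op("multibroadcast", {{"out_lens", out_lens}}), hi);
    return info.add_instruction(make_op("clip"), x, lo, hi);
}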
src/tf/tf_parser.cpp

@@ -79,7 +79,13 @@ instruction_ref tf_parser::node_info::add_broadcastable_binary_op(const std::str
                                                                   instruction_ref arg0,
                                                                   instruction_ref arg1) const
 {
-    return add_common_op(*mm, make_op(op_name), {arg0, arg1});
+    return this->add_common_op(op_name, arg0, arg1);
 }
 
+instruction_ref tf_parser::node_info::add_common_op(const std::string& op_name,
+                                                    std::vector<instruction_ref> inputs) const
+{
+    return migraphx::add_common_op(*mm, make_op(op_name), std::move(inputs));
+}
+
 int64_t tf_parser::parse_axis(const int64_t dim, const size_t num_dims) const
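Note that the call site in parse_relu6.cpp passes individual instruction_refs while the definition here takes a vector, so the header presumably also gains a small forwarding overload among the changed files not shown on this page. A hypothetical sketch of such a forwarder:

// Hypothetical (the actual header change is not visible here): collect
// the arguments and defer to the vector overload defined above.
template <class... Ts>
instruction_ref add_common_op(const std::string& op_name, Ts... xs) const
{
    return add_common_op(op_name, std::vector<instruction_ref>{xs...});
}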
test/gpu/adjust_allocation.cpp  (mode changed 100755 → 100644)

@@ -6,6 +6,7 @@
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/eliminate_contiguous.hpp>
+#include <migraphx/replace_allocate.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/op/add.hpp>

@@ -20,12 +21,15 @@
 void run_lowering(migraphx::program& p, bool offload_copy = false)
 {
     auto ctx = migraphx::gpu::context{};
-    migraphx::run_passes(*p.get_main_module(),
-                         {migraphx::auto_contiguous{},
-                          migraphx::gpu::lowering{&ctx, offload_copy},
-                          migraphx::dead_code_elimination{},
-                          migraphx::eliminate_contiguous{"gpu::contiguous"},
-                          migraphx::dead_code_elimination{}});
+    migraphx::run_passes(
+        *p.get_main_module(),
+        {migraphx::auto_contiguous{},
+         migraphx::gpu::lowering{&ctx, offload_copy},
+         migraphx::dead_code_elimination{},
+         migraphx::eliminate_contiguous{"gpu::contiguous"},
+         migraphx::dead_code_elimination{},
+         migraphx::replace_allocate{migraphx::gpu::gpu_allocation_model{}, offload_copy},
+         migraphx::dead_code_elimination{}});
 }
 
 TEST_CASE(tanh_shape)
test/gpu/pack_int8_args.cpp

@@ -2,13 +2,14 @@
 #include <migraphx/gpu/context.hpp>
 #include <migraphx/gpu/lowering.hpp>
 #include <migraphx/gpu/target.hpp>
+#include <migraphx/gpu/allocation_model.hpp>
 #include <migraphx/apply_alpha_beta.hpp>
 #include <migraphx/adjust_allocation.hpp>
 #include <migraphx/gpu/pack_int8_args.hpp>
 #include <migraphx/gpu/rocblas.hpp>
 #include <migraphx/auto_contiguous.hpp>
 #include <migraphx/dead_code_elimination.hpp>
-#include <migraphx/eliminate_contiguous.hpp>
+#include <migraphx/replace_allocate.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/iterator_for.hpp>
 #include <migraphx/pass_manager.hpp>

@@ -22,6 +23,8 @@ void run_passes(migraphx::module& m)
                         {migraphx::auto_contiguous{},
                          migraphx::gpu::lowering{&ctx, false},
                          migraphx::dead_code_elimination{},
+                         migraphx::replace_allocate{migraphx::gpu::gpu_allocation_model{}},
+                         migraphx::dead_code_elimination{},
                          migraphx::gpu::pack_int8_args{},
                          migraphx::dead_code_elimination{}});
 }
test/module_test.cpp

@@ -312,4 +312,18 @@ TEST_CASE(module_without_bypass)
     EXPECT(found);
 }
 
+TEST_CASE(multiple_module_dependency)
+{
+    // Test when an instruction from a submodule depends on previous module
+    migraphx::program p;
+    auto* mm  = p.get_main_module();
+    auto* sub = p.create_module("sub");
+    auto l1   = mm->add_literal(migraphx::literal(3));
+    // second same literal to make sure instruction_ref is being compared, rather than the
+    // instructions
+    sub->add_literal(migraphx::literal(3));
+    sub->add_instruction(sum_op{}, l1, l1);
+    EXPECT((sub->validate() == sub->end()));
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/replace_allocate.cpp  (new file, +195 -0)

#include <migraphx/allocation_model.hpp>
#include <migraphx/replace_allocate.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/check_shapes.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/register_op.hpp>
#include <basic_ops.hpp>
#include <test.hpp>

struct allocate_no_out : migraphx::auto_register_op<allocate_no_out>
{
    migraphx::shape s{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::pack(f(self.s, "shape"));
    }

    std::string name() const { return "allocate_no_out"; }

    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
        migraphx::check_shapes{inputs, *this}.has(0);
        return s;
    }

    migraphx::argument compute(migraphx::context&,
                               const migraphx::shape& output_shape,
                               const std::vector<migraphx::argument>&) const
    {
        return {output_shape};
    }
};

struct allocate_with_out : migraphx::auto_register_op<allocate_with_out>
{
    migraphx::shape s{};

    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return migraphx::pack(f(self.s, "shape"));
    }

    std::string name() const { return "allocate_with_out"; }

    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
        migraphx::check_shapes{inputs, *this}.has(0);
        return s;
    }

    migraphx::argument compute(migraphx::context&,
                               const migraphx::shape& output_shape,
                               const std::vector<migraphx::argument>&) const
    {
        return {output_shape};
    }
};

// allocation model that has no out params
struct allocation_no_out_model
{
    std::string name() const { return "allocate_no_out"; }
    migraphx::operation allocate(const migraphx::shape& s) const
    {
        return migraphx::make_op(name(), {{"shape", to_value(s)}});
    }
    migraphx::operation preallocate(const migraphx::shape&, const std::string&) const
    {
        return {};
    }
    std::string copy() const { return {}; }
    bool needs_out_params() const { return false; }
};

// allocation model with out params
struct allocation_with_out_model
{
    std::string name() const { return "allocate_with_out"; }
    migraphx::operation allocate(const migraphx::shape& s) const
    {
        return migraphx::make_op(name(), {{"shape", to_value(s)}});
    }
    migraphx::operation preallocate(const migraphx::shape&, const std::string&) const
    {
        return {};
    }
    std::string copy() const { return {}; }
    bool needs_out_params() const { return true; }
};

void run_pass(migraphx::module& m, migraphx::allocation_model model, bool offload_copy = false)
{
    migraphx::run_passes(m,
                         {migraphx::replace_allocate{std::move(model), offload_copy},
                          migraphx::dead_code_elimination{}});
}

void run_pass(migraphx::program& p, migraphx::allocation_model model, bool offload_copy = false)
{
    migraphx::run_passes(p,
                         {migraphx::replace_allocate{std::move(model), offload_copy},
                          migraphx::dead_code_elimination{}});
}

migraphx::module create_simple_program()
{
    migraphx::module m;
    migraphx::shape s{migraphx::shape::float_type, {5}};
    auto x     = m.add_parameter("x", s);
    auto y     = m.add_parameter("y", s);
    auto alloc = m.add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    m.add_instruction(pass_op{}, alloc, x, y);
    return m;
}

TEST_CASE(allocate_no_out)
{
    migraphx::module m = create_simple_program();
    run_pass(m, allocation_no_out_model{});
    EXPECT(std::any_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate_no_out");
    }));
}

TEST_CASE(allocate_with_out_param)
{
    migraphx::module m = create_simple_program();
    run_pass(m, allocation_with_out_model{});
    EXPECT(std::none_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate");
    }));
}

TEST_CASE(allocate_with_out_return)
{
    migraphx::module m = create_simple_program();
    m.add_return({std::prev(m.end())});
    run_pass(m, allocation_with_out_model{});
    EXPECT(std::none_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate");
    }));
}

TEST_CASE(allocate_with_out_no_params)
{
    migraphx::module m;
    migraphx::shape s{migraphx::shape::float_type, {5}};
    auto x      = m.add_parameter("x", s);
    auto y      = m.add_parameter("y", s);
    auto z      = m.add_parameter("z", s);
    auto alloc  = m.add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    auto pass1  = m.add_instruction(pass_op{}, alloc, x, y);
    auto alloc2 = m.add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    m.add_instruction(pass_op{}, alloc2, z, pass1);
    run_pass(m, allocation_with_out_model{});
    EXPECT(std::any_of(m.begin(), m.end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate_with_out");
    }));
}

TEST_CASE(if_allocate)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape cond_s{migraphx::shape::bool_type};
    auto cond = mm->add_parameter("cond", cond_s);
    migraphx::shape s{migraphx::shape::float_type, {5}};
    auto x = mm->add_parameter("x", s);
    auto y = mm->add_parameter("y", s);

    auto* then_mod = p.create_module("If_0_if");
    auto alloc =
        then_mod->add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    auto a1 = then_mod->add_instruction(pass_op{}, alloc, x);
    then_mod->add_return({a1});

    auto* else_mod = p.create_module("If_0_else");
    auto alloc1 =
        else_mod->add_instruction(migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}}));
    auto a2 = else_mod->add_instruction(pass_op{}, alloc1, y);
    else_mod->add_return({a2});

    mm->add_instruction(migraphx::make_op("if"), {cond}, {then_mod, else_mod});
    run_pass(p, allocation_with_out_model{});
    EXPECT(std::any_of(mm->begin(), mm->end(), [](const migraphx::instruction& ins) {
        return migraphx::contains(ins.name(), "allocate_with_out");
    }));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
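Read together, these cases pin down replace_allocate's contract: with needs_out_params() returning false, generic allocate instructions are rewritten to the model's own allocator op; with it returning true, the tests assert that no allocator op remains for an allocation reaching the module output, consistent with the allocation being replaced by an output parameter. A compact usage sketch built only from the fixtures defined above (demo is an illustrative name):

// Usage sketch reusing this file's fixtures.
void demo()
{
    // No-out model: "allocate" becomes "allocate_no_out".
    migraphx::module m1 = create_simple_program();
    run_pass(m1, allocation_no_out_model{});

    // With-out model: the final allocation disappears from the module,
    // its result handled through an output parameter instead.
    migraphx::module m2 = create_simple_program();
    run_pass(m2, allocation_with_out_model{});
}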
test/tf/gen_tf_pb.py

@@ -471,6 +471,15 @@ def relu6_test(g1):
         tf.nn.relu6(g1_input, 'relu6')
 
 
+@tf_test
+def relu6_mismatch_test(g1):
+    with g1.as_default():
+        g1_input = tf.compat.v1.placeholder(tf.float16,
+                                            shape=(1, 3, 13, 37),
+                                            name='0')
+        tf.nn.relu6(g1_input, 'relu6')
+
+
 @tf_test
 def reshape_test(g1):
     with g1.as_default():

@@ -676,6 +685,7 @@ if __name__ == '__main__':
     pow_test()
     relu_test()
     relu6_test()
+    relu6_mismatch_test()
     reshape_test()
     rsqrt_test()
     shape_test()
test/tf/relu6_mismatch_test.pb  (new file, +8 -0)

Binary TensorFlow graph for relu6_mismatch_test (a float16 Placeholder named '0' feeding a Relu6 node); the protobuf content is not reproducible as text. No newline at end of file.
test/tf/tf_test.cpp

@@ -706,6 +706,31 @@ TEST_CASE(relu6_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(relu6_mismatch_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    std::vector<size_t> input_lens{1, 3, 13, 37};
+    auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, input_lens});
+    auto min_val    = mm->add_literal(0.0f);
+    auto max_val    = mm->add_literal(6.0f);
+    auto l0_convert = mm->add_instruction(
+        migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l0);
+    min_val = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}), min_val);
+    max_val = mm->add_instruction(
+        migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}), max_val);
+    mm->add_instruction(migraphx::make_op("clip"), l0_convert, min_val, max_val);
+
+    auto prog = optimize_tf("relu6_mismatch_test.pb", false);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(reshape_test)
 {
     migraphx::program p;
tools/include/allocation_model.hpp

@@ -28,6 +28,8 @@ struct allocation_model
     operation allocate(const shape& s) const;
     /// Create a preallocated operator for the given shape
     operation preallocate(const shape& s, const std::string& id) const;
+    /// Check if outputs are to be inserted
+    bool needs_out_params() const;
 };
 
 #else

@@ -37,7 +39,8 @@ interface('allocation_model',
          virtual('name', returns = 'std::string', const = True),
          virtual('copy', returns = 'std::string', const = True),
          virtual('allocate', s = 'const shape&', returns = 'operation', const = True),
-         virtual('preallocate', s = 'const shape&', id = 'std::string', returns = 'operation', const = True)
+         virtual('preallocate', s = 'const shape&', id = 'std::string', returns = 'operation', const = True),
+         virtual('needs_out_params', returns = 'bool', const = True)
 )
 %>
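The #else branch generates a type-erased allocation_model from this interface spec, which is why run_pass in test/replace_allocate.cpp can take a migraphx::allocation_model by value and be handed a plain struct. Any type with these five members converts implicitly; a minimal conforming model as a sketch (illustrative name, not part of the commit):

// Minimal model satisfying the interface above; reuses the generic
// "allocate" op name rather than defining a custom allocator op.
struct identity_allocation_model
{
    std::string name() const { return "allocate"; }
    std::string copy() const { return {}; }
    migraphx::operation allocate(const migraphx::shape& s) const
    {
        return migraphx::make_op("allocate", {{"shape", migraphx::to_value(s)}});
    }
    migraphx::operation preallocate(const migraphx::shape&, const std::string&) const
    {
        return {}; // no preallocation, as in the test models above
    }
    bool needs_out_params() const { return false; }
};

// Implicit conversion into the erased interface:
migraphx::allocation_model model = identity_allocation_model{};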