gaoqiong / MIGraphX / Commits / 421ecad6

Commit 421ecad6, authored May 08, 2023 by Alan Turner
Merge remote-tracking branch 'origin/develop' into ck-gsg
Parents: 3d0426e9, 7cf05301
Changes: 84

Showing 20 changed files with 1060 additions and 63 deletions (+1060, -63)
test/api/test_gpu.cpp                       +99   -1
test/fuse_pointwise.cpp                     +32   -0
test/gpu/mlir.cpp                           +33   -0
test/gpu/quantization.cpp                   +11   -1
test/onnx/.onnxrt-commit                    +1    -1
test/onnx/onnx_test.cpp                     +19   -2
test/op_shape_test.cpp                      +27   -0
test/py/test_gpu.py                         +42   -3
test/py/test_gpu_offload.py                 +48   -11
test/py/test_instruction.py                 +53   -0
test/py/test_shape.py                       +42   -0
test/ref_ops_test.cpp                       +0    -44
test/shape_test.cpp                         +27   -0
test/simplify_algebra_test.cpp              +339  -0
test/verify/test_dot_mul_a.cpp              +50   -0
test/verify/test_dot_mul_b.cpp              +50   -0
test/verify/test_mul_dot_a.cpp              +49   -0
test/verify/test_mul_dot_b.cpp              +49   -0
test/verify/test_reduce_mean_bias_half.cpp  +48   -0
test/verify/test_softmax4.cpp               +41   -0
test/api/test_gpu.cpp
...
...
@@ -25,7 +25,6 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <migraphx/manage_ptr.hpp>
#include "test.hpp"
...
...
@@ -72,6 +71,105 @@ hip_ptr get_hip_buffer(size_t size)
    return hip_ptr{ptr};
}
// TODO: placeholder until we have a way to copy tuple arguments to/from device through c++ api
// TEST_CASE(dynamic_batch_load_and_run)
//{
// migraphx::onnx_options o_options;
// migraphx::dynamic_dimensions dyn_dims = {{1, 4, {2, 4}}, {3, 3}, {4, 4}, {4, 4}};
// o_options.set_dyn_input_parameter_shape("0", dyn_dims);
// dyn_dims = {{2, 2}, {3, 3}, {3, 3}, {3, 3}};
// o_options.set_dyn_input_parameter_shape("1", dyn_dims);
// auto p = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", o_options);
// migraphx::compile_options c_options;
// c_options.set_split_single_dyn_dim();
// p.compile(migraphx::target("gpu"), c_options);
// auto out_shapes = p.get_output_shapes();
// CHECK(out_shapes.size() == 1);
// EXPECT(out_shapes[0].dynamic());
//
// std::vector<float> a(0.12, 2*3*4*4);
// std::vector<float> c(0.75, 2*3*3*3);
//
// auto param_shapes = p.get_parameter_shapes();
// int batch_size = 2;
// std::unordered_map<std::string, migraphx::argument> arg_map;
//
// arg_map["0"] = migraphx::argument(param_shapes["0"].to_static(batch_size), a.data());
// arg_map["1"] = migraphx::argument(param_shapes["1"].to_static(batch_size), c.data());
//
// migraphx::program_parameters pp;
// std::vector<hip_ptr> buffs;
// std::vector<migraphx::argument> args;
//
// // copy to GPU and create parameter map
// for(auto&& name : param_shapes.names())
// {
// if(arg_map.find(name) != arg_map.end())
// {
// args.push_back(arg_map.at(name));
// }
// else
// {
// migraphx::shape static_shape = param_shapes[name].to_static(batch_size);
// auto output_arg = migraphx::argument(static_shape);
// args.push_back(output_arg);
// }
// buffs.push_back(get_hip_buffer(args.rbegin()->get_shape().bytes()));
// auto err = hipMemcpy(buffs.rbegin()->get(),
// args.rbegin()->data(),
// args.rbegin()->get_shape().bytes(),
// hipMemcpyHostToDevice);
// EXPECT(err == hipSuccess);
// pp.add(name, migraphx::argument(args.rbegin()->get_shape(), buffs.rbegin()->get()));
// }
//
// auto output = p.eval(pp)[0];
//
// // copy output back to host
// auto host_arg = migraphx::argument(output.get_shape());
// auto err = hipMemcpy(
// host_arg.data(), output.data(), output.get_shape().bytes(), hipMemcpyDeviceToHost);
// EXPECT(err == hipSuccess);
//}
TEST_CASE(dynamic_batch_load_and_run_offload)
{
    migraphx::onnx_options o_options;
    migraphx::dynamic_dimensions dyn_dims = {migraphx::dynamic_dimension{1, 4, {2, 4}},
                                             migraphx::dynamic_dimension{3, 3},
                                             migraphx::dynamic_dimension{4, 4},
                                             migraphx::dynamic_dimension{4, 4}};
    o_options.set_dyn_input_parameter_shape("0", dyn_dims);
    dyn_dims = {migraphx::dynamic_dimension{2, 2},
                migraphx::dynamic_dimension{3, 3},
                migraphx::dynamic_dimension{3, 3},
                migraphx::dynamic_dimension{3, 3}};
    o_options.set_dyn_input_parameter_shape("1", dyn_dims);
    auto p             = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", o_options);
    auto shapes_before = p.get_output_shapes();
    migraphx::compile_options c_options;
    c_options.set_offload_copy();
    p.compile(migraphx::target("gpu"), c_options);
    auto out_shapes = p.get_output_shapes();
    CHECK(out_shapes.size() == 1);
    EXPECT(out_shapes[0].dynamic());

    // batch size = 2
    std::vector<float> a(2 * 3 * 4 * 4, 0.12);
    std::vector<float> c(2 * 3 * 3 * 3, 0.75);

    migraphx::program_parameters pp;
    auto param_shapes = p.get_parameter_shapes();
    pp.add("0",
           migraphx::argument(migraphx::shape(migraphx_shape_float_type, {2, 3, 4, 4}), a.data()));
    pp.add("1",
           migraphx::argument(migraphx::shape(migraphx_shape_float_type, {2, 3, 3, 3}), c.data()));

    auto outputs = p.eval(pp);
    CHECK(shapes_before.size() == outputs.size());
    CHECK(bool{outputs.front().get_shape() ==
               migraphx::shape(migraphx_shape_float_type, {2, 1, 3, 3})});
}

TEST_CASE(load_and_run_async)
{
    auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
...
...
test/fuse_pointwise.cpp
...
...
@@ -329,4 +329,36 @@ TEST_CASE(all_scalar_input)
    EXPECT(p1 == p2);
}

TEST_CASE(no_input)
{
    migraphx::program p;
    {
        auto* mm = p.get_main_module();
        migraphx::shape g_shape{migraphx::shape::int64_type, {1}, {0}};
        migraphx::shape s_indices{migraphx::shape::int32_type, {3}};
        std::vector<int> indices{3, 800, 800};
        auto a0  = mm->add_literal(migraphx::literal{s_indices, indices});
        auto a1  = mm->add_literal(migraphx::literal{g_shape, {1}});
        int axis = 0;
        auto out = mm->add_instruction(migraphx::make_op("gather", {{"axis", axis}}), a0, a1);
        mm->add_return({out});
    }
    run_pass(p);
    // This should NOT create a pointwise module if there are no inputs here.
    migraphx::program p2;
    {
        auto* mm = p2.get_main_module();
        migraphx::shape g_shape{migraphx::shape::int64_type, {1}, {0}};
        migraphx::shape s_indices{migraphx::shape::int32_type, {3}};
        std::vector<int> indices{3, 800, 800};
        auto a0  = mm->add_literal(migraphx::literal{s_indices, indices});
        auto a1  = mm->add_literal(migraphx::literal{g_shape, {1}});
        int axis = 0;
        auto out = mm->add_instruction(migraphx::make_op("gather", {{"axis", axis}}), a0, a1);
        mm->add_return({out});
    }
    EXPECT(p == p2);
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/mlir.cpp
...
...
@@ -213,4 +213,37 @@ module {
    EXPECT(verify_mlir(m));
}

TEST_CASE(conv_int8_dequantize_quantize)
{
    const std::string mlir_output = R"__migraphx__(
module {
func.func @main(%arg0: tensor<2x8x3x3xi8>, %arg1: tensor<1x8x4x4xi8>, %arg2: tensor<1x2x2x2xf32>, %arg3: tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.quant_convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xi8>, tensor<2x8x3x3xi8>) -> tensor<1x2x2x2xi32>
%1 = migraphx.dequantizelinear(%0, %arg2, %arg3) : (tensor<1x2x2x2xi32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.quantizelinear(%1, %arg2, %arg3) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>, tensor<1x2x2x2xi32>) -> tensor<1x2x2x2xi32>
return %2 : tensor<1x2x2x2xi32>
}
}
)__migraphx__";
    migraphx::module m;
    auto x    = m.add_parameter("x", {migraphx::shape::int8_type, {1, 8, 4, 4}});
    auto w    = m.add_parameter("w", {migraphx::shape::int8_type, {2, 8, 3, 3}});
    auto conv = m.add_instruction(migraphx::make_op("quant_convolution"), x, w);
    migraphx::shape ss{migraphx::shape::float_type, {1, 2, 2, 2}};
    migraphx::shape sz{migraphx::shape::int32_type, {1, 2, 2, 2}};
    auto input2  = m.add_parameter("x_scale", ss);
    auto input3  = m.add_parameter("x_zero_point", sz);
    auto dequant = m.add_instruction(migraphx::make_op("dequantizelinear"), conv, input2, input3);
    auto r       = m.add_instruction(migraphx::make_op("quantizelinear"), dequant, input2, input3);
    m.add_return({r});
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if(s.empty())
        return;
    CHECK(encode(s) == encode(mlir_output));
    EXPECT(verify_mlir(m));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/gpu/quantization.cpp
...
...
@@ -23,6 +23,7 @@
*/
#include <iostream>
#include <vector>
#include <migraphx/gpu/fuse_mlir.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/quantization.hpp>
...
...
@@ -110,7 +111,16 @@ TEST_CASE(int8_quantization)
        migraphx::target gpu_t = migraphx::make_target("gpu");
        run_prog(p, gpu_t, m, gpu_result);
-        EXPECT(migraphx::verify_range(ref_result, gpu_result));
+        // Note: the tolerance for the mlir_enabled result is temporarily bumped higher
+        // because the lowering pipelines of the MLIR fallback and the regular non-MLIR
+        // path have diverged: the MLIR fallback runs rewrite_quantization at the very end
+        // of the pipeline, whereas the regular pipeline runs it at a much earlier stage.
+        if(migraphx::gpu::mlir_enabled())
+            EXPECT(migraphx::verify_range(ref_result, gpu_result, 1e5));
+        else
+            EXPECT(migraphx::verify_range(ref_result, gpu_result));
    }
}
...
...
test/onnx/.onnxrt-commit
ad4db1269972f92fdba932bb5770943291be3ca5
c294040bac0e34bd7ef0cb97424bace7998900e7
test/onnx/onnx_test.cpp
...
...
@@ -4959,13 +4959,13 @@ TEST_CASE(reducemax_dyn_test)
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0  = mm->add_parameter(
-        "x", migraphx::shape{migraphx::shape::float_type, {{3, 3}, {4, 4}, {5, 5}, {6, 6}}});
+        "x", migraphx::shape{migraphx::shape::float_type, {{3, 5}, {4, 4}, {5, 5}, {6, 6}}});
    auto r0 = mm->add_instruction(migraphx::make_op("reduce_max", {{"axes", {2}}}), l0);
    auto r1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), r0);
    mm->add_return({r1});

    migraphx::onnx_options options;
-    options.map_dyn_input_dims["x"] = {{3, 3}, {4, 4}, {5, 5}, {6, 6}};
+    options.map_dyn_input_dims["x"] = {{3, 5}, {4, 4}, {5, 5}, {6, 6}};
    auto prog = migraphx::parse_onnx("reducemax_dyn_test.onnx", options);

    EXPECT(p == prog);
...
...
@@ -6953,6 +6953,23 @@ TEST_CASE(variable_batch_user_input_test6)
    EXPECT(test::throws([&] { migraphx::parse_onnx("variable_batch_test.onnx", options); }));
}

TEST_CASE(variable_batch_user_input_test7)
{
    // if entry in map_dyn_input_dims is all fixed dynamic_dimensions, convert it to a static shape
    migraphx::program p;
    auto* mm = p.get_main_module();
    auto l0 =
        mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 16, 16}});
    auto r = mm->add_instruction(migraphx::make_op("identity"), l0);
    mm->add_return({r});

    migraphx::onnx_options options;
    options.map_dyn_input_dims["0"] = {{2, 2, {2}}, {3, 3}, {16, 16}, {16, 16}};
    auto prog = migraphx::parse_onnx("variable_batch_test.onnx", options);

    EXPECT(p == prog);
}

TEST_CASE(variable_batch_leq_zero_test)
{
    migraphx::program p;
...
...
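The new variable_batch_user_input_test7 above documents that an entry in map_dyn_input_dims made up entirely of fixed dynamic_dimensions is folded into a static input shape. A minimal Python-side sketch of the same idea, built only from calls that appear in test/py/test_gpu.py in this commit; whether the Python binding applies the identical fixed-to-static conversion is an assumption here, not something this diff shows:

    import migraphx

    # All-fixed dynamic dimensions (min == max on every axis).
    # Assumption: the parser folds these into a static shape, as the C++ test asserts.
    dd_map = {
        "0": [
            migraphx.shape.dynamic_dimension(2, 2),
            migraphx.shape.dynamic_dimension(3, 3),
            migraphx.shape.dynamic_dimension(16, 16),
            migraphx.shape.dynamic_dimension(16, 16)
        ]
    }
    p = migraphx.parse_onnx("variable_batch_test.onnx", map_dyn_input_dims=dd_map)
    for key, value in p.get_parameter_shapes().items():
        assert not value.dynamic()  # expected per variable_batch_user_input_test7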
test/op_shape_test.cpp
...
...
@@ -1822,6 +1822,33 @@ TEST_CASE(pad_dyn_shape1)
    expect_shape(output,
                 migraphx::make_op("pad", {{"pads", {0, 0, 1, 1, 0, 0, 1, 1}}}),
                 input);
}

TEST_CASE(pointwise_no_module)
{
    migraphx::shape input{migraphx::shape::float_type, {0}, {0}};
    throws_shape(migraphx::make_op("pointwise"), input);
}

TEST_CASE(pointwise_no_input)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::module m;
    std::vector<migraphx::instruction_ref> args{};
    auto output = migraphx::shape(migraphx::shape::float_type, {1}, {0});
    auto l      = m.add_literal(migraphx::literal(output, {1}));
    m.add_return({l});
    EXPECT(test::throws([&] { mm->add_instruction(migraphx::make_op("pointwise"), args, {&m}); }));
}

TEST_CASE(pointwise_no_output)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::module m;
    std::vector<migraphx::instruction_ref> args{};
    EXPECT(test::throws([&] { mm->add_instruction(migraphx::make_op("pointwise"), args, {&m}); }));
}

TEST_CASE(pooling_shape0)
{
    migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
...
...
test/py/test_gpu.py
...
...
@@ -86,8 +86,8 @@ def test_nonzero():
    params = {}
    shapes = p.get_parameter_shapes()
-    params["data"] = np.array([1, 1, 0, 1]).reshape(shapes["data"].lens()).astype(np.bool)
+    params["data"] = np.array([1, 1, 0, 1]).reshape(shapes["data"].lens()).astype(bool)

    r = p.run(params)
    print(r)
...
...
@@ -127,15 +127,54 @@ def test_if_pl():
    params["x"] = np.ones(6).reshape(shapes["x"].lens()).astype(np.float32)
    params["y"] = np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
                            2.0]).reshape(shapes["y"].lens()).astype(np.float32)
-    params["cond"] = np.array([1]).reshape(()).astype(np.bool)
+    params["cond"] = np.array([1]).reshape(()).astype(bool)

    r = p.run(params)[-1]
    print(r)


def test_dyn_batch():
    a = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
    b = migraphx.shape.dynamic_dimension(3, 3)
    c = migraphx.shape.dynamic_dimension(32, 32)
    dd_map = {"0": [a, b, c, c]}
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx",
                            map_dyn_input_dims=dd_map)
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)

    def run_prog(batch_size):
        params = {}
        for key, value in p.get_parameter_shapes().items():
            # convert to a static shape
            if value.dynamic():
                dds = value.dyn_dims()
                new_lens = []
                for dd in dds:
                    if dd.is_fixed():
                        new_lens.append(dd.min)
                    else:
                        new_lens.append(batch_size)
                s = migraphx.shape(type=value.type_string(), lens=new_lens)
            else:
                s = value
            print("Parameter {} -> {}".format(key, s))
            params[key] = migraphx.generate_argument(s)
        r = p.run(params)
        print(r)

    run_prog(1)
    run_prog(2)
    run_prog(3)
    run_prog(4)


test_conv_relu()
test_sub_uint64()
test_neg_int64()
test_fp16_imagescaler()
test_if_pl()
test_nonzero()
test_dyn_batch()
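The inner loop of test_dyn_batch is the part worth calling out: it turns each dynamic parameter shape into a static one before generating an argument. The same logic, pulled out into a standalone helper as a sketch (the name to_static_shape is invented here; everything it calls appears in the test above):

    def to_static_shape(value, batch_size):
        # Fixed dynamic dimensions keep their value; non-fixed ones become batch_size.
        if not value.dynamic():
            return value
        new_lens = [dd.min if dd.is_fixed() else batch_size for dd in value.dyn_dims()]
        return migraphx.shape(type=value.type_string(), lens=new_lens)

With it, the parameter map reduces to params[key] = migraphx.generate_argument(to_static_shape(value, batch_size)).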
test/py/test_gpu_offload.py
...
...
@@ -23,16 +23,53 @@
#####################################################################################
import migraphx

-p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
-print(p)
-print("Compiling ...")
-p.compile(migraphx.get_target("gpu"), offload_copy=False)
-print(p)
-params = {}
-for key, value in p.get_parameter_shapes().items():
-    print("Parameter {} -> {}".format(key, value))
-    params[key] = migraphx.to_gpu(migraphx.generate_argument(value))
+def test_conv_relu():
+    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
+    print(p)
+    print("Compiling ...")
+    p.compile(migraphx.get_target("gpu"), offload_copy=False)
+    print(p)
+    params = {}
-r = migraphx.from_gpu(p.run(params)[-1])
-print(r)
+    for key, value in p.get_parameter_shapes().items():
+        print("Parameter {} -> {}".format(key, value))
+        params[key] = migraphx.to_gpu(migraphx.generate_argument(value))
+    r = migraphx.from_gpu(p.run(params)[-1])
+    print(r)
# TODO: placeholder until tuple shapes and arguments exposed
#def test_dyn_batch():
# a = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
# b = migraphx.shape.dynamic_dimension(3, 3)
# c = migraphx.shape.dynamic_dimension(32, 32)
# dd_map = {"0": [a, b, c, c]}
# p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx",
# map_dyn_input_dims=dd_map)
# print(p)
# print("Compiling ...")
# p.compile(migraphx.get_target("gpu"), offload_copy=False)
#
# print(p)
#
# def run_prog(batch_size):
# params = {}
# for key, value in p.get_parameter_shapes().items():
# print("Parameter {} -> {}".format(key, value))
# params[key] = migraphx.to_gpu(
# migraphx.generate_argument(value.to_static(batch_size)))
#
# print("before_output")
# outputs = p.run(params)
# print(outputs)
# r = migraphx.from_gpu(p.run(params)[-1])
# print(r)
#
# run_prog(1)
# run_prog(2)
# run_prog(3)
# run_prog(4)
test_conv_relu()
test/py/test_instruction.py
0 → 100755
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
import migraphx


def test_instruction_shape():
    p = migraphx.program()
    mm = p.get_main_module()
    input_shape = migraphx.shape(lens=[4, 4, 64], type="half_type")
    i = mm.add_parameter("x", input_shape)
    i2 = mm.add_instruction(migraphx.op("reshape", dims=[16, 64]), [i])
    out_shape = i2.shape()
    assert out_shape.lens() == [16, 64]
    assert out_shape.strides() == [64, 1]
    assert out_shape.type_string() == "half_type"


def test_instruction_op():
    p = migraphx.program()
    mm = p.get_main_module()
    input_shape = migraphx.shape(lens=[2, 24])
    i = mm.add_parameter("x", input_shape)
    i2 = mm.add_instruction(migraphx.op("relu"), [i])
    out_op = i2.op()
    assert out_op.name() == "relu"


if __name__ == "__main__":
    test_instruction_shape()
    test_instruction_op()
test/py/test_shape.py
...
...
@@ -29,6 +29,7 @@ def test_create_shape():
    assert s.standard()
    assert s.packed()
    assert s.lens() == [1, 64, 3, 3]
    assert s.ndim() == 4


def test_create_shape_broadcast():
...
...
@@ -49,7 +50,48 @@ def test_create_shape_type():
    assert s.type_size() == 4


def test_create_dyn_dims():
    a = migraphx.shape.dynamic_dimension()
    assert a.is_fixed()
    assert a.min == 0
    b = migraphx.shape.dynamic_dimension(4, 4)
    assert b.is_fixed()
    assert b.max == 4
    c = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
    assert not c.is_fixed()
    assert c.min == 1
    assert c.max == 4
    assert c.optimals == {2, 4}
    dyn_dims = [a, b]
    dyn_dims.append(c)
    assert dyn_dims[1] == b


def test_create_dyn_shape():
    a = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
    b = migraphx.shape.dynamic_dimension(4, 4)
    dds = [a, b]
    dyn_shape = migraphx.shape(type='float', dyn_dims=dds)
    assert dyn_shape.dynamic()
    assert dyn_shape.dyn_dims()[0].min == dds[0].min
    assert dyn_shape.dyn_dims()[0].max == dds[0].max
    assert dyn_shape.dyn_dims()[0].optimals == dds[0].optimals


def test_type_enum():
    mgx_types = [
        'bool_type', 'double_type', 'float_type', 'half_type', 'int16_type',
        'int32_type', 'int64_type', 'int8_type', 'uint16_type', 'uint32_type',
        'uint64_type', 'uint8_type'
    ]
    for t in mgx_types:
        assert hasattr(migraphx.shape.type_t, t)


if __name__ == "__main__":
    test_create_shape()
    test_create_shape_broadcast()
    test_create_shape_type()
    test_create_dyn_dims()
    test_create_dyn_shape()
test/ref_ops_test.cpp
...
...
@@ -1132,50 +1132,6 @@ TEST_CASE(conv_dyn_batch_test)
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, sol));
a = {2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712,
-0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606,
0.02345525, -0.33579525, 0.38901961, 1.05473483, -1.31188095, 1.8963089, -0.07265259,
0.947339, 0.41949373, -0.70814759, 0.25892952, 1.07311416, 1.2571274, -0.62318051,
-0.19951548, -0.94232577, -0.29393643, 0.42292568, -0.80230367, 1.40909171, 0.63617158,
0.13900366, 1.09253144, -0.15265895, 1.54781747, 0.72780299, 1.09189606, -0.38068101,
0.97057933, -0.58958799, 1.56188643, 0.21474874, 0.58725154, -1.27097559, -0.03024297,
1.09437096, -0.4897908, 0.34838957, -1.31042492, -1.69069934, 0.86956722, -0.40457946,
0.46691212, 1.29273605, 0.26464137, 0.22073045, -1.02178168, 0.22163901, -1.84387338,
0.75522131, -0.45775682, -0.42241111, -1.50944722, 1.07256448, -1.95876884, -0.28106022,
0.3341668, 2.13129425, -1.14728117, -1.06555498, -0.298444, -0.88322699, -0.65866792,
-2.06007552, 0.01374334, 0.45612028, 0.52715492, 1.01914406, -1.72659791, 0.80650896,
0.16860051, 2.24112225, -0.78620857, 0.36566174, -0.07020134, -0.47976932, -0.68230027,
-0.94711417, -0.54506505, 1.66504931, -0.71860826, 0.61132306};
c = {-0.14601797, -0.13000923, 0.06521662, 0.06178288, -0.11083675, 0.10154136, 0.09990512,
0.06030385, -0.11374587, -0.17523311, -0.14344215, 0.17802463, 0.06300922, -0.15325832,
0.07066704, 0.05166031, 0.00615084, -0.02606523, 0.08083995, -0.17913306, 0.0624622,
0.0735731, -0.04198661, -0.0164391, -0.06374192, 0.16569914, 0.10681538, 0.07370754,
0.02802075, 0.00282027, 0.15104802, -0.11084409, -0.00197773, 0.07924436, 0.03528272,
0.04765259, -0.15896152, 0.07917164, 0.12125669, -0.1154705, -0.11999125, 0.12749968,
-0.06269585, 0.18658121, -0.03944227, 0.0111798, -0.17731084, 0.11789055, -0.09982193,
0.08142821, 0.0729029, 0.11303909, 0.12735154, 0.03885292};
sol = {-0.20817225,
0.87965256,
0.14958936,
-1.24887264,
-0.06540672,
0.20778663,
0.40456355,
-0.99900877};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {1, 3, 4, 4}};
migraphx::parameter_map params1;
params1["X"] = migraphx::argument(input_fixed_shape1, a.data());
params1["W"] = migraphx::argument(weights_shape, c.data());
result = p.eval(params1).back();
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
}
TEST_CASE(conv_dyn_img_shape_test)
...
...
test/shape_test.cpp
...
...
@@ -30,6 +30,7 @@
#include <array>
#include <algorithm>
#include <numeric>
#include <migraphx/verify.hpp>
#include "test.hpp"
TEST_CASE(test_shape_default)
...
...
@@ -200,6 +201,20 @@ TEST_CASE(dynamic_dimension_add_sub_fixed)
    EXPECT((2 + e) == d);
}

TEST_CASE(dynamic_dimension_serialize)
{
    using migraphx::shape;
    auto a  = shape::dynamic_dimension{2, 5, {2, 3}};
    auto b  = shape::dynamic_dimension{3, 6, {3}};
    auto v1 = migraphx::to_value(a);
    auto v2 = migraphx::to_value(b);
    EXPECT(v1 != v2);
    auto c = migraphx::from_value<shape::dynamic_dimension>(v1);
    EXPECT(a == c);
    auto d = migraphx::from_value<shape::dynamic_dimension>(v2);
    EXPECT(b == d);
}

TEST_CASE(test_shape_dynamic_errors)
{
    using migraphx::shape;
...
...
@@ -929,4 +944,16 @@ TEST_CASE(test_with_type)
    EXPECT(s.strides() == new_s.strides());
}

TEST_CASE(test_multi_index)
{
    migraphx::shape s{migraphx::shape::float_type, {2, 4, 6}};
    EXPECT(migraphx::verify_range(s.multi(0), std::vector<size_t>{0, 0, 0}));
    EXPECT(migraphx::verify_range(s.multi(4), std::vector<size_t>{0, 0, 4}));
    EXPECT(migraphx::verify_range(s.multi(6), std::vector<size_t>{0, 1, 0}));
    EXPECT(migraphx::verify_range(s.multi(8), std::vector<size_t>{0, 1, 2}));
    EXPECT(migraphx::verify_range(s.multi(24), std::vector<size_t>{1, 0, 0}));
    EXPECT(migraphx::verify_range(s.multi(30), std::vector<size_t>{1, 1, 0}));
    EXPECT(migraphx::verify_range(s.multi(34), std::vector<size_t>{1, 1, 4}));
}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/simplify_algebra_test.cpp
...
...
@@ -613,6 +613,60 @@ TEST_CASE(simplify_inner_broadcast_scalar)
    EXPECT(m1 == m2);
}

TEST_CASE(simplify_inner_broadcast_different_dims)
{
    auto b = migraphx::op::multibroadcast{{2, 384, 768}};
    migraphx::module m1;
    {
        auto x   = m1.add_parameter("x", {migraphx::shape::int32_type, {384, 768}});
        auto y   = m1.add_parameter("y", {migraphx::shape::int32_type, {768}});
        auto xb  = m1.add_instruction(b, x);
        auto yb  = m1.add_instruction(b, y);
        auto sum = m1.add_instruction(migraphx::make_op("add"), xb, yb);
        m1.add_instruction(pass_op{}, sum);
    }
    run_pass(m1);

    migraphx::module m2;
    {
        auto x    = m2.add_parameter("x", {migraphx::shape::int32_type, {384, 768}});
        auto y    = m2.add_parameter("y", {migraphx::shape::int32_type, {768}});
        auto yb   = m2.add_instruction(migraphx::op::multibroadcast{{384, 768}}, y);
        auto sum  = m2.add_instruction(migraphx::make_op("add"), x, yb);
        auto sumb = m2.add_instruction(b, sum);
        m2.add_instruction(pass_op{}, sumb);
    }
    EXPECT(m1 == m2);
}

TEST_CASE(simplify_inner_broadcast_different_broadcasts)
{
    auto b  = migraphx::op::broadcast{1, {1, 24, 112, 112}};
    auto mb = migraphx::op::multibroadcast{{1, 24, 112, 112}};
    migraphx::module m1;
    {
        auto x   = m1.add_parameter("x", {migraphx::shape::int32_type, {24}});
        auto y   = m1.add_parameter("y", {migraphx::shape::int32_type, {24, 1, 1}});
        auto xb  = m1.add_instruction(b, x);
        auto yb  = m1.add_instruction(mb, y);
        auto sum = m1.add_instruction(migraphx::make_op("add"), xb, yb);
        m1.add_instruction(pass_op{}, sum);
    }
    run_pass(m1);

    migraphx::module m2;
    {
        auto x    = m2.add_parameter("x", {migraphx::shape::int32_type, {24}});
        auto y    = m2.add_parameter("y", {migraphx::shape::int32_type, {24, 1, 1}});
        auto xs   = m2.add_instruction(migraphx::make_op("squeeze"), x);
        auto ys   = m2.add_instruction(migraphx::make_op("squeeze"), y);
        auto sum  = m2.add_instruction(migraphx::make_op("add"), xs, ys);
        auto sumb = m2.add_instruction(b, sum);
        m2.add_instruction(pass_op{}, sumb);
    }
    EXPECT(m1 == m2);
}
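Both simplify_inner_broadcast cases above lean on the fact that broadcasting distributes over elementwise operations: if $B$ is the common broadcast applied to every operand, then

$$B(x) \odot B(y) = B(x \odot y),$$

so the expected modules apply the op on the smaller operands first (after aligning ranks with a squeeze or a narrower multibroadcast) and broadcast the result once.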
TEST_CASE(simplify_add_conv1)
{
    migraphx::module m;
...
...
@@ -3003,6 +3057,38 @@ TEST_CASE(reorder_slice_ins_deps)
    EXPECT(m == create_module());
}

TEST_CASE(dot_broadcast_different_rank)
{
    migraphx::module m1;
    {
        auto x  = m1.add_parameter("x", {migraphx::shape::float_type, {768}});
        auto y  = m1.add_parameter("y", {migraphx::shape::float_type, {768, 3072}});
        auto xb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {2, 384, 768}}}), x);
        auto yb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {2, 768, 3072}}}), y);
        auto dot = m1.add_instruction(migraphx::make_op("dot"), xb, yb);
        m1.add_return({dot});
    };

    migraphx::module m2;
    {
        auto x  = m2.add_parameter("x", {migraphx::shape::float_type, {768}});
        auto y  = m2.add_parameter("y", {migraphx::shape::float_type, {768, 3072}});
        auto xb = m2.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {384, 768}}}), x);
        auto yb = m2.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {768, 3072}}}), y);
        auto dot       = m2.add_instruction(migraphx::make_op("dot"), xb, yb);
        auto broadcast = m2.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", {2, 384, 3072}}}), dot);
        m2.add_return({broadcast});
    };
    run_pass(m1);
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(dot_fusion_reshape)
{
    migraphx::module m1;
...
...
@@ -3052,4 +3138,257 @@ TEST_CASE(dot_fusion_reshape)
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(mul_dot_a)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto a   = m1.add_parameter("input", as);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 32}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", as.lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), a, litb);
        auto b   = m1.add_literal(migraphx::generate_literal(bs));
        auto dot = m1.add_instruction(migraphx::make_op("dot"), mul, b);
        m1.add_return({dot});
    };
    run_pass(m1);

    migraphx::module m2;
    {
        auto a   = m2.add_parameter("input", as);
        auto lit = m2.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 32}}));
        auto litb = m2.add_instruction(
            migraphx::make_op("multibroadcast",
                              {{"out_lens", migraphx::reorder_dims(bs.lens(), {0, 2, 1})}}),
            lit);
        auto litt = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), litb);
        auto b   = m2.add_literal(migraphx::generate_literal(bs));
        auto mul = m2.add_instruction(migraphx::make_op("mul"), b, litt);
        auto dot = m2.add_instruction(migraphx::make_op("dot"), a, mul);
        m2.add_return({dot});
    };
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(mul_dot_b)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto b   = m1.add_parameter("input", bs);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 32, 1}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", bs.lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), b, litb);
        auto a   = m1.add_literal(migraphx::generate_literal(as));
        auto dot = m1.add_instruction(migraphx::make_op("dot"), a, mul);
        m1.add_return({dot});
    };
    run_pass(m1);

    migraphx::module m2;
    {
        auto b   = m2.add_parameter("input", bs);
        auto lit = m2.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 32, 1}}));
        auto litb = m2.add_instruction(
            migraphx::make_op("multibroadcast",
                              {{"out_lens", migraphx::reorder_dims(as.lens(), {0, 2, 1})}}),
            lit);
        auto litt = m2.add_instruction(
            migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), litb);
        auto a   = m2.add_literal(migraphx::generate_literal(as));
        auto mul = m2.add_instruction(migraphx::make_op("mul"), a, litt);
        auto dot = m2.add_instruction(migraphx::make_op("dot"), mul, b);
        m2.add_return({dot});
    };
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(mul_dot_a_not_k_broadcast)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto a   = m1.add_parameter("input", as);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 256, 1}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", as.lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), a, litb);
        auto b   = m1.add_literal(migraphx::generate_literal(bs));
        auto dot = m1.add_instruction(migraphx::make_op("dot"), mul, b);
        m1.add_return({dot});
    };
    migraphx::module m2 = m1;
    run_pass(m1);
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(mul_dot_b_not_k_broadcast)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto b   = m1.add_parameter("input", bs);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 128}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", bs.lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), b, litb);
        auto a   = m1.add_literal(migraphx::generate_literal(as));
        auto dot = m1.add_instruction(migraphx::make_op("dot"), a, mul);
        m1.add_return({dot});
    };
    migraphx::module m2 = m1;
    run_pass(m1);
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(dot_mul_a)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto a   = m1.add_parameter("input", as);
        auto b   = m1.add_literal(migraphx::generate_literal(bs));
        auto dot = m1.add_instruction(migraphx::make_op("dot"), a, b);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 128}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", dot->get_shape().lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), dot, litb);
        m1.add_return({mul});
    };
    run_pass(m1);

    migraphx::module m2;
    {
        auto a   = m2.add_parameter("input", as);
        auto b   = m2.add_literal(migraphx::generate_literal(bs));
        auto lit = m2.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 128}}));
        auto litb = m2.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", bs.lens()}}), lit);
        auto mul = m2.add_instruction(migraphx::make_op("mul"), b, litb);
        auto dot = m2.add_instruction(migraphx::make_op("dot"), a, mul);
        m2.add_return({dot});
    };
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(dot_mul_a_non_const)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto a   = m1.add_parameter("input", as);
        auto b   = m1.add_literal(migraphx::generate_literal(bs));
        auto dot = m1.add_instruction(migraphx::make_op("dot"), a, b);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 256, 1}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", dot->get_shape().lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), dot, litb);
        m1.add_return({mul});
    };
    migraphx::module m2 = m1;
    run_pass(m1);
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(dot_mul_b)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto a   = m1.add_literal(migraphx::generate_literal(as));
        auto b   = m1.add_parameter("input", bs);
        auto dot = m1.add_instruction(migraphx::make_op("dot"), a, b);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 256, 1}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", dot->get_shape().lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), dot, litb);
        m1.add_return({mul});
    };
    run_pass(m1);

    migraphx::module m2;
    {
        auto a   = m2.add_literal(migraphx::generate_literal(as));
        auto b   = m2.add_parameter("input", bs);
        auto lit = m2.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 256, 1}}));
        auto litb = m2.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", as.lens()}}), lit);
        auto mul = m2.add_instruction(migraphx::make_op("mul"), a, litb);
        auto dot = m2.add_instruction(migraphx::make_op("dot"), mul, b);
        m2.add_return({dot});
    };
    EXPECT(m1.sort() == m2.sort());
}

TEST_CASE(dot_mul_b_non_const)
{
    migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
    migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
    migraphx::module m1;
    {
        auto a   = m1.add_literal(migraphx::generate_literal(as));
        auto b   = m1.add_parameter("input", bs);
        auto dot = m1.add_instruction(migraphx::make_op("dot"), a, b);
        auto lit = m1.add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 128}}));
        auto litb = m1.add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", dot->get_shape().lens()}}), lit);
        auto mul = m1.add_instruction(migraphx::make_op("mul"), dot, litb);
        m1.add_return({mul});
    };
    migraphx::module m2 = m1;
    run_pass(m1);
    EXPECT(m1.sort() == m2.sort());
}
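The mul_dot_* and dot_mul_* cases above all come down to moving a diagonal scale across a matrix product. Writing $D_v = \operatorname{diag}(v)$ for the broadcast multiplier $v$:

$$(A D_v)B = A(D_v B), \qquad (AB)D_v = A(B D_v), \qquad D_v(AB) = (D_v A)B.$$

The first identity needs $v$ to run along the contracted (k) axis, which is why the *_not_k_broadcast variants expect run_pass to leave the module untouched; the *_non_const variants check the other precondition suggested by the expected modules, namely that the operand the scale would be folded into is a literal rather than a parameter.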
int main(int argc, const char* argv[]) { test::run(argc, argv); }
test/verify/test_dot_mul_a.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_dot_mul_a : verify_program<test_dot_mul_a>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
        migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
        auto a   = mm->add_parameter("input", as);
        auto b   = mm->add_literal(migraphx::generate_literal(bs));
        auto dot = mm->add_instruction(migraphx::make_op("dot"), a, b);
        auto lit = mm->add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 128}}));
        auto litb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", dot->get_shape().lens()}}), lit);
        auto mul = mm->add_instruction(migraphx::make_op("mul"), dot, litb);
        mm->add_return({mul});
        return p;
    }
};
test/verify/test_dot_mul_b.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_dot_mul_b : verify_program<test_dot_mul_b>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
        migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
        auto a   = mm->add_literal(migraphx::generate_literal(as));
        auto b   = mm->add_parameter("input", bs);
        auto dot = mm->add_instruction(migraphx::make_op("dot"), a, b);
        auto lit = mm->add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 256, 1}}));
        auto litb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", dot->get_shape().lens()}}), lit);
        auto mul = mm->add_instruction(migraphx::make_op("mul"), dot, litb);
        mm->add_return({mul});
        return p;
    }
};
test/verify/test_mul_dot_a.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_mul_dot_a : verify_program<test_mul_dot_a>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
        migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
        auto a   = mm->add_parameter("input", as);
        auto lit = mm->add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 1, 32}}));
        auto litb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", as.lens()}}), lit);
        auto mul = mm->add_instruction(migraphx::make_op("mul"), a, litb);
        auto b   = mm->add_literal(migraphx::generate_literal(bs));
        auto dot = mm->add_instruction(migraphx::make_op("dot"), mul, b);
        mm->add_return({dot});
        return p;
    }
};
test/verify/test_mul_dot_b.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_mul_dot_b : verify_program<test_mul_dot_b>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape as{migraphx::shape::float_type, {2, 256, 32}};
        migraphx::shape bs{migraphx::shape::float_type, {2, 32, 128}};
        auto b   = mm->add_parameter("input", bs);
        auto lit = mm->add_literal(
            migraphx::generate_literal({migraphx::shape::float_type, {1, 32, 1}}));
        auto litb = mm->add_instruction(
            migraphx::make_op("multibroadcast", {{"out_lens", bs.lens()}}), lit);
        auto mul = mm->add_instruction(migraphx::make_op("mul"), b, litb);
        auto a   = mm->add_literal(migraphx::generate_literal(as));
        auto dot = mm->add_instruction(migraphx::make_op("dot"), a, mul);
        mm->add_return({dot});
        return p;
    }
};
test/verify/test_reduce_mean_bias_half.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/instruction.hpp>
struct test_reduce_mean_bias_half : verify_program<test_reduce_mean_bias_half>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::half_type, {1, 32, 128}};
        migraphx::shape bs{migraphx::shape::half_type, {1, 32, 128}};
        auto x      = mm->add_parameter("x", s);
        auto reduce = mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {2}}}), x);
        auto bias   = mm->add_parameter("bias", reduce->get_shape());
        auto add    = mm->add_instruction(migraphx::make_op("add"), reduce, bias);
        auto abs    = mm->add_instruction(migraphx::make_op("abs"), add);
        auto sqrt   = mm->add_instruction(migraphx::make_op("sqrt"), abs);
        mm->add_return({sqrt});
        return p;
    };
};
test/verify/test_softmax4.cpp
0 → 100644
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_softmax4 : verify_program<test_softmax4>
{
    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        auto x   = mm->add_parameter(
            "x", migraphx::shape{migraphx::shape::half_type, {1, 12, 384, 384}});
        mm->add_instruction(migraphx::make_op("softmax", {{"axis", 3}}), x);
        return p;
    }
};