Commit fd911fab authored by charlie

Merge branch 'dyn_gemm' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_onnx_matmul

parents a9b042f4 ab148dbd
@@ -5,14 +5,14 @@ on:
     branches: [develop]
     types: [opened, synchronize, closed]
   schedule:
-    - cron: "0 5 * * 1-6"
+    - cron: "0 6 * * 1-6"
   workflow_dispatch:
     inputs:
       rocm_release:
         description: ROCm Version
         required: true
-        default: '5.2'
+        default: '5.3'
       performance_reports_repo:
         description: Result repository
         required: true
@@ -30,9 +30,9 @@ concurrency: "perftest-${{ github.head_ref || github.base_ref || 'schedule' }}"
 
 jobs:
   release:
-    uses: rocmsoftwareplatform/migraphx-benchmark/.github/workflows/perf-test.yml@main
+    uses: ROCmSoftwarePlatform/migraphx-benchmark/.github/workflows/perf-test.yml@main
     with:
-      rocm_release: ${{ github.event.inputs.rocm_release || '5.2' }}
+      rocm_release: ${{ github.event.inputs.rocm_release || '5.3' }}
       result_number: ${{ github.event.inputs.result_number || '10' }}
       flags: ${{ github.event.inputs.flags || '-s' }}
       performance_reports_repo: ${{ github.event.inputs.performance_reports_repo || 'ROCmSoftwarePlatform/migraphx-reports' }}
@@ -29,6 +29,7 @@ See below for a comprehensive list of commands and option arguments, as well as
 | --tf | Load file as a tensorflow graph |
 | --migraphx | Load file as a migraphx graph |
 | --migraphx-json | Load file as a migraphx JSON graph |
+| --batch | Set batch size for the model |
 | --nhwc | Treat tensorflow format as nhwc |
 | --nchw | Treat tensorflow format as nchw |
 | --skip-unknown-operators | Skip unknown operators when parsing and continue to parse |
@@ -21,6 +21,6 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
 #####################################################################################
-tensorflow==2.7.2
+tensorflow==2.9.3
 onnxruntime
 tokenizers
\ No newline at end of file
@@ -29,6 +29,7 @@
 #include <migraphx/config.hpp>
 #include <migraphx/value.hpp>
 #include <migraphx/op/normalize_attribute.hpp>
+#include <migraphx/dyn_output.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -45,17 +46,15 @@ struct transpose
     }
 
     std::string name() const { return "transpose"; }
     shape compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this}.has(1);
+        check_shapes{inputs, *this, true}.has(1);
         auto input = inputs.at(0);
-        auto input_lens = input.lens();
-        auto input_strides = input.strides();
-        auto t = input.type();
-        if(dims.size() != input_lens.size())
+        if(dims.size() != input.ndim())
         {
-            MIGRAPHX_THROW("Permutation has wrong number of axes");
+            MIGRAPHX_THROW("TRANSPOSE: Permutation has wrong number of axes");
         }
         std::vector<int64_t> axes(dims.size());
         std::iota(axes.begin(), axes.end(), 0);
@@ -63,19 +62,36 @@ struct transpose
         {
             MIGRAPHX_THROW("TRANSPOSE: Invalid permutation");
         }
-        std::vector<size_t> output_lens(input_lens.size());
-        std::vector<size_t> output_strides(input_lens.size());
-        for(std::size_t i = 0; i < output_lens.size(); i++)
-        {
-            output_lens[i] = input_lens[dims[i]];
-            output_strides[i] = input_strides[dims[i]];
-        }
-        return {t, output_lens, output_strides};
+
+        if(input.dynamic())
+        {
+            std::vector<shape::dynamic_dimension> output_dyn_dims(input.ndim());
+            std::transform(dims.cbegin(), dims.cend(), output_dyn_dims.begin(), [&](auto dim) {
+                return input.dyn_dims()[dim];
+            });
+            return {input.type(), output_dyn_dims};
+        }
+        else
+        {
+            auto input_lens = input.lens();
+            auto input_strides = input.strides();
+            std::vector<size_t> output_lens(input.ndim());
+            std::vector<size_t> output_strides(input.ndim());
+            for(std::size_t i = 0; i < input.ndim(); i++)
+            {
+                output_lens[i] = input_lens[dims[i]];
+                output_strides[i] = input_strides[dims[i]];
+            }
+            return {input.type(), output_lens, output_strides};
+        }
     }
 
-    argument compute(shape output_shape, std::vector<argument> args) const
+    argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        return args[0].reshape(output_shape);
+        return args[0].reshape(dyn_out.computed_shape);
     }
 
     std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 0; }
 };
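Note on the hunk above: compute_shape now covers both input kinds by permuting per-axis metadata, reordering the {min, max, optimal} triples for dynamic shapes and the lens/strides pairs for static ones. A minimal Python sketch of that bookkeeping (plain tuples stand in for migraphx::shape; the function name is illustrative, not MIGraphX API):

# Sketch of transpose's compute_shape logic using plain Python data.
# "dyn_dims" entries are (min, max, opt) triples, mirroring shape::dynamic_dimension.
def transpose_shape(dims, lens=None, strides=None, dyn_dims=None):
    if sorted(dims) != list(range(len(dims))):
        raise ValueError("TRANSPOSE: Invalid permutation")
    if dyn_dims is not None:
        # dynamic input: permute the (min, max, opt) triples
        return [dyn_dims[d] for d in dims]
    # static input: permute lengths and strides together
    return [lens[d] for d in dims], [strides[d] for d in dims]

# usage: a [1..4, 2, 2, 3] dynamic shape under perm [0, 3, 1, 2]
print(transpose_shape([0, 3, 1, 2],
                      dyn_dims=[(1, 4, 0), (2, 2, 0), (2, 2, 0), (3, 3, 0)]))
# -> [(1, 4, 0), (3, 3, 0), (2, 2, 0), (2, 2, 0)]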
@@ -47,7 +47,7 @@ struct parse_transpose : op_parser<parse_transpose>
         }
 
         // if perm is empty, use the default value
-        auto n_dim = args.front()->get_shape().lens().size();
+        auto n_dim = args.front()->get_shape().ndim();
         if(perm.empty())
         {
             perm.resize(n_dim);
@@ -146,8 +146,6 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         pack_int8_args{},
         dead_code_elimination{},
-        adjust_allocation{gpu_allocation_model{}},
-        dead_code_elimination{},
         fuse_ops{&ctx, options.fast_math},
         dead_code_elimination{},
         replace_allocate{gpu_allocation_model{}, options.offload_copy},
@@ -6372,6 +6372,21 @@ def transpose_test():
     return ([node], [x], [y])
 
 
+@onnx_test
+def transpose_dyn_test():
+    x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 2, 3])
+    y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 2, 2])
+
+    node = onnx.helper.make_node(
+        'Transpose',
+        perm=[0, 3, 1, 2],
+        inputs=['0'],
+        outputs=['1'],
+    )
+
+    return ([node], [x], [y])
+
+
 @onnx_test
 def transpose_gather_test():
     x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
@@ -6082,6 +6082,24 @@ TEST_CASE(transpose_test)
     EXPECT(p == prog);
 }
 
+TEST_CASE(transpose_dyn_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    auto input = mm->add_parameter(
+        "0",
+        migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
+    std::vector<int64_t> perm{0, 3, 1, 2};
+    auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
+    mm->add_return({t0});
+
+    migraphx::onnx_options options;
+    options.default_dyn_dim_value = {1, 4, 0};
+    auto prog = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
+
+    EXPECT(p == prog);
+}
+
 TEST_CASE(topk_attrk_test)
 {
     migraphx::program p;
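In the test above, {1, 4, 0} is a shape::dynamic_dimension written as {min, max, optimal} (0 meaning no optimal hint), and options.default_dyn_dim_value substitutes that range for the model's unspecified (None) batch dimension. For reference, a standalone sketch that would produce the transpose_dyn_test.onnx file parsed here, equivalent to the transpose_dyn_test generator added in gen_onnx.py above:

# Standalone equivalent of the @onnx_test transpose_dyn_test generator.
import onnx
from onnx import TensorProto, helper

x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 2, 3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 2, 2])
node = helper.make_node('Transpose', inputs=['0'], outputs=['1'], perm=[0, 3, 1, 2])
graph = helper.make_graph([node], 'transpose_dyn_test', [x], [y])
onnx.save(helper.make_model(graph), 'transpose_dyn_test.onnx')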
@@ -2341,6 +2341,28 @@ TEST_CASE(transpose_shape)
     throws_shape(migraphx::make_op("transpose", {{"permutation", {1, 2}}}), input);
 }
 
+TEST_CASE(transpose_dyn_shape0)
+{
+    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}}};
+    migraphx::shape output{migraphx::shape::float_type, {{2, 2, 0}, {1, 4, 0}}};
+    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1}}}), input);
+    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {1, 0}}}), input);
+}
+
+TEST_CASE(transpose_dyn_shape1)
+{
+    migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}};
+    migraphx::shape output{migraphx::shape::float_type, {{4, 4, 0}, {4, 4, 0}, {1, 4, 0}}};
+    expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1, 2}}}), input);
+    expect_shape(output, migraphx::make_op("transpose", {{"permutation", {2, 1, 0}}}), input);
+}
+
+TEST_CASE(transpose_axes_error)
+{
+    migraphx::shape input{migraphx::shape::float_type, {2, 2}};
+    throws_shape(migraphx::make_op("transpose", {{"permutation", {1}}}), input);
+}
+
 TEST_CASE(step_test)
 {
     migraphx::shape s1{migraphx::shape::float_type, {1, 2, 4}};
@@ -35,7 +35,7 @@
 #include <migraphx/half.hpp>
 
 template <class T>
-void dot_2D_test()
+void dot_2d_test()
 {
     migraphx::program p;
@@ -82,11 +82,11 @@ void dot_2D_test()
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
     EXPECT(migraphx::verify_range(c, results_vector));
 }
-TEST_CASE_REGISTER(dot_2D_test<float>)
-TEST_CASE_REGISTER(dot_2D_test<double>)
+TEST_CASE_REGISTER(dot_2d_test<float>)
+TEST_CASE_REGISTER(dot_2d_test<double>)
 
 template <class T>
-void dot_4D_test()
+void dot_4d_test()
 {
     migraphx::program p;
@@ -133,8 +133,8 @@ void dot_4D_test()
     result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
     EXPECT(migraphx::verify_range(c, results_vector));
 }
-TEST_CASE_REGISTER(dot_4D_test<float>)
-TEST_CASE_REGISTER(dot_4D_test<double>)
+TEST_CASE_REGISTER(dot_4d_test<float>)
+TEST_CASE_REGISTER(dot_4d_test<double>)
 
 TEST_CASE(dot_3D_test)
 {
@@ -1250,7 +1250,8 @@ TEST_CASE(dot_dyn_4D_test)
     migraphx::program p;
     auto* mm = p.get_main_module();
-    migraphx::shape a_shape{migraphx::shape::float_type, {1, 1, 4, 5}};
+    migraphx::shape a_shape{migraphx::shape::float_type,
+                            {{1, 1, 0}, {1, 1, 0}, {4, 6, 4}, {5, 5, 0}}};
     auto al = mm->add_parameter("a", a_shape);
     migraphx::shape b_shape{migraphx::shape::float_type, {1, 1, 5, 3}};
     auto bl = mm->add_parameter("b", b_shape);
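In the reworked dot_dyn_4D_test, the {4, 6, 4} entry makes a's M dimension dynamic: any value in [4, 6] may be supplied at evaluation time, with 4 marked as the optimal, while b stays static at {1, 1, 5, 3}. A small numpy sketch of the shape behavior being tested (numpy matmul standing in for MIGraphX's ref dot):

import numpy as np

# b is static {1, 1, 5, 3}; a's M dimension may be any value in [4, 6].
b = np.random.rand(1, 1, 5, 3).astype(np.float32)
for m in (4, 5, 6):
    a = np.random.rand(1, 1, m, 5).astype(np.float32)
    assert (a @ b).shape == (1, 1, m, 3)  # output keeps the supplied M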