"vscode:/vscode.git/clone" did not exist on "9b9cd83db6f781063b0b7192d8fad290c336451d"
Commit 5ec8f913 authored by Ted Themistokleous, committed by Ted Themistokleous

Merge branch 'develop' into simplify_1_mul_div_ops

parents 32d69e8e d78bcdfb
@@ -42,6 +42,7 @@
 #include <migraphx/register_target.hpp>
 #include <migraphx/replace_allocate.hpp>
 #include <migraphx/rewrite_batchnorm.hpp>
+#include <migraphx/rewrite_gelu.hpp>
 #include <migraphx/rewrite_pooling.hpp>
 #include <migraphx/rewrite_quantization.hpp>
 #include <migraphx/rewrite_rnn.hpp>
@@ -116,6 +117,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         inline_module{},
         rewrite_pooling{},
         dead_code_elimination{},
+        rewrite_gelu{},
+        dead_code_elimination{},
         eliminate_common_subexpression{},
         dead_code_elimination{},
         simplify_algebra{},
@@ -134,8 +137,6 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         lowering{&ctx, options.offload_copy},
         eliminate_contiguous{"gpu::contiguous"},
         dead_code_elimination{},
-        replace_allocate{gpu_allocation_model{}, options.offload_copy},
-        dead_code_elimination{},
         eliminate_concat{concat_gpu_optimization{}},
         dead_code_elimination{},
         pack_int8_args{},
@@ -144,6 +145,8 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
         dead_code_elimination{},
         fuse_ops{&ctx, options.fast_math},
         dead_code_elimination{},
+        replace_allocate{gpu_allocation_model{}, options.offload_copy},
+        dead_code_elimination{},
         compile_ops{&ctx},
         dead_code_elimination{},
         write_literals{&ctx},
...
@@ -244,7 +244,6 @@ struct ref_convolution : auto_register_op<ref_convolution<Op>>
         auto weights_lens = args[1].get_shape().lens();
         std::vector<std::size_t> k_lens{weights_lens.begin() + 2, weights_lens.end()};
         padding = calc_dyn_auto_pad(img_lens, k_lens, op.stride, op.dilation);
-        std::cout << "[ ";
         output_shape =
             compute_padded_shape({args.at(0).get_shape(), args.at(1).get_shape()}, padding);
     }
...
@@ -100,7 +100,7 @@ struct parse_conv : op_parser<parse_conv>
         {
             MIGRAPHX_THROW("padding should have 4 values");
         }
-        if(padding[0] != padding[2] || padding[1] != padding[3])
+        if(padding[0] != padding[2] or padding[1] != padding[3])
         {
             MIGRAPHX_THROW("migraphx does not support asymetric padding");
         }
...
@@ -90,7 +90,7 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
         calculate_padding(0, pads, input_dims[2], op.stride[0], op.dilation[0], weight_h);
         calculate_padding(1, pads, input_dims[3], op.stride[1], op.dilation[1], weight_w);
-        if(pads[0] != pads[2] || pads[1] != pads[3])
+        if(pads[0] != pads[2] or pads[1] != pads[3])
         {
             std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
             l0 = info.add_instruction(migraphx::make_op("pad", {{"pads", padding}}), l0);
...
@@ -42,7 +42,7 @@ struct parse_pooling : op_parser<parse_pooling>
                           tf_parser::node_info info,
                           std::vector<instruction_ref> args) const
     {
-        if(!starts_with(opd.tf_name, "Max") && !starts_with(opd.tf_name, "Av"))
+        if(not starts_with(opd.tf_name, "Max") and not starts_with(opd.tf_name, "Av"))
         {
             MIGRAPHX_THROW("tf pooling mode must be Max or Average");
         }
...
@@ -41,8 +41,9 @@ struct parse_relu6 : op_parser<parse_relu6>
                           const tf_parser::node_info& info,
                           std::vector<instruction_ref> args) const
     {
-        auto min_val = info.add_literal(0.0f);
-        auto max_val = info.add_literal(6.0f);
+        shape::type_t output_type = args[0]->get_shape().type();
+        auto min_val = info.add_literal(migraphx::literal{migraphx::shape{output_type}, {0.0f}});
+        auto max_val = info.add_literal(migraphx::literal{migraphx::shape{output_type}, {6.0f}});
         return info.add_common_op("clip", args[0], min_val, max_val);
     }
...
@@ -371,7 +371,7 @@ void tf_parser::parse_node(const std::string& name)
     {
         result = ops[node.op()](*this, {get_attributes(node), node.op(), mm}, args);
     }
-    assert(!result.empty());
+    assert(not result.empty());
     // First output has no ":" delimiter
     instructions[name] = result.front();
     for(size_t i = 1; i < result.size(); i++)
@@ -458,7 +458,7 @@ literal tf_parser::parse_tensor(const tensorflow::TensorProto& t) const
 {
     std::vector<size_t> dims = parse_dims(t.tensor_shape());
     size_t shape_size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<size_t>());
-    if(!t.tensor_content().empty()) // has raw data
+    if(not t.tensor_content().empty()) // has raw data
     {
         const std::string& s = t.tensor_content();
         switch(t.dtype())
...
@@ -78,7 +78,7 @@ void tmp_dir::execute(const std::string& exe, const std::string& args) const
 tmp_dir::~tmp_dir()
 {
-    if(!enabled(MIGRAPHX_DEBUG_SAVE_TEMP_DIR{}))
+    if(not enabled(MIGRAPHX_DEBUG_SAVE_TEMP_DIR{}))
     {
         fs::remove_all(this->path);
     }
...
@@ -400,7 +400,7 @@ std::pair<value*, bool> value::insert(const value& v)
 {
     if(v.key.empty())
     {
-        if(!x)
+        if(not x)
             x = std::make_shared<array_value_holder>();
         get_array_impl(x).push_back(v);
         assert(this->if_array());
@@ -408,7 +408,7 @@ std::pair<value*, bool> value::insert(const value& v)
     }
     else
     {
-        if(!x)
+        if(not x)
             x = std::make_shared<object_value_holder>();
         auto p = x->if_object()->emplace(v.key, get_array_impl(x).size());
         if(p.second)
@@ -420,7 +420,7 @@ std::pair<value*, bool> value::insert(const value& v)
 value* value::insert(const value* pos, const value& v)
 {
     assert(v.key.empty());
-    if(!x)
+    if(not x)
         x = std::make_shared<array_value_holder>();
     auto&& a = get_array_impl(x);
     auto it = a.insert(a.begin() + (pos - begin()), v);
@@ -466,7 +466,7 @@ bool compare(const value& x, const value& y, F f)
 value::type_t value::get_type() const
 {
-    if(!x)
+    if(not x)
         return null_type;
     return x->get_type();
 }
...
@@ -55,7 +55,7 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
     virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
     {
-        if(!inputs[0].standard())
+        if(not inputs[0].standard())
         {
             throw std::runtime_error("first arg must be standard shaped");
         }
...
@@ -49,6 +49,6 @@ bool create_shapes(bool dynamic_allowed)
 TEST_CASE(allow_dynamic_shape) { EXPECT(create_shapes(true)); }

-TEST_CASE(fail_dynamic_shape) { EXPECT(!create_shapes(false)); }
+TEST_CASE(fail_dynamic_shape) { EXPECT(not create_shapes(false)); }

 int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -187,7 +187,7 @@ TEST_CASE(print_test)
     std::stringstream ss;
     ss << p;
     std::string s = ss.str();
-    EXPECT(!s.empty());
+    EXPECT(not s.empty());
 }

 TEST_CASE(param_test)
...
@@ -26,8 +26,9 @@
 #include <migraphx/make_op.hpp>
 #include <migraphx/program.hpp>
 #include <migraphx/register_target.hpp>
-#include <migraphx/ref/target.hpp>
+#include <migraphx/fpga/target.hpp>
 #include <migraphx/target_assignments.hpp>
+#include <migraphx/iterator_for.hpp>

 migraphx::program create_program()
 {
@@ -37,8 +38,8 @@ migraphx::program create_program()
     auto x = mm->add_parameter("x", s);
     auto y = mm->add_parameter("y", s);
     auto z = mm->add_parameter("z", s);
-    auto diff = mm->add_instruction(migraphx::make_op("div"), x, y);
-    mm->add_instruction(migraphx::make_op("div"), diff, z);
+    auto diff = mm->add_instruction(migraphx::make_op("add"), x, y);
+    mm->add_instruction(migraphx::make_op("add"), diff, z);
     return p;
 }
@@ -46,15 +47,17 @@ TEST_CASE(is_supported)
 {
     auto p = create_program();
     auto targets = migraphx::get_targets();
-    EXPECT(!targets.empty());
-    auto first_target = targets[0];
-    auto t = migraphx::make_target(first_target);
+    EXPECT(not targets.empty());
+    auto t = migraphx::make_target("fpga");
     const auto assignments = p.get_target_assignments({t});
-    for(const auto& [ins, target] : assignments)
+    const auto* mod = p.get_main_module();
+    EXPECT(mod->size() == assignments.size());
+    for(const auto ins : iterator_for(*mod))
     {
-        (void)ins;
-        EXPECT(target == first_target);
+        const auto& target = assignments.at(ins);
+        EXPECT(target == "fpga");
     }
 }
...
@@ -112,12 +112,12 @@ struct mod_pass_op
     migraphx::shape compute_shape(std::vector<migraphx::shape> inputs,
                                   std::vector<migraphx::module_ref> mods) const
     {
-        if(!mods.empty())
+        if(not mods.empty())
         {
             auto out_shapes = mods[0]->get_output_shapes();
             return out_shapes[0];
         }
-        if(!inputs.empty())
+        if(not inputs.empty())
         {
             return inputs.front();
         }
@@ -186,9 +186,10 @@ struct nop
     migraphx::shape compute_shape(const std::vector<migraphx::shape>&) const { return {}; }
 };

-inline migraphx::literal get_2x2()
+inline migraphx::literal get_2x2(int base = 0)
 {
-    return migraphx::literal{{migraphx::shape::float_type, {2, 2}}, {1, 2, 3, 4}};
+    return migraphx::literal{{migraphx::shape::float_type, {2, 2}},
+                             {base + 1, base + 2, base + 3, base + 4}};
 }

 inline migraphx::literal get_2x2_transposed()
...
@@ -108,15 +108,7 @@ struct function
 };

 template <class Stream, class Iterator>
-inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
-{
-    if(start != last)
-    {
-        s << *start;
-        std::for_each(std::next(start), last, [&](auto&& x) { s << ", " << x; });
-    }
-    return s;
-}
+Stream& stream_range(Stream& s, Iterator start, Iterator last);

 template <class Stream>
 inline Stream& operator<<(Stream& s, std::nullptr_t)
@@ -136,6 +128,17 @@ inline auto operator<<(Stream& s, const Range& v) -> decltype(stream_range(s, v.
     return s;
 }

+template <class Stream, class Iterator>
+inline Stream& stream_range(Stream& s, Iterator start, Iterator last)
+{
+    if(start != last)
+    {
+        s << *start;
+        std::for_each(std::next(start), last, [&](auto&& x) { s << ", " << x; });
+    }
+    return s;
+}
+
 template <class T>
 const T& get_value(const T& x)
 {
@@ -342,7 +345,7 @@ inline std::ostream& operator<<(std::ostream& os, const color& c)
 template <class T, class F>
 void failed(T x, const char* msg, const char* func, const char* file, int line, F f)
 {
-    if(!bool(x.value()))
+    if(not bool(x.value()))
     {
         std::cout << func << std::endl;
         std::cout << file << ":" << line << ":" << std::endl;
...
@@ -39,8 +39,8 @@ TEST_CASE(literal_test)
     migraphx::literal l2 = l1; // NOLINT
     EXPECT(l1 == l2);
     EXPECT(l1.at<int>(0) == 1);
-    EXPECT(!l1.empty());
-    EXPECT(!l2.empty());
+    EXPECT(not l1.empty());
+    EXPECT(not l2.empty());
     migraphx::literal l3{};
     migraphx::literal l4{};
...
@@ -3589,7 +3589,7 @@ def nms_test():
     st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
                                        [1])
     out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
-                                        [6, 3])
+                                        [None, 3])

     node = onnx.helper.make_node('NonMaxSuppression',
                                  inputs=[
@@ -3603,6 +3603,108 @@ def nms_test():
     return ([node], [b, s, mo, iou, st], [out])


+@onnx_test
+def nms_use_dyn_output_false_test():
+    b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
+    s = helper.make_tensor_value_info('scores', TensorProto.FLOAT, [1, 1, 6])
+    mo = helper.make_tensor_value_info('max_output_boxes_per_class',
+                                       TensorProto.INT64, [1])
+    iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
+                                        [1])
+    st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
+                                       [1])
+    out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
+                                        [None, 3])
+
+    node = onnx.helper.make_node('NonMaxSuppression',
+                                 inputs=[
+                                     'boxes', 'scores',
+                                     'max_output_boxes_per_class',
+                                     'iou_threshold', 'score_threshold'
+                                 ],
+                                 outputs=['selected_indices'],
+                                 use_dyn_output=0)
+
+    return ([node], [b, s, mo, iou, st], [out])
+
+
+@onnx_test
+def nms_dynamic_batch_test():
+    b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [None, 6, 4])
+    s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
+                                      [None, 1, 6])
+    mo = helper.make_tensor_value_info('max_output_boxes_per_class',
+                                       TensorProto.INT64, [1])
+    iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
+                                        [1])
+    st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
+                                       [1])
+    out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
+                                        [None, 3])
+
+    node = onnx.helper.make_node('NonMaxSuppression',
+                                 inputs=[
+                                     'boxes', 'scores',
+                                     'max_output_boxes_per_class',
+                                     'iou_threshold', 'score_threshold'
+                                 ],
+                                 outputs=['selected_indices'],
+                                 center_point_box=1,
+                                 use_dyn_output=1)
+
+    return ([node], [b, s, mo, iou, st], [out])
+
+
+@onnx_test
+def nms_dynamic_boxes_test():
+    b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, None, 4])
+    s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
+                                      [1, 1, None])
+    mo = helper.make_tensor_value_info('max_output_boxes_per_class',
+                                       TensorProto.INT64, [1])
+    iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
+                                        [1])
+    st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
+                                       [1])
+    out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
+                                        [None, 3])
+
+    node = onnx.helper.make_node('NonMaxSuppression',
+                                 inputs=[
+                                     'boxes', 'scores',
+                                     'max_output_boxes_per_class',
+                                     'iou_threshold', 'score_threshold'
+                                 ],
+                                 outputs=['selected_indices'])
+
+    return ([node], [b, s, mo, iou, st], [out])
+
+
+@onnx_test
+def nms_dynamic_classes_test():
+    b = helper.make_tensor_value_info('boxes', TensorProto.FLOAT, [1, 6, 4])
+    s = helper.make_tensor_value_info('scores', TensorProto.FLOAT,
+                                      [1, None, 6])
+    mo = helper.make_tensor_value_info('max_output_boxes_per_class',
+                                       TensorProto.INT64, [1])
+    iou = helper.make_tensor_value_info('iou_threshold', TensorProto.FLOAT,
+                                        [1])
+    st = helper.make_tensor_value_info('score_threshold', TensorProto.FLOAT,
+                                       [1])
+    out = helper.make_tensor_value_info('selected_indices', TensorProto.INT64,
+                                        [None, 3])
+
+    node = onnx.helper.make_node('NonMaxSuppression',
+                                 inputs=[
+                                     'boxes', 'scores',
+                                     'max_output_boxes_per_class',
+                                     'iou_threshold', 'score_threshold'
+                                 ],
+                                 outputs=['selected_indices'])
+
+    return ([node], [b, s, mo, iou, st], [out])
+
+
 @onnx_test
 def not_test():
     x = helper.make_tensor_value_info('0', TensorProto.INT32, [4])
...