Unverified Commit e5f47154 authored by Umang Yadav's avatar Umang Yadav Committed by GitHub
Browse files

Merge branch 'develop' into workspace_size

parents 4a3afd0f d78bcdfb
......@@ -100,7 +100,7 @@ struct parse_conv : op_parser<parse_conv>
{
MIGRAPHX_THROW("padding should have 4 values");
}
if(padding[0] != padding[2] || padding[1] != padding[3])
if(padding[0] != padding[2] or padding[1] != padding[3])
{
MIGRAPHX_THROW("migraphx does not support asymetric padding");
}
......
......@@ -90,7 +90,7 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
calculate_padding(0, pads, input_dims[2], op.stride[0], op.dilation[0], weight_h);
calculate_padding(1, pads, input_dims[3], op.stride[1], op.dilation[1], weight_w);
if(pads[0] != pads[2] || pads[1] != pads[3])
if(pads[0] != pads[2] or pads[1] != pads[3])
{
std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
l0 = info.add_instruction(migraphx::make_op("pad", {{"pads", padding}}), l0);
......
......@@ -42,7 +42,7 @@ struct parse_pooling : op_parser<parse_pooling>
tf_parser::node_info info,
std::vector<instruction_ref> args) const
{
if(!starts_with(opd.tf_name, "Max") && !starts_with(opd.tf_name, "Av"))
if(not starts_with(opd.tf_name, "Max") and not starts_with(opd.tf_name, "Av"))
{
MIGRAPHX_THROW("tf pooling mode must be Max or Average");
}
......
......@@ -41,8 +41,9 @@ struct parse_relu6 : op_parser<parse_relu6>
const tf_parser::node_info& info,
std::vector<instruction_ref> args) const
{
auto min_val = info.add_literal(0.0f);
auto max_val = info.add_literal(6.0f);
shape::type_t output_type = args[0]->get_shape().type();
auto min_val = info.add_literal(migraphx::literal{migraphx::shape{output_type}, {0.0f}});
auto max_val = info.add_literal(migraphx::literal{migraphx::shape{output_type}, {6.0f}});
return info.add_common_op("clip", args[0], min_val, max_val);
}
......
......@@ -371,7 +371,7 @@ void tf_parser::parse_node(const std::string& name)
{
result = ops[node.op()](*this, {get_attributes(node), node.op(), mm}, args);
}
assert(!result.empty());
assert(not result.empty());
// First output has no ":" delimiter
instructions[name] = result.front();
for(size_t i = 1; i < result.size(); i++)
......@@ -458,7 +458,7 @@ literal tf_parser::parse_tensor(const tensorflow::TensorProto& t) const
{
std::vector<size_t> dims = parse_dims(t.tensor_shape());
size_t shape_size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<size_t>());
if(!t.tensor_content().empty()) // has raw data
if(not t.tensor_content().empty()) // has raw data
{
const std::string& s = t.tensor_content();
switch(t.dtype())
......
......@@ -78,7 +78,7 @@ void tmp_dir::execute(const std::string& exe, const std::string& args) const
tmp_dir::~tmp_dir()
{
if(!enabled(MIGRAPHX_DEBUG_SAVE_TEMP_DIR{}))
if(not enabled(MIGRAPHX_DEBUG_SAVE_TEMP_DIR{}))
{
fs::remove_all(this->path);
}
......
......@@ -400,7 +400,7 @@ std::pair<value*, bool> value::insert(const value& v)
{
if(v.key.empty())
{
if(!x)
if(not x)
x = std::make_shared<array_value_holder>();
get_array_impl(x).push_back(v);
assert(this->if_array());
......@@ -408,7 +408,7 @@ std::pair<value*, bool> value::insert(const value& v)
}
else
{
if(!x)
if(not x)
x = std::make_shared<object_value_holder>();
auto p = x->if_object()->emplace(v.key, get_array_impl(x).size());
if(p.second)
......@@ -420,7 +420,7 @@ std::pair<value*, bool> value::insert(const value& v)
value* value::insert(const value* pos, const value& v)
{
assert(v.key.empty());
if(!x)
if(not x)
x = std::make_shared<array_value_holder>();
auto&& a = get_array_impl(x);
auto it = a.insert(a.begin() + (pos - begin()), v);
......@@ -466,7 +466,7 @@ bool compare(const value& x, const value& y, F f)
value::type_t value::get_type() const
{
if(!x)
if(not x)
return null_type;
return x->get_type();
}
......
......@@ -55,7 +55,7 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(!inputs[0].standard())
if(not inputs[0].standard())
{
throw std::runtime_error("first arg must be standard shaped");
}
......
......@@ -49,6 +49,6 @@ bool create_shapes(bool dynamic_allowed)
TEST_CASE(allow_dynamic_shape) { EXPECT(create_shapes(true)); }
TEST_CASE(fail_dynamic_shape) { EXPECT(!create_shapes(false)); }
TEST_CASE(fail_dynamic_shape) { EXPECT(not create_shapes(false)); }
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -187,7 +187,7 @@ TEST_CASE(print_test)
std::stringstream ss;
ss << p;
std::string s = ss.str();
EXPECT(!s.empty());
EXPECT(not s.empty());
}
TEST_CASE(param_test)
......
......@@ -47,7 +47,7 @@ TEST_CASE(is_supported)
{
auto p = create_program();
auto targets = migraphx::get_targets();
EXPECT(!targets.empty());
EXPECT(not targets.empty());
auto t = migraphx::make_target("fpga");
const auto assignments = p.get_target_assignments({t});
......
......@@ -112,12 +112,12 @@ struct mod_pass_op
migraphx::shape compute_shape(std::vector<migraphx::shape> inputs,
std::vector<migraphx::module_ref> mods) const
{
if(!mods.empty())
if(not mods.empty())
{
auto out_shapes = mods[0]->get_output_shapes();
return out_shapes[0];
}
if(!inputs.empty())
if(not inputs.empty())
{
return inputs.front();
}
......
......@@ -345,7 +345,7 @@ inline std::ostream& operator<<(std::ostream& os, const color& c)
template <class T, class F>
void failed(T x, const char* msg, const char* func, const char* file, int line, F f)
{
if(!bool(x.value()))
if(not bool(x.value()))
{
std::cout << func << std::endl;
std::cout << file << ":" << line << ":" << std::endl;
......
......@@ -39,8 +39,8 @@ TEST_CASE(literal_test)
migraphx::literal l2 = l1; // NOLINT
EXPECT(l1 == l2);
EXPECT(l1.at<int>(0) == 1);
EXPECT(!l1.empty());
EXPECT(!l2.empty());
EXPECT(not l1.empty());
EXPECT(not l2.empty());
migraphx::literal l3{};
migraphx::literal l4{};
......
......@@ -38,7 +38,6 @@
#include <migraphx/onnx.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/pad.hpp>
#include <migraphx/op/pooling.hpp>
#include <migraphx/op/lrn.hpp>
#include <migraphx/op/reshape.hpp>
......
......@@ -3988,7 +3988,8 @@ TEST_CASE(not_test)
std::vector<char> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<bool> gold(data.size());
std::transform(data.begin(), data.end(), gold.begin(), [](bool n) -> bool { return !n; });
std::transform(
data.begin(), data.end(), gold.begin(), [](bool n) -> bool { return not n; });
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
......
......@@ -43,7 +43,7 @@ TEST_CASE(test_shape_assign)
migraphx::shape s1{migraphx::shape::float_type, {100, 32, 8, 8}};
migraphx::shape s2 = s1; // NOLINT
EXPECT(s1 == s2);
EXPECT(!(s1 != s2));
EXPECT(not(s1 != s2));
}
TEST_CASE(test_shape_packed_default)
......@@ -325,7 +325,7 @@ TEST_CASE(test_shape_default_copy)
migraphx::shape s1{};
migraphx::shape s2{};
EXPECT(s1 == s2);
EXPECT(!(s1 != s2));
EXPECT(not(s1 != s2));
}
TEST_CASE(test_shape_normalize_standard1)
......
......@@ -495,10 +495,10 @@ def relu6_test(g1):
@tf_test
def relu6_mismatch_test(g1):
def relu6_half_test(g1):
with g1.as_default():
g1_input = tf.compat.v1.placeholder(tf.float16,
shape=(1, 3, 13, 37),
shape=(1, 3, 16, 16),
name='0')
tf.nn.relu6(g1_input, 'relu6')
......@@ -708,7 +708,7 @@ if __name__ == '__main__':
pow_test()
relu_test()
relu6_test()
relu6_mismatch_test()
relu6_half_test()
reshape_test()
rsqrt_test()
shape_test()
......
......@@ -2,7 +2,7 @@
:
0 Placeholder*
dtype0*
shape: %
shape:

relu6Relu60*
T0"
\ No newline at end of file
......@@ -729,27 +729,23 @@ TEST_CASE(relu6_test)
EXPECT(p == prog);
}
TEST_CASE(relu6_mismatch_test)
TEST_CASE(relu6_half_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<size_t> input_lens{1, 3, 13, 37};
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, input_lens});
auto min_val = mm->add_literal(0.0f);
auto max_val = mm->add_literal(6.0f);
auto l0_convert = mm->add_instruction(
migraphx::make_op("convert", {{"target_type", migraphx::shape::float_type}}), l0);
std::vector<size_t> input_lens{1, 3, 16, 16};
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, input_lens});
auto min_val =
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type}, {0.0f}});
auto max_val =
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type}, {6.0f}});
min_val = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
min_val);
max_val = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", input_lens}}),
max_val);
mm->add_instruction(migraphx::make_op("clip"), l0_convert, min_val, max_val);
auto prog = optimize_tf("relu6_mismatch_test.pb", false);
mm->add_instruction(migraphx::make_op("clip"), l0, min_val, max_val);
auto prog = optimize_tf("relu6_half_test.pb", false);
EXPECT(p == prog);
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment