Commit b24ec343 authored by charlie's avatar charlie
Browse files

Temporary debugging changes: invert the allclose mismatch predicate, disable the reduce-fusion and layernorm-pointwise matchers, evaluate half-precision constants through double-precision converts, quantize to fp64 instead of fp16/fp32, and print output-vs-gold differences in check_correctness.

parent 961f0e1b
......@@ -51,6 +51,14 @@ void verify_reduced_program(const program& p,
const parameter_map& inputs = {},
verify::tolerance tols = verify::tolerance{});
// Verify a single reduced variant of the program: compile `p` for target `t`
// with the given options/quantization precision and compare its outputs
// against the reference within `tols`, feeding `inputs` as parameters.
// NOTE(review): `n` presumably selects which reduction (instruction cutoff)
// to verify — semantics inferred from the sibling verify_reduced_program
// declaration above; confirm against the definition in the .cpp.
void verify_reduced(program p,
                    int n,
                    const target& t,
                    compile_options options,
                    precision quantize,
                    const parameter_map& inputs,
                    verify::tolerance tols);
} // namespace MIGRAPHX_INLINE_NS
} // namespace driver
} // namespace migraphx
......
......@@ -340,9 +340,10 @@ void fuse_reduce::apply(module_pass_manager& mpm) const
mpm.run_pass(dead_code_elimination{});
for(int i = 0; i < 4; i++)
{
match::find_matches(
mpm, find_reduce_pointwise{}, find_pointwise_reduce{}, find_reduce_reduce{});
mpm.run_pass(dead_code_elimination{});
// DEBUG
// match::find_matches(
// mpm, find_reduce_pointwise{}, find_pointwise_reduce{}, find_reduce_reduce{});
// mpm.run_pass(dead_code_elimination{});
}
}
......
......@@ -238,7 +238,7 @@ bool allclose(const R1& r1, const R2& r2, tolerance tols)
if(n == range_distance(r2))
{
auto idx = mismatch_idx(r1, r2, [&](auto x, auto y) {
return abs_diff(double(x), double(y)) > tols.atol + tols.rtol * std::abs(double(y));
return abs_diff(double(x), double(y)) < tols.atol + tols.rtol * std::abs(double(y));
});
return idx >= range_distance(r1);
}
......
......@@ -80,43 +80,46 @@ void propagate_constant::apply(module& m) const
// Compute literals in parallel
std::vector<instruction_ref> const_instrs_vec{const_instrs.begin(), const_instrs.end()};
std::vector<argument> literals(const_instrs_vec.size());
for(int i = 0; i < const_instrs_vec.size(); ++i)
{
// DEBUG
auto ins = const_instrs_vec[i];
if(ins->get_shape().type() == shape::half_type)
{
auto inputs = ins->inputs();
std::vector<instruction_ref> new_inputs(inputs.size());
std::vector<instruction_ref> added_instructions;
std::transform(inputs.begin(), inputs.end(), new_inputs.begin(), [&](auto input) {
auto input_type = input->get_shape().type();
if(input_type != shape::half_type and input_type != shape::float_type)
return input;
auto ai = m.add_instruction(
make_op("convert", {{"target_type", shape::double_type}}), input);
added_instructions.push_back(ai);
return ai;
});
auto new_ins = m.add_instruction(ins->get_operator(), new_inputs);
added_instructions.push_back(new_ins);
auto after_convert = m.add_instruction(
make_op("convert", {{"target_type", ins->get_shape().type()}}), new_ins);
added_instructions.push_back(after_convert);
literals[i] = after_convert->eval();
for(auto a_ins : added_instructions)
{
m.remove_instruction(a_ins);
}
}
else
{
literals[i] = const_instrs_vec[i]->eval();
}
// Original
// literals[i] = const_instrs_vec[i]->eval();
}
// DEBUG
// for(int i = 0; i < const_instrs_vec.size(); ++i)
//{
// auto ins = const_instrs_vec[i];
// if(ins->get_shape().type() == shape::half_type)
// {
// auto inputs = ins->inputs();
// std::vector<instruction_ref> new_inputs(inputs.size());
// std::vector<instruction_ref> added_instructions;
// std::transform(inputs.begin(), inputs.end(), new_inputs.begin(), [&](auto input) {
// auto input_type = input->get_shape().type();
// if(input_type != shape::half_type and input_type != shape::float_type)
// return input;
// auto ai = m.add_instruction(
// make_op("convert", {{"target_type", shape::double_type}}), input);
// added_instructions.push_back(ai);
// return ai;
// });
// auto new_ins = m.add_instruction(ins->get_operator(), new_inputs);
// added_instructions.push_back(new_ins);
// auto after_convert = m.add_instruction(
// make_op("convert", {{"target_type", ins->get_shape().type()}}), new_ins);
// added_instructions.push_back(after_convert);
// literals[i] = after_convert->eval();
// for(auto a_ins : added_instructions)
// {
// m.remove_instruction(a_ins);
// }
// }
// else
// {
// literals[i] = const_instrs_vec[i]->eval();
// }
// }
// Original
par_for(const_instrs_vec.size(), 1, [&](const auto i) {
literals[i] = const_instrs_vec[i]->eval();
});
// Replace instructions in m
for(size_t i = 0; i < const_instrs_vec.size(); i++)
......
......@@ -62,11 +62,11 @@ static void quantize_module(module& m, const std::vector<std::string>& ins_names
// return m.insert_instruction(
// ins, make_op("convert", {{"target_type", shape::half_type}}), input);
// DEBUG hack to fp32 atleast
if(input_type != shape::half_type)
// DEBUG hack to fp64
if(input_type != shape::half_type and input_type != shape::float_type)
return input;
return m.insert_instruction(
ins, make_op("convert", {{"target_type", shape::float_type}}), input);
ins, make_op("convert", {{"target_type", shape::double_type}}), input);
});
......
......@@ -847,7 +847,7 @@ void fuse_ops::apply(module& m) const
match::find_matches(m, find_conv_pointwise{ctx}, find_conv_bias_relu{ctx}, find_conv_bias{ctx});
run_passes(m, {dead_code_elimination{}});
match::find_matches(m,
find_layernorm_pointwise{},
// find_layernorm_pointwise{},
find_concat_pointwise{},
find_gemm_pointwise{},
find_contiguous_tranpose_gemm{},
......
......@@ -86,6 +86,8 @@ struct miopen_apply
auto& ctx = get_context();
int8_x4_format = get_int8_x4_format(ctx);
compute_fp32 = get_compute_fp32_flag();
// DEBUG
// compute_fp32 = true;
offload_copy = (mod == mpm->get_root_module()) ? pass->offload_copy : false;
add_generic_op("contiguous");
......
......@@ -108,6 +108,8 @@ def check_correctness(gold_outputs,
print('Expected value: \n{}'.format(gold_outputs[i]))
print('......')
print('Actual value: \n{}\n'.format(outputs[i]))
print('\n\n')
print(r'\nDifference: \n{}\n'.format(outputs[i] - gold_outputs[i]))
else:
print('Outputs do not match')
break
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment