Commit 5a068d7e authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

formatting

parent 95665ff2
......@@ -50,8 +50,8 @@ struct layernorm_matcher
{
return f("div")(arg(0)(x_minus_mean()),
arg(1)(skip_broadcasts(f("sqrt")(arg(0)(f("add")(
either_arg(0, 1)(variance(), is_constant().bind("eps"))))))));
arg(1)(skip_broadcasts(f("sqrt")(arg(0)(
f("add")(either_arg(0, 1)(variance(), is_constant().bind("eps"))))))));
}
auto matcher() const { return layernorm_onnx(); }
......
......@@ -92,7 +92,7 @@ struct layernorm_compiler : compiler<layernorm_compiler>
options.output = inputs.back();
options.inputs = inputs;
options.kernel_name = v.get("kernel", "layernorm_kernel");
auto eps = v.get("epsilon", 1e-12f);
auto eps = v.get("epsilon", 1e-12f);
auto src = interpolate_string(layernorm_kernel,
{{"kernel", options.kernel_name},
......
......@@ -38,8 +38,7 @@ struct layernorm_base
template <class Self, class F>
static auto reflect(Self& self, F f)
{
return pack(
f(self.epsilon, "epsilon"));
return pack(f(self.epsilon, "epsilon"));
}
shape compute_shape(std::vector<shape> inputs, std::vector<module_ref> mods) const
{
......@@ -68,7 +67,7 @@ struct layernorm_base
// Plain (non-fused) layernorm GPU operator. All behavior — epsilon field,
// reflection, and shape computation — is inherited from layernorm_base via
// CRTP; the second template argument is 0 here (presumably the count of
// extra/fused inputs — TODO confirm against layernorm_base's definition).
struct layernorm : layernorm_base<layernorm, 0>
{
// NOTE(review): the op registers under "gpu::prelayernorm" even though the
// struct is named layernorm — looks intentional (it runs before fusion?),
// but worth confirming; do not "fix" the string without checking callers.
std::string name() const { return "gpu::prelayernorm"; }
};
// Registers the operator with the MIGraphX op registry under name().
MIGRAPHX_REGISTER_OP(layernorm);
......@@ -87,7 +86,7 @@ struct find_layernorm
{
auto ins = r.result;
auto x_ins = r.instructions["x"];
auto eps = r.instructions["eps"]->eval().at<float>();
auto eps = r.instructions["eps"]->eval().at<float>();
m.replace_instruction(ins, layernorm{eps}, x_ins);
}
......@@ -104,7 +103,7 @@ struct find_add_layernorm
{
auto ins = r.result;
auto add_ins = r.instructions["add"];
auto eps = r.instructions["eps"]->eval().at<float>();
auto eps = r.instructions["eps"]->eval().at<float>();
m.replace_instruction(ins, add_layernorm{eps}, add_ins->inputs());
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment