"docs/result_pdb.png" did not exist on "d28f1092d5a4fcf008e8934f81ce003c2a67db87"
Unverified commit 0a347dff, authored by Shucai Xiao, committed by GitHub
Browse files

Move alloc copy to finalize (#602)



* code backup

* code backup
Co-authored-by: default avatarmvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 37fbabf5
......@@ -165,6 +165,11 @@ argument get_preallocation(context& ctx, const std::string& id)
return ctx.get_current_device().preallocations.at(id);
}
// Record a preallocated buffer in the current device's preallocation table,
// keyed by `id`. Any previous entry stored under the same id is overwritten.
// Counterpart of get_preallocation(); called from operators' finalize() steps.
void store_preallocated_param(context& ctx, const std::string& id, const argument& a)
{
    auto& preallocations = ctx.get_current_device().preallocations;
    preallocations[id] = a;
}
// clang-format off
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
......
......@@ -3,6 +3,7 @@
#include <migraphx/config.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/check_shapes.hpp>
#include <utility>
......@@ -151,7 +152,9 @@ struct hip_copy
std::ptrdiff_t output_alias(const std::vector<shape>&) const { return 1; }
};
struct hip_load_memory
void store_preallocated_param(context& ctx, const std::string& id, const argument& a);
struct hip_allocate_memory
{
shape s;
std::string id{};
......@@ -162,16 +165,53 @@ struct hip_load_memory
return pack(f(self.s, "shape"), f(self.id, "id"));
}
std::string name() const { return "hip::hip_load_memory"; }
std::string name() const { return "hip::hip_allocate_memory"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
check_shapes{inputs}.has(0);
return s;
}
argument compute(context& ctx, const shape&, const std::vector<argument>&) const
{
return get_preallocation(ctx, id);
}
void finalize(context& ctx, const shape&, const std::vector<shape>&) const
{
argument a = allocate_gpu(s);
store_preallocated_param(ctx, id, a);
}
};
// Operator that owns a host-side literal and copies it to device memory
// exactly once, in finalize(); compute() then just returns the cached device
// buffer, looked up via `id` in the context's preallocation table.
struct hip_copy_literal
{
    literal l;        // host literal to be uploaded to the GPU
    std::string id{}; // unique key into the context's preallocation table
    // Reflection hook used by the framework for serialization/printing.
    template <class Self, class F>
    static auto reflect(Self& self, F f)
    {
        return pack(f(self.l, "literal"), f(self.id, "id"));
    }
    std::string name() const { return "hip::hip_copy_literal"; }
    // Takes no inputs; the output shape is the literal's own shape.
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs}.has(0);
        return l.get_shape();
    }
    // No per-run work: return the device buffer stored during finalize().
    argument compute(context& ctx, const shape&, const std::vector<argument>&) const
    {
        return get_preallocation(ctx, id);
    }
    // One-time host->device copy of the literal, cached under `id`.
    void finalize(context& ctx, const shape&, const std::vector<shape>&) const
    {
        argument a = to_gpu(l.get_argument());
        store_preallocated_param(ctx, id, a);
    }
};
} // namespace gpu
......
......@@ -19,9 +19,7 @@ void preallocate_param::apply(program& p) const
std::string id = any_cast<builtin::param>(ins->get_operator()).parameter;
if(id != param)
continue;
argument a = allocate_gpu(ins->get_shape());
ctx->get_current_device().preallocations[id] = a;
auto r = p.insert_instruction(ins, hip_load_memory{a.get_shape(), id});
auto r = p.insert_instruction(ins, hip_allocate_memory{ins->get_shape(), id});
p.replace_instruction(ins, r);
}
}
......
......@@ -96,6 +96,7 @@ void schedule_model::record(program& p, instruction_ref ins, std::size_t wait_id
static std::unordered_map<std::string, std::size_t> create_weight_map()
{
return {{"hip::load_literal", 0},
{"hip::hip_allocate_memory", 0},
{"hip::hip_load_memory", 0},
{"hip::allocate", 0},
{"gpu::convolution", 8},
......
......@@ -28,9 +28,7 @@ void write_literals::apply(program& p) const
else
{
std::string id = "@literal:" + std::to_string(n);
argument a = to_gpu(ins->get_literal().get_argument());
ctx->get_current_device().preallocations[id] = a;
p.replace_instruction(ins, hip_load_memory{a.get_shape(), id});
p.replace_instruction(ins, hip_copy_literal{ins->get_literal(), id});
n++;
}
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.