"docs/vscode:/vscode.git/clone" did not exist on "f80f30dfd89933dbcc3c9bf8fe9ad21b7d13f3de"
Commit 263579c2 authored by Paul

Formatting

parent d007b98f
@@ -26,8 +26,10 @@ struct argument : raw_data<argument>
     }
     argument(shape s, std::function<char*()> d) : data(d), m_shape(s) {}
-    template<class T>
-    argument(shape s, T* d) : data([d] { return reinterpret_cast<char*>(d); }), m_shape(s) {}
+    template <class T>
+    argument(shape s, T* d) : data([d] { return reinterpret_cast<char*>(d); }), m_shape(s)
+    {
+    }
     /// Provides a raw pointer to the data
     std::function<char*()> data;
...
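For orientation, the templated constructor reformatted above type-erases a typed buffer behind the std::function<char*()> data member. A minimal usage sketch, assuming this repository's argument and shape headers are available (the include paths below are guesses, not taken from the diff):

// Sketch only: include paths are assumptions; the types come from the hunk above.
#include <rtg/argument.hpp>
#include <rtg/shape.hpp>
#include <vector>

int main()
{
    std::vector<float> host(4 * 3 * 3 * 3);
    rtg::shape s{rtg::shape::float_type, {4, 3, 3, 3}};
    rtg::argument a{s, host.data()}; // templated constructor captures host.data() in a lambda
    char* raw = a.data();            // invokes the stored callback, yields the buffer as char*
    (void)raw;
    return 0;
}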
@@ -43,7 +43,7 @@ using remove_ptr = typename std::
 template <class T>
 using shared = std::shared_ptr<remove_ptr<T>>;
-template<class T>
+template <class T>
 shared<T> share(T p)
 {
     return shared<T>{std::move(p)};
...
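share() adopts a uniquely owned pointer into a std::shared_ptr of the pointee type. A self-contained sketch with standard types; the remove_ptr alias here is only a stand-in for the truncated definition above:

// Sketch only: remove_ptr is a stand-in; shared/share mirror the hunk above.
#include <memory>
#include <utility>

template <class T>
using remove_ptr = typename std::pointer_traits<T>::element_type;

template <class T>
using shared = std::shared_ptr<remove_ptr<T>>;

template <class T>
shared<T> share(T p)
{
    return shared<T>{std::move(p)};
}

int main()
{
    auto owned  = std::make_unique<int>(42);
    auto common = share(std::move(owned)); // std::shared_ptr<int> taking over ownership
    return *common == 42 ? 0 : 1;
}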
@@ -227,10 +227,7 @@ struct outline
         check_shapes{inputs}.has(0);
         return s;
     }
-    argument compute(shape, std::vector<argument>) const
-    {
-        return {s, nullptr};
-    }
+    argument compute(shape, std::vector<argument>) const { return {s, nullptr}; }
 };
 } // namespace rtg
...
@@ -55,7 +55,7 @@ struct program
     }
     instruction_ref add_literal(literal l);
     instruction_ref add_outline(shape s);
     instruction_ref add_parameter(std::string name, shape s);
...
@@ -70,18 +70,20 @@ instruction_ref program::add_parameter(std::string name, shape s)
 shape program::get_parameter_shape(std::string name)
 {
     auto ins = std::find_if(
         impl->instructions.begin(), impl->instructions.end(), [&](const instruction& x) {
             if(x.op.name() == "@param")
             {
                 return any_cast<builtin::param>(x.op).parameter == name;
             }
             else
             {
                 return false;
             }
         });
-    if (ins != this->end()) return ins->result;
-    else return {};
+    if(ins != this->end())
+        return ins->result;
+    else
+        return {};
 }
 bool program::has_instruction(instruction_ref ins) const
...
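get_parameter_shape scans the instruction list for a @param builtin whose name matches and returns its result shape, falling back to a default-constructed shape when the name is absent. A usage snippet (not a full program) that mirrors how the GPU test further down queries the compiled program's "output" parameter:

// Snippet only: create_program() and the miopen target come from the test file below.
rtg::program p = create_program();
p.compile(rtg::miopen::miopen_target{});
rtg::shape out  = p.get_parameter_shape("output");       // shape of the buffer the target expects
rtg::shape none = p.get_parameter_shape("not-a-param");  // default-constructed shape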
@@ -26,7 +26,10 @@ struct cpu_convolution
         auto wei_h = weights.get_shape().lens()[2];
         auto wei_w = weights.get_shape().lens()[3];
-        dfor(output_shape.lens()[0], output_shape.lens()[1], output_shape.lens()[2], output_shape.lens()[3])(
+        dfor(output_shape.lens()[0],
+             output_shape.lens()[1],
+             output_shape.lens()[2],
+             output_shape.lens()[3])(
             [&](std::size_t o, std::size_t w, std::size_t i, std::size_t j) {
                 const int start_x = i * op.stride[0] - op.padding[0];
                 const int start_y = j * op.stride[1] - op.padding[1];
...
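dfor appears to iterate the Cartesian product of the extents passed to it and invoke the callback with one index per dimension; the reformatting above only splits its arguments across lines. The helper below is an illustrative stand-in for the four-index case, not the repository's dfor (which is invoked in curried form as dfor(dims...)(callback)):

// Sketch only: a hand-rolled equivalent of the 4-index iteration used above.
#include <cstddef>
#include <cstdio>

template <class F>
void dfor4(std::size_t n0, std::size_t n1, std::size_t n2, std::size_t n3, F f)
{
    for(std::size_t a = 0; a < n0; a++)
        for(std::size_t b = 0; b < n1; b++)
            for(std::size_t c = 0; c < n2; c++)
                for(std::size_t d = 0; d < n3; d++)
                    f(a, b, c, d); // one call per output element, e.g. (batch, channel, y, x)
}

int main()
{
    std::size_t count = 0;
    dfor4(2, 3, 4, 5, [&](std::size_t, std::size_t, std::size_t, std::size_t) { ++count; });
    std::printf("%zu\n", count); // prints 120 = 2*3*4*5
    return 0;
}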
@@ -8,7 +8,6 @@
 namespace rtg {
 namespace miopen {
 struct hip_allocate
 {
     std::string name() const { return "hip::allocate"; }
@@ -19,7 +18,7 @@ struct hip_allocate
     }
     argument compute(shape output_shape, std::vector<argument>) const
     {
-        char * data = nullptr;
+        char* data = nullptr;
         // TODO: Check return status
         hipMalloc(&data, output_shape.bytes());
         return {output_shape, data};
@@ -42,7 +41,6 @@ struct hip_free
     }
 };
 using miopen_handle = RTG_MANAGE_PTR(miopenHandle_t, miopenDestroy);
 using tensor_descriptor = RTG_MANAGE_PTR(miopenTensorDescriptor_t, miopenDestroyTensorDescriptor);
 using convolution_descriptor = RTG_MANAGE_PTR(miopenConvolutionDescriptor_t,
@@ -152,10 +150,10 @@ struct miopen_relu
 {
     shared<activation_descriptor> ad;
     std::string name() const { return "miopen::relu"; }
     shape compute_shape(std::vector<shape> inputs) const
     {
         check_shapes{inputs}.has(3);
         return inputs.at(1);
     }
     argument compute(shape output_shape, std::vector<argument> args) const
@@ -163,7 +161,14 @@ struct miopen_relu
         float alpha = 1, beta = 0;
         auto x_desc = make_tensor(args[1].get_shape());
         auto y_desc = make_tensor(output_shape);
-        miopenActivationForward(args[0].get(), ad.get(), &alpha, x_desc.get(), args[1].get(), &beta, y_desc.get(), args[2].get());
+        miopenActivationForward(args[0].get(),
+                                ad.get(),
+                                &alpha,
+                                x_desc.get(),
+                                args[1].get(),
+                                &beta,
+                                y_desc.get(),
+                                args[2].get());
         return args[2];
     }
@@ -192,13 +197,13 @@ struct miopen_apply
     instruction_ref insert_allocation(instruction_ref ins, const shape& s)
     {
-        if (ins == --prog->end())
+        if(ins == --prog->end())
         {
             return prog->add_parameter("output", s);
         }
         else
         {
             auto is = prog->add_outline(s);
             auto result = prog->insert_instruction(ins, hip_allocate{}, is);
             prog->insert_instruction(++ins, hip_free{}, result);
             return result;
@@ -207,21 +212,27 @@ struct miopen_apply
     void apply_convolution(instruction_ref ins)
     {
         auto&& op = any_cast<convolution>(ins->op);
         auto cd = make_conv(op);
         auto output = insert_allocation(ins, ins->result);
-        prog->replace_instruction(ins, miopen_convolution{op, std::move(cd)}, handle, ins->arguments.at(0), ins->arguments.at(1), output);
+        prog->replace_instruction(ins,
+                                  miopen_convolution{op, std::move(cd)},
+                                  handle,
+                                  ins->arguments.at(0),
+                                  ins->arguments.at(1),
+                                  output);
     }
     void apply_activation(instruction_ref ins)
     {
         auto&& op = any_cast<activation>(ins->op);
         auto ad = make_relu();
         if(op.mode == "relu")
         {
             auto output = insert_allocation(ins, ins->result);
-            prog->replace_instruction(ins, miopen_relu{std::move(ad)}, handle, ins->arguments.at(0), output);
+            prog->replace_instruction(
+                ins, miopen_relu{std::move(ad)}, handle, ins->arguments.at(0), output);
         }
     }
 };
...
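The file above wraps every raw MIOpen handle in an RTG_MANAGE_PTR alias (miopen_handle, tensor_descriptor, convolution_descriptor, and the activation_descriptor held by miopen_relu). The macro's expansion is not shown in this diff, but the pattern it is assumed to generate is a std::unique_ptr with a custom deleter that calls the paired destroy function; a standalone sketch of that pattern using a C FILE handle:

// Sketch only: illustrates the unique_ptr-with-deleter pattern; the real macro and
// MIOpen destroy functions belong to this repository and are not reproduced here.
#include <cstdio>
#include <memory>

struct file_closer
{
    void operator()(std::FILE* f) const { std::fclose(f); }
};
using file_ptr = std::unique_ptr<std::FILE, file_closer>;

int main()
{
    file_ptr f{std::fopen("example.txt", "w")};
    if(f)
        std::fputs("managed by a custom deleter\n", f.get());
    return 0;
} // fclose runs automatically when f goes out of scope (skipped if the pointer is null)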
@@ -11,8 +11,8 @@
 #include "test.hpp"
 using hip_ptr = RTG_MANAGE_PTR(void, hipFree);
 using miopen_handle = RTG_MANAGE_PTR(miopenHandle_t, miopenDestroy);
 template <class Result, class F, class... Ts>
 Result make_obj(F f, Ts... xs)
@@ -33,18 +33,18 @@ hip_ptr hip_allocate(std::size_t sz)
     return hip_ptr{result};
 }
-template<class T>
+template <class T>
 hip_ptr write(const T& x)
 {
     using type = typename T::value_type;
     auto size = x.size() * sizeof(type);
     auto result = hip_allocate(size);
     // TODO: Check status
     hipMemcpy(result.get(), x.data(), size, hipMemcpyHostToDevice);
     return result;
 }
-template<class T>
+template <class T>
 std::vector<T> read(const hip_ptr& x, std::size_t sz)
 {
     std::vector<T> result(sz);
@@ -56,9 +56,9 @@ std::vector<T> read(const hip_ptr& x, std::size_t sz)
 rtg::program create_program()
 {
     rtg::program p;
     auto input = p.add_parameter("x", rtg::shape{rtg::shape::float_type, {4, 3, 3, 3}});
     auto weights = p.add_parameter("w", rtg::shape{rtg::shape::float_type, {4, 3, 3, 3}});
     auto conv = p.add_instruction(rtg::convolution{}, input, weights);
     p.add_instruction(rtg::activation{"relu"}, conv);
     return p;
 }
@@ -92,10 +92,7 @@ std::vector<float> cpu()
     auto x = get_tensor_argument_cpu({rtg::shape::float_type, {4, 3, 3, 3}});
     auto w = get_tensor_argument_cpu({rtg::shape::float_type, {4, 3, 3, 3}});
     p.compile(rtg::cpu::cpu_target{});
-    auto r = p.eval({
-        {"x", x},
-        {"w", w}
-    });
+    auto r = p.eval({{"x", x}, {"w", w}});
     r.visit([&](auto output) { result.assign(output.begin(), output.end()); });
     return result;
 }
@@ -107,27 +104,20 @@ std::vector<float> gpu()
     auto x = get_tensor_argument_gpu({rtg::shape::float_type, {4, 3, 3, 3}});
     auto w = get_tensor_argument_gpu({rtg::shape::float_type, {4, 3, 3, 3}});
     p.compile(rtg::miopen::miopen_target{});
     auto y = get_tensor_argument_gpu(p.get_parameter_shape("output"));
     auto handle = make_obj<miopen_handle>(&miopenCreate);
-    auto r = p.eval({
-        {"x", x},
-        {"w", w},
-        {"output", y},
-        {"handle", {rtg::shape::any_type, handle.get()}}
-    });
+    auto r = p.eval(
+        {{"x", x}, {"w", w}, {"output", y}, {"handle", {rtg::shape::any_type, handle.get()}}});
     r.visit([&](auto output) { result.assign(output.begin(), output.end()); });
     return result;
 }
 void test1()
 {
     auto x = cpu();
     auto y = gpu();
-    if (x == y)
+    if(x == y)
         printf("FAILED\n");
 }
-int main()
-{
-    test1();
-}
+int main() { test1(); }
@@ -78,10 +78,10 @@ struct lhs_expression
     T value() const { return lhs; }
 // NOLINTNEXTLINE
 #define TEST_LHS_OPERATOR(op, name)                             \
     template <class U>                                          \
     auto operator op(const U& rhs) const                        \
     {                                                           \
         return make_expression(lhs, rhs, name{}); /* NOLINT */ \
     }
...
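Each TEST_LHS_OPERATOR invocation in test.hpp stamps out one operator on lhs_expression. For a hypothetical invocation such as TEST_LHS_OPERATOR(==, equal_tag) (the tag name is made up here; the real tag types live in test.hpp), the macro would expand to roughly the following member:

// Illustrative expansion only: lhs, make_expression and the tag type belong to the
// surrounding test framework and are not defined in this snippet.
template <class U>
auto operator==(const U& rhs) const
{
    return make_expression(lhs, rhs, equal_tag{}); /* NOLINT */
}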