Commit 263579c2 authored by Paul

Formatting

parent d007b98f
@@ -26,8 +26,10 @@ struct argument : raw_data<argument>
     }
     argument(shape s, std::function<char*()> d) : data(d), m_shape(s) {}
-    template<class T>
-    argument(shape s, T* d) : data([d] { return reinterpret_cast<char*>(d); }), m_shape(s) {}
+    template <class T>
+    argument(shape s, T* d) : data([d] { return reinterpret_cast<char*>(d); }), m_shape(s)
+    {
+    }
     /// Provides a raw pointer to the data
     std::function<char*()> data;
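A note on the constructor reformatted above: it type-erases a typed pointer behind std::function<char*()> by capturing the pointer in a lambda, so every argument exposes its storage the same way regardless of element type. A minimal sketch of the pattern, using a hypothetical stripped-down stand-in for argument (the shape plumbing is omitted):

#include <functional>
#include <iostream>

// Stand-in for the argument pattern above: any typed buffer is stored as a
// callable that yields its bytes, so downstream code never sees the type.
struct erased_buffer
{
    template <class T>
    erased_buffer(T* d) : data([d] { return reinterpret_cast<char*>(d); })
    {
    }
    std::function<char*()> data;
};

int main()
{
    float f = 2.5f;
    erased_buffer b{&f};
    // Recover the value through the erased char* handle.
    std::cout << *reinterpret_cast<float*>(b.data()) << "\n";
}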
@@ -43,7 +43,7 @@ using remove_ptr = typename std::
 template <class T>
 using shared = std::shared_ptr<remove_ptr<T>>;
-template<class T>
+template <class T>
 shared<T> share(T p)
 {
     return shared<T>{std::move(p)};
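share converts an owning pointer into a std::shared_ptr of its pointee, with remove_ptr (its definition is truncated in the hunk header above) presumably mapping the smart-pointer type to its element type. A sketch under that assumption, with a hypothetical remove_ptr based on element_type:

#include <memory>
#include <utility>

// Hypothetical remove_ptr: map a smart-pointer type to what it points at.
// (The project's actual alias is cut off in the diff above.)
template <class T>
using remove_ptr = typename T::element_type;

template <class T>
using shared = std::shared_ptr<remove_ptr<T>>;

// Transfer ownership from any movable owning pointer into a shared_ptr.
template <class T>
shared<T> share(T p)
{
    return shared<T>{std::move(p)};
}

int main()
{
    auto up = std::make_unique<int>(42);
    std::shared_ptr<int> sp = share(std::move(up)); // unique -> shared
    return *sp == 42 ? 0 : 1;
}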
@@ -227,10 +227,7 @@ struct outline
         check_shapes{inputs}.has(0);
         return s;
     }
-    argument compute(shape, std::vector<argument>) const
-    {
-        return {s, nullptr};
-    }
+    argument compute(shape, std::vector<argument>) const { return {s, nullptr}; }
 };
 } // namespace rtg
@@ -80,8 +80,10 @@ shape program::get_parameter_shape(std::string name)
             return false;
         }
     });
-    if (ins != this->end()) return ins->result;
-    else return {};
+    if(ins != this->end())
+        return ins->result;
+    else
+        return {};
 }
 bool program::has_instruction(instruction_ref ins) const
@@ -26,7 +26,10 @@ struct cpu_convolution
         auto wei_h = weights.get_shape().lens()[2];
         auto wei_w = weights.get_shape().lens()[3];
-        dfor(output_shape.lens()[0], output_shape.lens()[1], output_shape.lens()[2], output_shape.lens()[3])(
+        dfor(output_shape.lens()[0],
+             output_shape.lens()[1],
+             output_shape.lens()[2],
+             output_shape.lens()[3])(
             [&](std::size_t o, std::size_t w, std::size_t i, std::size_t j) {
                 const int start_x = i * op.stride[0] - op.padding[0];
                 const int start_y = j * op.stride[1] - op.padding[1];
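dfor here takes the four output extents and invokes the lambda once per index combination, generating the nested loops of the convolution. A minimal sketch of such a helper, an assumption about its shape rather than the project's actual implementation:

#include <cstdio>

// Base case: all extents consumed; call the accumulated function.
template <class F>
void nested_for(F f)
{
    f();
}

// Peel one extent off, loop over it, and bind its index.
template <class F, class T, class... Ts>
void nested_for(F f, T x, Ts... xs)
{
    for(T i = 0; i < x; i++)
        nested_for([&](auto... is) { f(i, is...); }, xs...);
}

// dfor(a, b, ...) returns a callable that runs f(i, j, ...) over
// [0, a) x [0, b) x ...
template <class... Ts>
auto dfor(Ts... xs)
{
    return [=](auto f) { nested_for(f, xs...); };
}

int main()
{
    dfor(2, 3)([](int i, int j) { std::printf("%d %d\n", i, j); });
}

Written this way, the convolution body stays a single flat lambda instead of four nested for statements.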
@@ -8,7 +8,6 @@
 namespace rtg {
 namespace miopen {
-
 struct hip_allocate
 {
     std::string name() const { return "hip::allocate"; }
@@ -19,7 +18,7 @@ struct hip_allocate
     }
     argument compute(shape output_shape, std::vector<argument>) const
    {
-        char * data = nullptr;
+        char* data = nullptr;
         // TODO: Check return status
         hipMalloc(&data, output_shape.bytes());
         return {output_shape, data};
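On the TODO above: hipMalloc returns a hipError_t, so the status can be checked before the buffer is handed out. A hedged sketch of one way to do it; the checked_hip_malloc helper name is invented for illustration:

#include <hip/hip_runtime.h>
#include <stdexcept>
#include <string>

// Hypothetical helper addressing the TODO: fail loudly instead of
// silently returning a null device buffer.
char* checked_hip_malloc(std::size_t bytes)
{
    char* data = nullptr;
    hipError_t status = hipMalloc(&data, bytes);
    if(status != hipSuccess)
        throw std::runtime_error(std::string("hipMalloc failed: ") +
                                 hipGetErrorString(status));
    return data;
}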
@@ -42,7 +41,6 @@ struct hip_free
     }
 };
-
 using miopen_handle = RTG_MANAGE_PTR(miopenHandle_t, miopenDestroy);
 using tensor_descriptor = RTG_MANAGE_PTR(miopenTensorDescriptor_t, miopenDestroyTensorDescriptor);
 using convolution_descriptor = RTG_MANAGE_PTR(miopenConvolutionDescriptor_t,
                                               miopenDestroyConvolutionDescriptor);
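RTG_MANAGE_PTR evidently pairs a C API handle type with its destroy function to produce an RAII owner. A sketch of what such a macro typically expands to, a std::unique_ptr with a custom deleter; this is an assumption, and the project's macro may differ in detail. The widget type below is a hypothetical stand-in for an opaque handle like miopenHandle_t:

#include <memory>
#include <type_traits>

// Deleter that calls a fixed C-style destroy function on the raw handle.
template <class T, class F, F f>
struct manage_deleter
{
    void operator()(T x) const
    {
        if(x != nullptr)
            f(x);
    }
};

#define MANAGE_PTR(T, F) \
    std::unique_ptr<std::remove_pointer<T>::type, manage_deleter<T, decltype(&F), &F>>

// Hypothetical stand-ins for an opaque API handle and its destroy call.
struct widget { int id; };
void destroy_widget(widget* w) { delete w; }

using widget_ptr = MANAGE_PTR(widget*, destroy_widget);

int main()
{
    widget_ptr w{new widget{1}}; // destroy_widget runs at scope exit
}

The same shape covers miopen_handle, tensor_descriptor, and convolution_descriptor above: each alias binds one handle type to its matching MIOpen destroy call.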
@@ -163,7 +161,14 @@ struct miopen_relu
         float alpha = 1, beta = 0;
         auto x_desc = make_tensor(args[1].get_shape());
         auto y_desc = make_tensor(output_shape);
-        miopenActivationForward(args[0].get(), ad.get(), &alpha, x_desc.get(), args[1].get(), &beta, y_desc.get(), args[2].get());
+        miopenActivationForward(args[0].get(),
+                                ad.get(),
+                                &alpha,
+                                x_desc.get(),
+                                args[1].get(),
+                                &beta,
+                                y_desc.get(),
+                                args[2].get());
         return args[2];
     }
@@ -192,7 +197,7 @@ struct miopen_apply
     instruction_ref insert_allocation(instruction_ref ins, const shape& s)
     {
-        if (ins == --prog->end())
+        if(ins == --prog->end())
         {
             return prog->add_parameter("output", s);
         }
@@ -211,7 +216,12 @@ struct miopen_apply
         auto cd = make_conv(op);
         auto output = insert_allocation(ins, ins->result);
-        prog->replace_instruction(ins, miopen_convolution{op, std::move(cd)}, handle, ins->arguments.at(0), ins->arguments.at(1), output);
+        prog->replace_instruction(ins,
+                                  miopen_convolution{op, std::move(cd)},
+                                  handle,
+                                  ins->arguments.at(0),
+                                  ins->arguments.at(1),
+                                  output);
     }
     void apply_activation(instruction_ref ins)
@@ -221,7 +231,8 @@ struct miopen_apply
         if(op.mode == "relu")
         {
             auto output = insert_allocation(ins, ins->result);
-            prog->replace_instruction(ins, miopen_relu{std::move(ad)}, handle, ins->arguments.at(0), output);
+            prog->replace_instruction(
+                ins, miopen_relu{std::move(ad)}, handle, ins->arguments.at(0), output);
         }
     }
 };
@@ -33,7 +33,7 @@ hip_ptr hip_allocate(std::size_t sz)
     return hip_ptr{result};
 }
-template<class T>
+template <class T>
 hip_ptr write(const T& x)
 {
     using type = typename T::value_type;
@@ -44,7 +44,7 @@ hip_ptr write(const T& x)
     return result;
 }
-template<class T>
+template <class T>
 std::vector<T> read(const hip_ptr& x, std::size_t sz)
 {
     std::vector<T> result(sz);
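The bodies of write and read are elided by the hunk boundaries; presumably they copy through device memory with hipMemcpy. A hedged sketch of what they likely look like, reusing hip_ptr and hip_allocate from this file and assuming hip_ptr::get() returns the raw device pointer:

template <class T>
hip_ptr write(const T& x)
{
    using type = typename T::value_type;
    auto sz = x.size() * sizeof(type);
    auto result = hip_allocate(sz);
    // Copy host -> device; status checking omitted, as with hipMalloc above.
    hipMemcpy(result.get(), x.data(), sz, hipMemcpyHostToDevice);
    return result;
}

template <class T>
std::vector<T> read(const hip_ptr& x, std::size_t sz)
{
    std::vector<T> result(sz);
    // Copy device -> host into a freshly sized vector.
    hipMemcpy(result.data(), x.get(), sz * sizeof(T), hipMemcpyDeviceToHost);
    return result;
}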
@@ -92,10 +92,7 @@ std::vector<float> cpu()
     auto x = get_tensor_argument_cpu({rtg::shape::float_type, {4, 3, 3, 3}});
     auto w = get_tensor_argument_cpu({rtg::shape::float_type, {4, 3, 3, 3}});
     p.compile(rtg::cpu::cpu_target{});
-    auto r = p.eval({
-        {"x", x},
-        {"w", w}
-    });
+    auto r = p.eval({{"x", x}, {"w", w}});
     r.visit([&](auto output) { result.assign(output.begin(), output.end()); });
     return result;
}
@@ -109,12 +106,8 @@ std::vector<float> gpu()
     p.compile(rtg::miopen::miopen_target{});
     auto y = get_tensor_argument_gpu(p.get_parameter_shape("output"));
     auto handle = make_obj<miopen_handle>(&miopenCreate);
-    auto r = p.eval({
-        {"x", x},
-        {"w", w},
-        {"output", y},
-        {"handle", {rtg::shape::any_type, handle.get()}}
-    });
+    auto r = p.eval(
+        {{"x", x}, {"w", w}, {"output", y}, {"handle", {rtg::shape::any_type, handle.get()}}});
     r.visit([&](auto output) { result.assign(output.begin(), output.end()); });
     return result;
}
@@ -123,11 +116,8 @@ void test1()
 {
     auto x = cpu();
     auto y = gpu();
-    if (x == y)
+    if(x == y)
         printf("FAILED\n");
 }
-int main()
-{
-    test1();
-}
+int main() { test1(); }