Commit 511c8d8f authored by Paul

Merge from develop

parents 9b7c44ab 2a2c146c
@@ -33,6 +33,10 @@ auto generic_find_impl(rank<0>, C&& c, const T& x)
    return std::find(c.begin(), c.end(), x);
}
+struct empty
+{
+};
} // namespace detail
template <class C, class T>
@@ -71,6 +75,12 @@ bool all_of(const std::initializer_list<T>& c, const Predicate& p)
    return std::all_of(c.begin(), c.end(), p);
}
+template <class Predicate>
+bool all_of(detail::empty, const Predicate&)
+{
+    return true;
+}
template <class C, class Predicate>
bool any_of(const C& c, const Predicate& p)
{
@@ -83,6 +93,12 @@ bool any_of(const std::initializer_list<T>& c, const Predicate& p)
    return std::any_of(c.begin(), c.end(), p);
}
+template <class Predicate>
+bool any_of(detail::empty, const Predicate&)
+{
+    return false;
+}
template <class C, class Predicate>
bool none_of(const C& c, const Predicate& p)
{
@@ -95,6 +111,12 @@ bool none_of(const std::initializer_list<T>& c, const Predicate& p)
    return std::none_of(c.begin(), c.end(), p);
}
+template <class Predicate>
+bool none_of(detail::empty, const Predicate&)
+{
+    return true;
+}
template <class Range, class Iterator>
void copy(Range&& r, Iterator it)
{
...
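The detail::empty overloads above give the algorithms their vacuous answers when there is nothing to iterate. A minimal sketch of how they behave, assuming these helpers live in <migraphx/ranges.hpp> (the header path and predicate are illustrative):

    #include <migraphx/ranges.hpp> // assumed location of the overloads above

    bool vacuous_truth_demo()
    {
        auto is_positive = [](int x) { return x > 0; };
        bool a = migraphx::all_of(migraphx::detail::empty{}, is_positive);  // true: nothing violates the predicate
        bool n = migraphx::none_of(migraphx::detail::empty{}, is_positive); // true: nothing satisfies it either
        bool s = migraphx::any_of(migraphx::detail::empty{}, is_positive);  // false: nothing satisfies it
        return a && n && !s;
    }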
@@ -212,6 +212,25 @@ auto visit_all(T&& x, Ts&&... xs)
    };
}
+template <class T>
+auto visit_all(const std::vector<T>& x)
+{
+    auto&& s = x.front().get_shape();
+    if(!std::all_of(
+           x.begin(), x.end(), [&](const T& y) { return y.get_shape().type() == s.type(); }))
+        MIGRAPHX_THROW("Types must be the same");
+    return [&](auto v) {
+        s.visit_type([&](auto as) {
+            using type = typename decltype(as)::type;
+            std::vector<tensor_view<type>> result;
+            std::transform(x.begin(), x.end(), std::back_inserter(result), [&](const auto& y) {
+                return make_view(y.get_shape(), as.from(y.data()));
+            });
+            v(result);
+        });
+    };
+}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
...
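A sketch of how the new vector overload might be called; the helper that produces the arguments is hypothetical, and the include path is assumed. The visitor receives one typed tensor_view per element once all arguments share an element type:

    #include <migraphx/raw_data.hpp> // assumed location of visit_all
    #include <migraphx/argument.hpp>
    #include <iostream>

    void print_shapes(const std::vector<migraphx::argument>& args)
    {
        // All arguments must share one element type or visit_all throws.
        migraphx::visit_all(args)([](const auto& views) {
            // views is a std::vector<tensor_view<T>>, one typed view per argument
            for(const auto& v : views)
                std::cout << v.get_shape() << "\n";
        });
    }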
@@ -15,35 +15,18 @@ struct and_ : std::is_same<and_<Bs...>, and_<(Bs || true)...>> // NOLINT
template <bool B>
using bool_c = std::integral_constant<bool, B>;
-template <int N>
-struct requires_enum
-{
-    enum e
-    {
-        a = 0
-    };
-};
+#define MIGRAPHX_REQUIRES_PRIMITIVE_CAT(x, y) x##y
+#define MIGRAPHX_REQUIRES_CAT(x, y) MIGRAPHX_REQUIRES_PRIMITIVE_CAT(x, y)
-#define MIGRAPHX_REQUIRES_CAT(x, y) x##y
+#define MIGRAPHX_REQUIRES_VAR() MIGRAPHX_REQUIRES_CAT(PrivateRequires, __LINE__)
#ifdef CPPCHECK
#define MIGRAPHX_REQUIRES(...) class = void
#else
-#if 0
-// TODO: This currently crashed on clang
-#define MIGRAPHX_REQUIRES(...)                                                            \
-    typename migraphx::requires_enum<__LINE__>::e MIGRAPHX_REQUIRES_CAT(                  \
-        PrivateRequires,                                                                  \
-        __LINE__) = migraphx::requires_enum<__LINE__>::a,                                 \
-                    class = typename std::enable_if<and_<__VA_ARGS__,                     \
-                                MIGRAPHX_REQUIRES_CAT(PrivateRequires, __LINE__) ==       \
-                                    migraphx::requires_enum<__LINE__>::a>{}>::type
-#else
-#define MIGRAPHX_REQUIRES(...)                                                            \
-    typename migraphx::requires_enum<__LINE__>::e MIGRAPHX_REQUIRES_CAT(                  \
-        PrivateRequires, __LINE__) = migraphx::requires_enum<__LINE__>::a,                \
-    class = typename std::enable_if<and_<__VA_ARGS__>{}>::type
-#endif
+#define MIGRAPHX_REQUIRES(...)                                                            \
+    bool MIGRAPHX_REQUIRES_VAR() = true,                                                  \
+    typename std::enable_if<(MIGRAPHX_REQUIRES_VAR() && (migraphx::and_<__VA_ARGS__>{})), \
+                            int>::type = 0
#endif
} // namespace MIGRAPHX_INLINE_NS
...
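A sketch of the reworked macro at a call site: it now expands to a defaulted bool non-type parameter plus an enable_if'd int parameter, so it still sits at the end of a template parameter list (the function below is illustrative):

    #include <migraphx/requires.hpp> // assumed location of MIGRAPHX_REQUIRES
    #include <type_traits>

    template <class T, MIGRAPHX_REQUIRES(std::is_arithmetic<T>{})>
    T twice(T x)
    {
        return x + x; // twice(2) and twice(2.5) compile; non-arithmetic T is SFINAE'd away
    }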
@@ -42,7 +42,9 @@ template <class Range>
auto stream_write_value_impl(rank<1>, std::ostream& os, const Range& r)
    -> decltype(r.begin(), r.end(), void())
{
+    os << "{";
    os << stream_range(r);
+    os << "}";
}
template <class T>
...
@@ -52,6 +52,8 @@ inline std::string transform_string(std::string s, F f)
inline std::string to_upper(std::string s) { return transform_string(std::move(s), ::toupper); }
+inline std::string to_lower(std::string s) { return transform_string(std::move(s), ::tolower); }
inline bool starts_with(const std::string& value, const std::string& prefix)
{
    if(prefix.size() > value.size())
...
@@ -19,7 +19,7 @@ rocm_install_targets(
add_executable(read_onnx read_onnx.cpp)
rocm_clang_tidy_check(read_onnx)
-target_link_libraries(read_onnx migraphx_onnx)
+target_link_libraries(read_onnx migraphx_cpu migraphx_onnx)
if(MIGRAPHX_ENABLE_GPU)
...
@@ -100,6 +100,7 @@ struct onnx_parser
    void init_actv_func()
    {
+        // Support name format of all lower case or the first letter capital
        map_actv_funcs.insert(std::make_pair("tanh", op::tanh{}));
        map_actv_funcs.insert(std::make_pair("relu", op::relu{}));
        map_actv_funcs.insert(std::make_pair("sigmoid", op::sigmoid{}));
@@ -352,7 +353,8 @@ struct onnx_parser
        {
            // insert zeros for pad op (args[0] has 4 dims)
            padding = {0, 0, padding[0], padding[1], 0, 0, padding[2], padding[3]};
-            l0 = prog.add_instruction(op::pad{padding}, l0);
+            l0 = prog.add_instruction(op::pad{padding, std::numeric_limits<float>::lowest()},
+                                      l0);
        }
        else
        {
@@ -870,7 +872,9 @@ struct onnx_parser
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
-            std::copy(names.begin(), names.end(), vec_names.begin());
+            std::transform(names.begin(), names.end(), vec_names.begin(), [](auto name) {
+                return to_lower(name);
+            });
        }
        auto name_it = std::find_if(vec_names.begin(), vec_names.end(), [&](auto& name) {
@@ -961,7 +965,9 @@ struct onnx_parser
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
-            std::copy(names.begin(), names.end(), vec_names.begin());
+            std::transform(names.begin(), names.end(), vec_names.begin(), [](auto name) {
+                return to_lower(name);
+            });
        }
        // need 4 activation functions
@@ -1088,7 +1094,9 @@ struct onnx_parser
            auto names = attributes.at("activations").strings();
            vec_names.clear();
            vec_names.resize(names.size());
-            std::copy(names.begin(), names.end(), vec_names.begin());
+            std::transform(names.begin(), names.end(), vec_names.begin(), [](auto name) {
+                return to_lower(name);
+            });
        }
        // need 6 activation functions for bidirectional directions
...
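A small sketch of the normalization the parser now applies to the ONNX "activations" attribute, so that names such as "Tanh" and "tanh" both resolve to the same key in map_actv_funcs (the input names are illustrative):

    #include <migraphx/stringutils.hpp> // to_lower added in this commit
    #include <algorithm>
    #include <string>
    #include <vector>

    std::vector<std::string> names = {"Tanh", "Sigmoid", "Relu"};
    std::vector<std::string> vec_names(names.size());
    std::transform(names.begin(), names.end(), vec_names.begin(), [](auto name) {
        return migraphx::to_lower(name);
    });
    // vec_names == {"tanh", "sigmoid", "relu"}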
@@ -12,12 +12,7 @@ if(MIGRAPHX_ENABLE_PYTHON)
    C_VISIBILITY_PRESET hidden
    CXX_VISIBILITY_PRESET hidden
)
-if(MIGRAPHX_ENABLE_TF)
-    target_link_libraries(migraphx_py PRIVATE migraphx migraphx_tf migraphx_cpu)
-    target_compile_definitions(migraphx_py PRIVATE -DENABLE_TF)
-else()
-    target_link_libraries(migraphx_py PRIVATE migraphx migraphx_onnx migraphx_cpu)
-endif()
+target_link_libraries(migraphx_py PRIVATE migraphx migraphx_tf migraphx_onnx migraphx_cpu)
if(MIGRAPHX_ENABLE_GPU)
    target_link_libraries(migraphx_py PRIVATE migraphx_gpu)
    target_compile_definitions(migraphx_py PRIVATE -DHAVE_GPU)
...
@@ -6,11 +6,9 @@
#include <migraphx/generate.hpp>
#include <migraphx/cpu/target.hpp>
#include <migraphx/stringutils.hpp>
-#ifdef ENABLE_TF
#include <migraphx/tf.hpp>
-#else
#include <migraphx/onnx.hpp>
-#endif
+#include <migraphx/type_name.hpp>
#ifdef HAVE_GPU
#include <migraphx/gpu/target.hpp>
@@ -104,8 +102,13 @@ migraphx::shape to_shape(const py::buffer_info& info)
            t = as.type_enum();
            n = sizeof(as());
        }
    });
+    if(n == 0)
+    {
+        MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type" + info.format);
+    }
    auto strides = info.strides;
    std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t {
        return n > 0 ? i / n : 0;
@@ -161,16 +164,13 @@ PYBIND11_MODULE(migraphx, m)
        .def("__ne__", std::not_equal_to<migraphx::program>{})
        .def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); });
-#ifdef ENABLE_TF
    m.def("parse_tf",
          &migraphx::parse_tf,
          "Parse tf protobuf (default format is nhwc)",
          py::arg("filename"),
          py::arg("is_nhwc") = true);
-#else
    m.def("parse_onnx", &migraphx::parse_onnx);
-#endif
    m.def("get_target", [](const std::string& name) -> migraphx::target {
        if(name == "cpu")
            return migraphx::cpu::target{};
...
@@ -517,40 +517,60 @@ struct cpu_unary
    }
};
-struct softmax2d
+struct cpu_softmax
{
-    std::string name() const { return "cpu::softmax2d"; }
-    shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
+    op::softmax op;
+    template <class Self, class F>
+    static auto reflect(Self& self, F f)
+    {
+        return migraphx::reflect(self.op, f);
+    }
+    std::string name() const { return "cpu::softmax"; }
+    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
+    template <typename T>
+    std::size_t compute_batch_index(T idx, shape& batch_shape, int axis) const
+    {
+        idx[axis] = 0;
+        return batch_shape.index(idx);
+    }
    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
+        auto batch_lens     = output_shape.lens();
+        batch_lens[op.axis] = 1;
+        shape batch_shape{shape::int32_type, batch_lens};
        visit_all(result, args[0])([&](auto output, auto input) {
            using value_type = typename decltype(input)::value_type;
-            auto nb = input.get_shape().lens()[0];
-            auto nc = input.get_shape().lens()[1];
-            auto nh = input.get_shape().lens()[2];
-            auto nw = input.get_shape().lens()[3];
-            dfor(nb, nh, nw)([&](std::size_t b, std::size_t i, std::size_t j) {
-                value_type cmax = std::numeric_limits<value_type>::lowest();
-                for(std::size_t c = 0; c < nc; c++)
-                {
-                    cmax = std::max(cmax, input(b, c, i, j));
-                }
-                for(std::size_t c = 0; c < nc; c++)
-                {
-                    output(b, c, i, j) = std::exp(input(b, c, i, j) - cmax);
-                }
-                value_type sum = value_type(0);
-                for(std::size_t c = 0; c < nc; c++)
-                {
-                    sum += output(b, c, i, j);
-                }
-                for(std::size_t c = 0; c < nc; c++)
-                {
-                    output(b, c, i, j) = output(b, c, i, j) / sum;
-                }
-            });
+            std::vector<value_type> batch_max(batch_shape.elements(),
+                                              std::numeric_limits<value_type>::lowest());
+            shape_for_each(output_shape, [&](auto idx) {
+                auto index       = this->compute_batch_index(idx, batch_shape, op.axis);
+                batch_max[index] = std::max(batch_max[index], input(idx.begin(), idx.end()));
+            });
+            shape_for_each(output_shape, [&](auto idx) {
+                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
+                output(idx.begin(), idx.end()) =
+                    std::exp(input(idx.begin(), idx.end()) - batch_max[index]);
+            });
+            std::vector<value_type> batch_sum(batch_shape.elements(), value_type(0));
+            shape_for_each(output_shape, [&](auto idx) {
+                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
+                batch_sum[index] += output(idx.begin(), idx.end());
+            });
+            shape_for_each(output_shape, [&](auto idx) {
+                auto index = this->compute_batch_index(idx, batch_shape, op.axis);
+                output(idx.begin(), idx.end()) /= batch_sum[index];
+            });
        });
        return result;
    }
};
@@ -569,33 +589,19 @@ struct cpu_logsoftmax
    shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
    template <typename T>
-    std::size_t compute_batch_index(const T& idx, shape& batch_shape, int axis) const
+    std::size_t compute_batch_index(T idx, const shape& batch_shape, int axis) const
    {
-        if(axis == 0)
-        {
-            return 0;
-        }
-        else
-        {
-            std::vector<std::size_t> batch_idx(idx.begin(), idx.begin() + axis);
-            return batch_shape.index(batch_idx.begin(), batch_idx.end());
-        }
+        idx[axis] = 0;
+        return batch_shape.index(idx);
    }
    argument compute(context&, const shape& output_shape, std::vector<argument> args) const
    {
        argument result{output_shape};
-        auto lens = output_shape.lens();
-        std::vector<std::size_t> batch_lens{};
-        if(op.axis == 0)
-        {
-            batch_lens.push_back(1);
-        }
-        else
-        {
-            batch_lens.insert(batch_lens.begin(), lens.begin(), lens.begin() + op.axis);
-        }
-        shape batch_shape{migraphx::shape::uint32_type, batch_lens};
+        auto batch_lens     = output_shape.lens();
+        batch_lens[op.axis] = 1;
+        shape batch_shape{shape::int32_type, batch_lens};
        visit_all(result, args[0])([&](auto output, auto input) {
            using value_type = typename decltype(input)::value_type;
            std::vector<value_type> batch_max(batch_shape.elements(),
@@ -660,7 +666,7 @@ struct cpu_apply
        apply_map["logsoftmax"] = extend_op<cpu_logsoftmax, op::logsoftmax>();
        apply_map["lrn"]        = extend_op<cpu_lrn, op::lrn>();
        apply_map["pad"]        = extend_op<cpu_pad, op::pad>();
-        apply_map["softmax"]    = simple_op<softmax2d>();
+        apply_map["softmax"]    = extend_op<cpu_softmax, op::softmax>();
    }
    void apply()
...
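Both cpu_softmax and cpu_logsoftmax now share the same indexing trick: the reduction axis of the output shape is collapsed to length 1, and each element's multi-index is mapped into that batch shape by zeroing its axis coordinate, so every element along the softmax axis lands in the same batch_max/batch_sum slot. A small sketch with an illustrative 2x3x4 shape and axis = 1:

    migraphx::shape out{migraphx::shape::float_type, {2, 3, 4}};
    int axis         = 1;
    auto batch_lens  = out.lens();
    batch_lens[axis] = 1; // {2, 1, 4}
    migraphx::shape batch_shape{migraphx::shape::int32_type, batch_lens};

    std::vector<std::size_t> idx = {1, 2, 3};           // one element of the 2x3x4 output
    idx[axis] = 0;                                       // collapse the reduced axis
    auto slot = batch_shape.index(idx.begin(), idx.end()); // {1,0,3}, {1,1,3}, {1,2,3} all map here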
@@ -27,6 +27,7 @@ add_library(migraphx_device
    device/add_relu.cpp
    device/contiguous.cpp
    device/logsoftmax.cpp
+    device/softmax.cpp
    device/convert.cpp
    device/mul.cpp
    device/concat.cpp
...
@@ -10,22 +10,20 @@ namespace gpu {
namespace device {
argument concat(hipStream_t stream,
-                const migraphx::shape& output_shape,
+                const migraphx::shape&,
                std::vector<migraphx::argument> args,
                std::vector<std::size_t> offsets)
{
-    for(std::size_t l = 0; l < args.size() - 1; l++)
+    auto ninputs = args.size() - 1;
+    for(std::size_t j = 0; j < ninputs; j++)
    {
-        auto argl             = args[l];
-        std::size_t nelements = argl.get_shape().elements();
-        visit_all(args.back(), argl)([&](auto output, auto input) {
-            visit_tensor_size(output_shape.lens().size(), [&](auto ndim) {
-                auto* outptr      = output.data() + offsets[l];
-                const auto* inptr = input.data();
-                hip_tensor_descriptor<ndim> desc_input(input.get_shape());
-                hip_tensor_descriptor<ndim> desc_output(output.get_shape());
-                gs_launch(stream, nelements)(
-                    [=](auto i) { outptr[desc_output.linear(desc_input.multi(i))] = inptr[i]; });
+        auto&& arg            = args[j];
+        std::size_t nelements = arg.get_shape().elements();
+        auto offset           = offsets[j];
+        hip_visit_all(args.back(), arg)([&](auto output, auto input) {
+            gs_launch(stream, nelements)([=](auto i) {
+                auto idx = output.get_shape().index(input.get_shape().multi(i));
+                output.data()[idx + offset] = input.data()[i];
            });
        });
    }
...
@@ -11,35 +11,30 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
-argument gather(hipStream_t stream,
-                const migraphx::shape& output_shape,
-                std::vector<migraphx::argument> args,
-                int axis)
+argument gather(hipStream_t stream, argument result, argument arg1, argument arg2, int axis)
{
-    auto axis_index = (axis < 0) ? (axis + args[0].get_shape().lens().size()) : axis;
-    visit_all(args.back(), args[0])([&](auto output, auto input) {
-        std::size_t nelements = output_shape.elements();
-        args[1].visit([&](auto indices) {
-            const auto* indices_ptr = device_cast(indices.data());
-            auto* out_ptr           = device_cast(output.data());
-            const auto* in_ptr      = device_cast(input.data());
-            auto& input_shape = args[0].get_shape();
-            auto lens         = input_shape.lens();
-            lens[axis_index]  = args[1].get_shape().elements();
-            migraphx::shape out_comp_shape{output_shape.type(), lens};
-            visit_tensor_size(out_comp_shape.lens().size(), [&](auto n_out_dim) {
-                hip_tensor_descriptor<n_out_dim> desc_input(input_shape);
-                hip_tensor_descriptor<n_out_dim> desc_output(out_comp_shape);
-                gs_launch(stream, nelements)([=](auto ii) {
-                    auto in_idx        = desc_output.multi(ii);
-                    in_idx[axis_index] = indices_ptr[in_idx[axis_index]];
-                    out_ptr[ii]        = in_ptr[desc_input.linear(in_idx)];
+    auto axis_index   = (axis < 0) ? (axis + arg1.get_shape().lens().size()) : axis;
+    auto& input_shape = arg1.get_shape();
+    auto lens         = input_shape.lens();
+    lens[axis_index]  = arg2.get_shape().elements();
+    shape out_comp_shape{result.get_shape().type(), lens};
+    std::size_t nelements = result.get_shape().elements();
+    visit_all(result, arg1)([&](auto output, auto input_v) {
+        hip_visit_views(input_v, out_comp_shape)([&](auto input, auto out_comp) {
+            arg2.visit([&](auto indices) {
+                const auto* indices_ptr = device_cast(indices.data());
+                auto* output_ptr        = device_cast(output.data());
+                gs_launch(stream, nelements)([=](auto i) {
+                    auto idx        = out_comp.multi(i);
+                    idx[axis_index] = indices_ptr[idx[axis_index]];
+                    output_ptr[i]   = input[idx];
                });
            });
        });
    });
-    return args.back();
+    return result;
}
} // namespace device
...
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_ARRAY_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_ARRAY_HPP
#include <migraphx/gpu/device/types.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
template <class T, std::size_t N>
struct hip_array
{
T d[N];
MIGRAPHX_DEVICE_CONSTEXPR T& operator[](std::size_t i) { return d[i]; }
MIGRAPHX_DEVICE_CONSTEXPR const T& operator[](std::size_t i) const { return d[i]; }
MIGRAPHX_DEVICE_CONSTEXPR T* data() { return d; }
MIGRAPHX_DEVICE_CONSTEXPR const T* data() const { return d; }
MIGRAPHX_DEVICE_CONSTEXPR std::integral_constant<std::size_t, N> size() const { return {}; }
MIGRAPHX_DEVICE_CONSTEXPR T* begin() { return d; }
MIGRAPHX_DEVICE_CONSTEXPR const T* begin() const { return d; }
MIGRAPHX_DEVICE_CONSTEXPR T* end() { return d + size(); }
MIGRAPHX_DEVICE_CONSTEXPR const T* end() const { return d + size(); }
MIGRAPHX_DEVICE_CONSTEXPR T dot(const hip_array& x) const
{
T result = 0;
for(std::size_t i = 0; i < N; i++)
result += x[i] * d[i];
return result;
}
MIGRAPHX_DEVICE_CONSTEXPR T product() const
{
T result = 1;
for(std::size_t i = 0; i < N; i++)
result *= d[i];
return result;
}
friend MIGRAPHX_DEVICE_CONSTEXPR hip_array operator*(const hip_array& x, const hip_array& y)
{
hip_array result;
for(std::size_t i = 0; i < N; i++)
result[i] = x[i] * y[i];
return result;
}
};
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
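hip_array is a fixed-size aggregate usable from both host and device code when the translation unit is compiled as HIP/clang code; dot and product are the pieces the new shape code relies on. A small illustrative sketch (values are made up):

    using migraphx::gpu::device::hip_array;

    hip_array<std::size_t, 3> lens    = {2, 3, 4};
    hip_array<std::size_t, 3> strides = {12, 4, 1};  // packed strides for those lens
    hip_array<std::size_t, 3> idx     = {1, 2, 3};
    std::size_t offset   = idx.dot(strides); // 1*12 + 2*4 + 3*1 = 23, the linear offset
    std::size_t elements = lens.product();   // 24, what hip_shape::elements() returns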
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_SHAPE_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_SHAPE_HPP
#include <migraphx/gpu/device/array.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
template <std::size_t N>
struct hip_shape
{
using hip_index = hip_array<std::size_t, N>;
hip_array<std::size_t, N> lens = {};
hip_array<std::size_t, N> strides = {};
bool standard = false;
__device__ __host__ hip_shape() = default;
hip_shape(const shape& s) : standard(s.standard())
{
assert(s.lens().size() == N);
assert(s.strides().size() == N);
std::copy(s.lens().begin(), s.lens().end(), lens.begin());
std::copy(s.strides().begin(), s.strides().end(), strides.begin());
}
MIGRAPHX_DEVICE_CONSTEXPR std::size_t elements() const { return lens.product(); }
MIGRAPHX_DEVICE_CONSTEXPR std::size_t index(hip_index x) const { return x.dot(strides); }
MIGRAPHX_DEVICE_CONSTEXPR std::size_t index(std::initializer_list<std::size_t> x) const
{
std::size_t idx = 0;
for(std::size_t i = 0; i < x.size(); i++)
idx += *(x.begin() + i) * strides[i];
return idx;
}
MIGRAPHX_DEVICE_CONSTEXPR std::size_t index(std::size_t i) const
{
if(this->standard)
return i;
else
{
const std::size_t rank = this->lens.size();
std::size_t s = 1;
std::size_t result = 0;
for(std::size_t j = 0; j < this->lens.size(); j++)
{
const std::size_t k = rank - j - 1;
const std::size_t stride = this->strides[k];
const std::size_t len = this->lens[k];
const std::size_t slen = s * len;
const std::size_t idx = (i % slen) / s;
result += stride * idx;
s = slen;
}
return result;
}
}
MIGRAPHX_DEVICE_CONSTEXPR hip_index multi(std::size_t idx) const
{
hip_index result;
std::size_t tidx = idx;
for(std::size_t is = 0; is < result.size(); is++)
{
result[is] = tidx / strides[is];
tidx = tidx % strides[is];
}
return result;
}
};
template <std::size_t N>
hip_shape<N> make_hip_shape(const shape& x)
{
return x;
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
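hip_shape mirrors migraphx::shape at a fixed rank so it can be captured by value in kernels; it is built on the host through the implicit conversion and index/multi are then used on the device. A sketch, assuming the shape below:

    migraphx::shape s{migraphx::shape::float_type, {2, 3, 4}}; // packed strides {12, 4, 1}
    auto hs = migraphx::gpu::device::make_hip_shape<3>(s);
    // Inside a kernel: hs.index({1, 2, 3}) == 23 and hs.multi(23) == {1, 2, 3}
    // (multi divides by strides, so it assumes a standard, packed layout).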
#ifndef MIGRAPHX_GUARD_RTGLIB_DEAVICE_TENSOR_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEAVICE_TENSOR_HPP
-#include <hip/hip_runtime.h>
-#include <migraphx/functional.hpp>
-#include <migraphx/config.hpp>
+#include <migraphx/gpu/device/visit.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
-template <class F>
-void visit_tensor_size(std::size_t n, F f)
-{
-    switch(n)
-    {
-    case 1:
-    {
-        f(std::integral_constant<std::size_t, 1>{});
-        break;
-    }
-    case 2:
-    {
-        f(std::integral_constant<std::size_t, 2>{});
-        break;
-    }
-    case 3:
-    {
-        f(std::integral_constant<std::size_t, 3>{});
-        break;
-    }
-    case 4:
-    {
-        f(std::integral_constant<std::size_t, 4>{});
-        break;
-    }
-    case 5:
-    {
-        f(std::integral_constant<std::size_t, 5>{});
-        break;
-    }
-    default: throw std::runtime_error("Unknown tensor size");
-    }
-}
-template <size_t NDim>
-struct hip_index
-{
-    size_t d[NDim];
-    __device__ __host__ size_t& operator[](size_t i) { return d[i]; }
-    __device__ __host__ size_t operator[](size_t i) const { return d[i]; }
-};
+template <std::size_t NDim>
+using hip_tensor_index = hip_array<std::size_t, NDim>;
-template <size_t NDim>
+template <std::size_t NDim>
struct hip_tensor_descriptor
{
    __device__ __host__ hip_tensor_descriptor() = default;
@@ -63,26 +22,26 @@ struct hip_tensor_descriptor
        std::copy(s.strides().begin(), s.strides().end(), strides);
    }
-    __device__ __host__ hip_index<NDim> multi(size_t idx) const
+    __device__ __host__ hip_tensor_index<NDim> multi(std::size_t idx) const
    {
-        hip_index<NDim> result{};
-        size_t tidx = idx;
-        for(size_t is = 0; is < NDim; is++)
+        hip_tensor_index<NDim> result{};
+        std::size_t tidx = idx;
+        for(std::size_t is = 0; is < NDim; is++)
        {
            result[is] = tidx / strides[is];
            tidx       = tidx % strides[is];
        }
        return result;
    }
-    __device__ __host__ size_t linear(hip_index<NDim> s) const
+    __device__ __host__ std::size_t linear(hip_tensor_index<NDim> s) const
    {
-        size_t idx = 0;
-        for(size_t i = 0; i < NDim; i++)
+        std::size_t idx = 0;
+        for(std::size_t i = 0; i < NDim; i++)
            idx += s[i] * strides[i];
        return idx;
    }
-    size_t lens[NDim]    = {};
-    size_t strides[NDim] = {};
+    std::size_t lens[NDim]    = {};
+    std::size_t strides[NDim] = {};
};
} // namespace device
...
#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_TENSOR_VIEW_HPP
#define MIGRAPHX_GUARD_RTGLIB_DEVICE_TENSOR_VIEW_HPP
#include <migraphx/gpu/device/shape.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
template <class T, std::size_t N>
struct hip_tensor_view
{
using value_type = T;
using hip_index = typename hip_shape<N>::hip_index;
__device__ __host__ hip_tensor_view() = default;
__host__ hip_tensor_view(tensor_view<T> x) : d(x.data()), s(x.get_shape()) {}
__host__ hip_tensor_view(T* x, const shape& ss) : d(x), s(ss) {}
MIGRAPHX_DEVICE_CONSTEXPR const hip_shape<N>& get_shape() const { return s; }
MIGRAPHX_DEVICE_CONSTEXPR std::size_t size() const { return s.elements(); }
MIGRAPHX_DEVICE_CONSTEXPR value_type* data() const { return d; }
template <class U>
MIGRAPHX_DEVICE_CONSTEXPR value_type& operator[](U i) const
{
return d[s.index(i)];
}
MIGRAPHX_DEVICE_CONSTEXPR value_type* begin() const { return d; }
MIGRAPHX_DEVICE_CONSTEXPR value_type* end() const { return d + size(); }
private:
value_type* d = nullptr;
hip_shape<N> s{};
};
template <std::size_t N, class T>
hip_tensor_view<T, N> make_hip_view(const shape& s, T* x)
{
return {x, s};
}
template <std::size_t N, class T>
hip_tensor_view<T, N> make_hip_view(tensor_view<T> x)
{
return {x};
}
} // namespace device
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
@@ -8,14 +8,45 @@
#ifndef MIGRAPHX_GUARD_RTGLIB_GPU_DEVICE_TYPES_HPP
#define MIGRAPHX_GUARD_RTGLIB_GPU_DEVICE_TYPES_HPP
+#include <hip/hip_runtime.h>
#include <migraphx/half.hpp>
#include <migraphx/config.hpp>
+#include <migraphx/tensor_view.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
namespace device {
+#define MIGRAPHX_DEVICE_CONSTEXPR constexpr __device__ __host__ // NOLINT
+template <class T, std::size_t N>
+using vec = T __attribute__((ext_vector_type(N)));
+template <std::size_t N, class T>
+__device__ __host__ T* as_pointer(vec<T, N>* x)
+{
+    return reinterpret_cast<T*>(x);
+}
+template <std::size_t N, class T>
+__device__ __host__ vec<T, N>* as_vec(T* x)
+{
+    return reinterpret_cast<vec<T, N>*>(x);
+}
+template <std::size_t N, class T>
+tensor_view<vec<T, N>> as_vec(tensor_view<T> x)
+{
+    return {x.get_shape(), as_vec<N>(x.data())};
+}
+template <std::size_t N, class... Ts>
+auto pack_vec(Ts... xs)
+{
+    return [=](auto f, std::size_t n) { return f(as_vec<N>(xs)[n]...); };
+}
using gpu_half = __fp16;
namespace detail {
@@ -25,6 +56,12 @@ struct device_type
    using type = T;
};
+template <class T, std::size_t N>
+struct device_type<vec<T, N>>
+{
+    using type = vec<typename device_type<T>::type, N>;
+};
template <>
struct device_type<half>
{
@@ -38,7 +75,7 @@ struct host_type
};
template <>
-struct device_type<gpu_half>
+struct host_type<gpu_half>
{
    using type = half;
};
@@ -64,9 +101,9 @@ host_type<T>* host_cast(T* x)
}
template <class T>
-device_type<T> device_cast(T x)
+device_type<T> device_cast(const T& x)
{
-    return reinterpret_cast<device_type<T>>(x);
+    return reinterpret_cast<const device_type<T>&>(x);
}
template <class T>
@@ -75,6 +112,12 @@ device_type<T>* device_cast(T* x)
    return reinterpret_cast<device_type<T>*>(x);
}
+template <class T>
+tensor_view<device_type<T>> device_cast(tensor_view<T> x)
+{
+    return {x.get_shape(), reinterpret_cast<device_type<T>*>(x.data())};
+}
template <class T>
T to_hip_type(T x)
{
...
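vec is the clang ext_vector_type alias the device code uses for wide loads and stores; as_vec reinterprets a pointer (or a tensor_view) over the same memory, so the element count must be a multiple of N. A host-side sketch with illustrative values, assuming the file is compiled as HIP/clang code:

    #include <migraphx/gpu/device/types.hpp>
    #include <vector>

    std::vector<float> buf(8, 1.0f);
    auto* v4 = migraphx::gpu::device::as_vec<4>(buf.data()); // vec<float, 4>* over the same bytes
    v4[1] = v4[0] + v4[0];                                   // operates on four floats at once
    float* back = migraphx::gpu::device::as_pointer(v4);     // recover the plain float*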