Commit 7e2cf4e8 authored by Khalique

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into tf_pb

parents d926aca3 f67fc560
......@@ -145,6 +145,7 @@ rocm_create_package(
DESCRIPTION "AMD's graph optimizer"
MAINTAINER "Paul Fultz II <paul.fultz@amd.com>"
LDCONFIG
PTH
DEPENDS miopen-hip rocblas hip_hcc half
)
......
......@@ -107,6 +107,10 @@ rocmtest tidy: rocmnode('rocmtest') { cmake_build ->
stage('Clang Release') {
cmake_build("hcc", "-DCMAKE_BUILD_TYPE=release")
}
stage('Clang Release Python 3') {
cmake_build("hcc", "-DCMAKE_BUILD_TYPE=release -DPYTHON_EXECUTABLE=/usr/local/bin/python3")
}
}, gcc5: rocmnode('rocmtest') { cmake_build ->
stage('GCC 5 Debug') {
cmake_build("g++-5", "-DCMAKE_BUILD_TYPE=debug")
......
......@@ -124,6 +124,8 @@ struct tensor_view
return m_data + this->size();
}
std::vector<T> to_vector() const { return std::vector<T>(this->begin(), this->end()); }
friend std::ostream& operator<<(std::ostream& os, const tensor_view<T>& x)
{
if(!x.empty())
......
......@@ -1133,7 +1133,15 @@ struct onnx_parser
case onnx::TensorProto::BOOL:
return literal{{shape::int32_type, dims}, t.int32_data().begin(), t.int32_data().end()};
case onnx::TensorProto::FLOAT16:
-        return literal{{shape::half_type, dims}, t.float_data().begin(), t.float_data().end()};
+    {
+        std::vector<uint16_t> data_uint16(t.int32_data().begin(), t.int32_data().end());
+        std::vector<half> data_half;
+        std::transform(data_uint16.begin(),
+                       data_uint16.end(),
+                       std::back_inserter(data_half),
+                       [](uint16_t raw_val) { return *reinterpret_cast<half*>(&raw_val); });
+        return literal{{shape::half_type, dims}, data_half.begin(), data_half.end()};
+    }
case onnx::TensorProto::DOUBLE:
return literal{
{shape::double_type, dims}, t.double_data().begin(), t.double_data().end()};
......
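The FLOAT16 change above relies on ONNX packing half-precision tensors as raw 16-bit patterns widened into int32_data, so each element has to be reinterpreted bit-for-bit rather than converted by value. Below is a minimal standalone sketch of that decoding step, assuming migraphx::half (from half.hpp) is a 16-bit type with the IEEE fp16 layout; the memcpy variant sidesteps the strict-aliasing question raised by the reinterpret_cast used in the diff.

#include <cstdint>
#include <cstring>
#include <vector>
#include <migraphx/half.hpp>

// Reinterpret a raw 16-bit pattern as a half without violating strict aliasing.
inline migraphx::half bits_to_half(uint16_t raw)
{
    migraphx::half h;
    std::memcpy(&h, &raw, sizeof(h)); // copy the bits, do not convert the value
    return h;
}

// ONNX stores FLOAT16 payloads widened into int32_data; narrow each entry
// back to 16 bits, then reinterpret it as a half.
std::vector<migraphx::half> decode_fp16(const std::vector<int32_t>& int32_data)
{
    std::vector<migraphx::half> out;
    out.reserve(int32_data.size());
    for(int32_t v : int32_data)
        out.push_back(bits_to_half(static_cast<uint16_t>(v)));
    return out;
}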
......@@ -203,9 +203,8 @@ void memory_coloring_impl::rewrite()
if(is_allocate(ins))
{
assert(!ins->inputs().empty());
            p_program->replace_instruction(
-                ins, op::load{ins->inputs().at(0)->get_shape(), offset}, scratch_param);
+                ins, op::load{ins->get_shape(), offset}, scratch_param);
}
else if(is_literal(ins))
{
......
......@@ -348,13 +348,17 @@ argument generic_eval(const program& p,
}
else if(ins->name() == "@param")
{
-            results.emplace(ins, trace(ins, [&] {
-                auto param_name =
-                    any_cast<builtin::param>(ins->get_operator()).parameter;
-                if(not contains(params, param_name))
-                    MIGRAPHX_THROW("Parameter not found: " + param_name);
-                return params.at(param_name);
-            }));
+            results.emplace(
+                ins, trace(ins, [&] {
+                    auto param_name = any_cast<builtin::param>(ins->get_operator()).parameter;
+                    if(not contains(params, param_name))
+                        MIGRAPHX_THROW("Parameter not found: " + param_name);
+                    auto param = params.at(param_name);
+                    if(param.get_shape() != ins->get_shape())
+                        MIGRAPHX_THROW("Incorrect shape {" + to_string(param.get_shape()) +
+                                       "} for parameter: " + param_name);
+                    return param;
+                }));
}
else if(ins->name() == "@outline")
{
......
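Condensed, the new guard in generic_eval amounts to the check below: an argument supplied for a parameter must match the declared shape exactly (type, lengths, and strides), or evaluation stops with a diagnostic naming the parameter. This is a sketch with a hypothetical free-function name; in the diff the check lives inline in the trace lambda.

// Sketch: validate a user-supplied parameter argument against its declared shape.
migraphx::argument checked_param(const migraphx::argument& param,
                                 const migraphx::shape& declared,
                                 const std::string& name)
{
    if(param.get_shape() != declared)
        MIGRAPHX_THROW("Incorrect shape {" + migraphx::to_string(param.get_shape()) +
                       "} for parameter: " + name);
    return param;
}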
option(MIGRAPHX_ENABLE_PYTHON "Enable python bindings" ON)
if(MIGRAPHX_ENABLE_PYTHON)
find_program(DEFAULT_PYTHON_EXE python)
if(DEFAULT_PYTHON_EXE)
set(PYTHON_EXECUTABLE ${DEFAULT_PYTHON_EXE} CACHE PATH "Path to python executable")
endif()
find_package(pybind11 REQUIRED)
pybind11_add_module(migraphx_py migraphx_py.cpp)
set_target_properties(migraphx_py PROPERTIES
OUTPUT_NAME migraphx
C_VISIBILITY_PRESET hidden
CXX_VISIBILITY_PRESET hidden
)
target_link_libraries(migraphx_py PRIVATE migraphx migraphx_onnx migraphx_cpu)
if(MIGRAPHX_ENABLE_GPU)
target_link_libraries(migraphx_py PRIVATE migraphx_gpu)
target_compile_definitions(migraphx_py PRIVATE -DHAVE_GPU)
endif()
rocm_install_targets(TARGETS migraphx_py)
endif()
......@@ -28,6 +28,11 @@ struct throw_half
{
throw std::runtime_error("Half not supported in python yet.");
}
void operator()(migraphx::tensor_view<migraphx::half>) const
{
throw std::runtime_error("Half not supported in python yet.");
}
};
template <class F>
......@@ -42,6 +47,8 @@ struct skip_half
}
void operator()(migraphx::shape::as<migraphx::half>) const {}
void operator()(migraphx::tensor_view<migraphx::half>) const {}
};
template <class F>
......@@ -50,6 +57,12 @@ void visit_type(const migraphx::shape& s, F f)
s.visit_type(throw_half<F>{f});
}
template <class T, class F>
void visit(const migraphx::raw_data<T>& x, F f)
{
x.visit(throw_half<F>{f});
}
template <class F>
void visit_types(F f)
{
......@@ -60,6 +73,9 @@ template <class T>
py::buffer_info to_buffer_info(T& x)
{
migraphx::shape s = x.get_shape();
auto strides = s.strides();
std::transform(
strides.begin(), strides.end(), strides.begin(), [&](auto i) { return i * s.type_size(); });
py::buffer_info b;
visit_type(s, [&](auto as) {
b = py::buffer_info(x.data(),
......@@ -67,7 +83,7 @@ py::buffer_info to_buffer_info(T& x)
py::format_descriptor<decltype(as())>::format(),
s.lens().size(),
s.lens(),
-                            s.strides());
+                            strides);
});
return b;
}
......@@ -75,11 +91,20 @@ py::buffer_info to_buffer_info(T& x)
migraphx::shape to_shape(const py::buffer_info& info)
{
migraphx::shape::type_t t;
std::size_t n = 0;
visit_types([&](auto as) {
if(info.format == py::format_descriptor<decltype(as())>::format())
{
t = as.type_enum();
n = sizeof(as());
}
});
-    return migraphx::shape{t, info.shape, info.strides};
+    auto strides = info.strides;
+    std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t {
+        return n > 0 ? i / n : 0;
+    });
+    return migraphx::shape{t, info.shape, strides};
}
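Both buffer-protocol changes above bridge the same convention gap: migraphx::shape records strides in elements, while py::buffer_info records them in bytes. A minimal sketch of the two conversions, with hypothetical helper names; the division guard mirrors the n > 0 check in to_shape.

#include <cstddef>
#include <vector>

// Element strides -> byte strides, as done in to_buffer_info.
std::vector<std::size_t> strides_to_bytes(std::vector<std::size_t> strides,
                                          std::size_t type_size)
{
    for(auto& s : strides)
        s *= type_size; // e.g. float strides {3, 1} -> {12, 4}
    return strides;
}

// Byte strides -> element strides, as done in to_shape.
std::vector<std::size_t> strides_to_elements(std::vector<std::size_t> strides,
                                             std::size_t type_size)
{
    for(auto& s : strides)
        s = type_size > 0 ? s / type_size : 0; // guard against an unmatched format
    return strides;
}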
PYBIND11_MODULE(migraphx, m)
......@@ -108,6 +133,13 @@ PYBIND11_MODULE(migraphx, m)
py::buffer_info info = b.request();
new(&x) migraphx::argument(to_shape(info), info.ptr);
})
.def("get_shape", &migraphx::argument::get_shape)
.def("tolist",
[](migraphx::argument& x) {
py::list l{x.get_shape().elements()};
visit(x, [&](auto data) { l = py::cast(data.to_vector()); });
return l;
})
.def("__eq__", std::equal_to<migraphx::argument>{})
.def("__ne__", std::not_equal_to<migraphx::argument>{})
.def("__repr__", [](const migraphx::argument& x) { return migraphx::to_string(x); });
......
......@@ -11,7 +11,7 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
-MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_NULL_STREAM)
+MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_NULL_STREAM)
struct hip_device
{
......@@ -40,7 +40,7 @@ struct hip_device
hipStream_t get()
{
-        if(enabled(MIGRAPHX_DISABLE_NULL_STREAM{}))
+        if(not enabled(MIGRAPHX_ENABLE_NULL_STREAM{}))
{
setup();
if(s == nullptr)
......@@ -53,7 +53,7 @@ struct hip_device
auto create_miopen_handle()
{
-        if(enabled(MIGRAPHX_DISABLE_NULL_STREAM{}))
+        if(not enabled(MIGRAPHX_ENABLE_NULL_STREAM{}))
return make_obj<miopen_handle>(&miopenCreateWithStream, get());
else
return make_obj<miopen_handle>(&miopenCreate);
......
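The rename above also flips the default: previously the null HIP stream was used unless MIGRAPHX_DISABLE_NULL_STREAM was set, whereas now a dedicated stream is created unless MIGRAPHX_ENABLE_NULL_STREAM opts back in. A condensed sketch of the resulting logic, using a simplified stand-in for migraphx's enabled() helper:

#include <cstdlib>
#include <cstring>

// Simplified stand-in for migraphx's enabled(): treat "1" as opting in.
inline bool env_enabled(const char* name)
{
    const char* v = std::getenv(name);
    return v != nullptr && std::strcmp(v, "1") == 0;
}

inline bool use_dedicated_stream()
{
    // The default is now a dedicated stream; the null stream is opt-in.
    return !env_enabled("MIGRAPHX_ENABLE_NULL_STREAM");
}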
......@@ -23,12 +23,13 @@ void copy_to_gpu(const argument& src, const argument& dst);
struct hip_allocate
{
shape s;
std::string tag{};
std::string name() const { return "hip::allocate"; }
shape compute_shape(const std::vector<shape>& inputs) const
{
-        check_shapes{inputs}.has(1);
-        return inputs.front();
+        check_shapes{inputs}.has(0);
+        return s;
}
argument compute(context&, const shape& output_shape, const std::vector<argument>&) const
{
......
......@@ -127,8 +127,7 @@ struct miopen_apply
}
else
{
-            auto is     = prog->add_outline(s);
-            auto result = prog->insert_instruction(ins, hip_allocate{std::move(tag)}, is);
+            auto result = prog->insert_instruction(ins, hip_allocate{s, std::move(tag)});
return result;
}
}
......
......@@ -37,8 +37,7 @@ void write_literals::apply(program& p) const
{
literal l = ins->get_literal();
auto pre = p.add_literal(l);
-            auto s     = p.add_outline(l.get_shape());
-            auto alloc = p.insert_instruction(std::next(pre), hip_allocate{}, s);
+            auto alloc = p.insert_instruction(std::next(pre), hip_allocate{l.get_shape()});
p.replace_instruction(ins, hip_copy{}, pre, alloc);
}
else
......
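The three hunks above are one refactor: hip_allocate used to infer its output shape from an @outline input instruction, and now carries the shape as a member, so allocation sites no longer emit the outline at all. A minimal before/after sketch of the op's shape computation, with abbreviated struct names:

// Before: the shape came from a single @outline input.
struct allocate_from_input
{
    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
        migraphx::check_shapes{inputs}.has(1);
        return inputs.front();
    }
};

// After: the shape is stored on the op itself and no inputs are needed.
struct allocate_from_member
{
    migraphx::shape s;
    migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
    {
        migraphx::check_shapes{inputs}.has(0);
        return s;
    }
};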
......@@ -7,6 +7,7 @@
#include <migraphx/verify.hpp>
#include <migraphx/onnx.hpp>
#include "test.hpp"
#include <migraphx/half.hpp>
float sigmoid(float x) { return 1 / (1 + expf(-x)); }
......@@ -1375,4 +1376,22 @@ TEST_CASE(pad_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(fp16_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::half_type, {1}};
migraphx::half a{1.5};
migraphx::half b{2.5};
migraphx::half c{4.0};
auto l0 = p.add_literal(migraphx::literal{s, {a}});
auto l1 = p.add_literal(migraphx::literal{s, {b}});
p.add_instruction(migraphx::op::add{}, l0, l1);
p.compile(migraphx::cpu::target{});
auto result = p.eval({});
std::vector<migraphx::half> results_vector(1);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<migraphx::half> gold{c};
EXPECT(migraphx::verify_range(results_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -128,7 +128,7 @@ TEST_CASE(print_test)
{
migraphx::program p;
-    auto x = p.add_parameter("x", {migraphx::shape::int64_type});
+    auto x = p.add_parameter("x", {migraphx::shape::int32_type});
auto two = p.add_literal(2);
p.add_instruction(sum_op{}, x, two);
......@@ -142,8 +142,8 @@ TEST_CASE(param_test)
{
migraphx::program p;
-    auto x = p.add_parameter("x", {migraphx::shape::int64_type});
-    auto y = p.add_parameter("y", {migraphx::shape::int64_type});
+    auto x = p.add_parameter("x", {migraphx::shape::int32_type});
+    auto y = p.add_parameter("y", {migraphx::shape::int32_type});
p.add_instruction(sum_op{}, x, y);
auto result = p.eval(
......@@ -156,8 +156,8 @@ TEST_CASE(param_error_test)
{
migraphx::program p;
-    auto x = p.add_parameter("x", {migraphx::shape::int64_type});
-    auto y = p.add_parameter("y", {migraphx::shape::int64_type});
+    auto x = p.add_parameter("x", {migraphx::shape::int32_type});
+    auto y = p.add_parameter("y", {migraphx::shape::int32_type});
p.add_instruction(sum_op{}, x, y);
EXPECT(test::throws<migraphx::exception>(
......@@ -167,6 +167,22 @@ TEST_CASE(param_error_test)
"Parameter not found: y"));
}
TEST_CASE(param_shape_error_test)
{
migraphx::program p;
auto x = p.add_parameter("x", {migraphx::shape::int32_type, {1, 2}});
auto y = p.add_parameter("y", {migraphx::shape::int32_type, {1, 2}});
p.add_instruction(sum_op{}, x, y);
EXPECT(test::throws<migraphx::exception>(
[&] {
p.eval({{"x", migraphx::literal{1}.get_argument()},
{"y", migraphx::literal{2}.get_argument()}});
},
"Incorrect shape"));
}
TEST_CASE(replace_test)
{
migraphx::program p;
......
......@@ -21,8 +21,8 @@ struct allocate
std::string name() const { return "allocate"; }
migraphx::shape compute_shape(const std::vector<migraphx::shape>& inputs) const
{
-        migraphx::check_shapes{inputs, *this}.has(1);
-        return inputs.front();
+        migraphx::check_shapes{inputs, *this}.has(0);
+        return s;
}
migraphx::argument compute(migraphx::context&,
const migraphx::shape& output_shape,
......@@ -34,8 +34,7 @@ struct allocate
migraphx::instruction_ref add_alloc(migraphx::program& p, const migraphx::shape& s)
{
-    auto a0 = p.add_outline(s);
-    return p.add_instruction(allocate{}, a0);
+    return p.add_instruction(allocate{s});
}
bool no_allocate(const migraphx::program& p)
......
(binary file added: add_fp16_test.onnx — an ONNX protobuf from producer "add-fp16-example" with graph "test-add-fp16", which adds half-precision inputs "0" and "1" into output "2"; no newline at end of file)
......@@ -1186,7 +1186,9 @@ TEST_CASE(group_conv_test)
migraphx::op::convolution op;
op.group = 4;
p.add_instruction(op, l0, l1);
migraphx::parse_onnx("group_conv_test.onnx");
auto prog = migraphx::parse_onnx("group_conv_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(pad_test)
......@@ -1194,13 +1196,14 @@ TEST_CASE(pad_test)
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
migraphx::parse_onnx("pad_test.onnx");
auto prog = migraphx::parse_onnx("pad_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(lrn_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 28, 24, 24}});
migraphx::op::lrn op;
op.size = 5;
......@@ -1208,7 +1211,22 @@ TEST_CASE(lrn_test)
op.beta = 0.75;
op.bias = 1.0;
p.add_instruction(op, l0);
migraphx::parse_onnx("lrn_test.onnx");
auto prog = migraphx::parse_onnx("lrn_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(add_fp16_test)
{
migraphx::program p;
auto l0 =
p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type, {1}}, {1.5}});
auto l1 =
p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type, {1}}, {2.5}});
p.add_instruction(migraphx::op::add{}, l0, l1);
auto prog = migraphx::parse_onnx("add_fp16_test.onnx");
EXPECT(p == prog);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
......@@ -4,6 +4,8 @@ find_package(PythonInterp)
function(add_py_test NAME SCRIPT)
set (ENV_COMMAND ${CMAKE_COMMAND} -E env
"PYTHONPATH=$<TARGET_FILE_DIR:migraphx_py>"
"PYTHONMALLOC=debug"
"MALLOC_CHECK_=3"
)
add_test(
NAME test_py_${NAME}
......@@ -15,7 +17,8 @@ endfunction()
add_dependencies(tests migraphx_py)
add_dependencies(check migraphx_py)
-add_py_test(cpu cpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
+add_py_test(cpu test_cpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
if(MIGRAPHX_ENABLE_GPU)
-add_py_test(gpu gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
+add_py_test(gpu test_gpu.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
+add_py_test(array test_array.py WORKING_DIRECTORY ${TEST_ONNX_DIR})
endif()
import migraphx, struct, array, sys

try:
    from functools import reduce
except ImportError:
    pass  # Python 2 provides reduce as a builtin

def assert_eq(x, y):
    if x == y:
        pass
    else:
        raise Exception(str(x) + " != " + str(y))

def read_float(b, index):
    return struct.unpack_from('f', b, index*4)[0]

def write_float(b, index, value):
    struct.pack_into('f', b, index*4, value)

def nelements(lens):
    return reduce(lambda x, y: x*y, lens, 1)

def create_buffer(t, data, shape):
    a = array.array(t, data)
    if sys.version_info >= (3, 0):
        m = memoryview(a.tobytes())
        return m.cast(t, shape)
    else:
        m = memoryview(a.tostring())
        return m

def check_argument(a):
    l = a.tolist()
    for i in range(len(l)):
        assert_eq(l[i], read_float(a, i))

def check_shapes(r, m):
    lens = list(m.shape)
    strides = [int(s/m.itemsize) for s in m.strides]
    elements = nelements(lens)
    assert_eq(r.get_shape().elements(), elements)
    assert_eq(r.get_shape().lens(), lens)
    assert_eq(r.get_shape().strides(), strides)

def run(p):
    params = {}
    for key, value in p.get_parameter_shapes().items():
        params[key] = migraphx.to_gpu(migraphx.generate_argument(value))
    return migraphx.from_gpu(p.run(params))

def test_shape(shape):
    data = list(range(nelements(shape)))
    m = create_buffer('f', data, shape)
    a = migraphx.argument(m)
    check_shapes(a, m)
    assert_eq(a.tolist(), data)

def test_input():
    if sys.version_info >= (3, 0):
        test_shape([4])
        test_shape([2, 3])
    else:
        data = list(range(4))
        m = create_buffer('f', data, [4])
        a1 = migraphx.argument(m)
        a2 = migraphx.argument(bytearray(a1))
        check_shapes(a2, m)
        assert_eq(a1.tolist(), m.tolist())

def test_output():
    p = migraphx.parse_onnx("conv_relu_maxpool.onnx")
    p.compile(migraphx.get_target("gpu"))
    r1 = run(p)
    r2 = run(p)
    assert_eq(r1, r2)
    assert_eq(r1.tolist(), r2.tolist())
    check_argument(r1)
    check_argument(r2)
    m1 = memoryview(r1)
    m2 = memoryview(r2)
    check_shapes(r1, m1)
    check_shapes(r2, m2)

test_input()
test_output()