Commit 06fb0905 authored by Scott Thornton

Added MNIST test for cpu target

parents 0a59f103 cff16121
@@ -3,24 +3,24 @@
#include <fstream>
#include <stdexcept>
#include <rtg/onnx.hpp>
#include <migraph/onnx.hpp>
#include <rtg/cpu/cpu_target.hpp>
#include <rtg/generate.hpp>
#include <migraph/cpu/cpu_target.hpp>
#include <migraph/generate.hpp>
std::vector<float> read_mnist_images(std::string full_path, int& number_of_images, int& image_size)
{
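// MNIST IDX files store their 32-bit header fields big-endian, so each
// field is byte-reversed before use on little-endian hosts.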
auto reverseInt = [](int i) {
auto reverse_int = [](unsigned int i) {
unsigned char c1, c2, c3, c4;
c1 = i & 255;
c2 = (i >> 8) & 255;
c3 = (i >> 16) & 255;
c4 = (i >> 24) & 255;
return (static_cast<int>(c1) << 24) + (static_cast<int>(c2) << 16) +
(static_cast<int>(c3) << 8) + c4;
c1 = i & 255u;
c2 = (i >> 8u) & 255u;
c3 = (i >> 16u) & 255u;
c4 = (i >> 24u) & 255u;
return (static_cast<unsigned int>(c1) << 24u) + (static_cast<unsigned int>(c2) << 16u) +
(static_cast<unsigned int>(c3) << 8u) + c4;
};
typedef unsigned char uchar;
using uchar = unsigned char;
std::ifstream file(full_path, std::ios::binary);
@@ -28,21 +28,21 @@ std::vector<float> read_mnist_images(std::string full_path, int& number_of_image
{
int magic_number = 0, n_rows = 0, n_cols = 0;
file.read((char*)&magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
magic_number = reverse_int(magic_number);
if(magic_number != 2051)
throw std::runtime_error("Invalid MNIST image file!");
file.read((char*)&number_of_images, sizeof(number_of_images)),
number_of_images = reverseInt(number_of_images);
file.read((char*)&n_rows, sizeof(n_rows)), n_rows = reverseInt(n_rows);
file.read((char*)&n_cols, sizeof(n_cols)), n_cols = reverseInt(n_cols);
file.read(reinterpret_cast<char*>(&number_of_images), sizeof(number_of_images));
number_of_images = reverse_int(number_of_images);
file.read(reinterpret_cast<char*>(&n_rows), sizeof(n_rows));
n_rows = reverse_int(n_rows);
file.read(reinterpret_cast<char*>(&n_cols), sizeof(n_cols));
n_cols = reverse_int(n_cols);
image_size = n_rows * n_cols;
printf("n_rows: %d n_cols: %d image_size: %d\n\n", n_rows, n_cols, image_size);
// uchar** _dataset = new uchar*[number_of_images];
// for(int i = 0; i < number_of_images; i++) {
// _dataset[i] = new uchar[image_size];
@@ -55,7 +55,7 @@ std::vector<float> read_mnist_images(std::string full_path, int& number_of_image
for(int j = 0; j < image_size; j++)
{
uchar tmp;
file.read((char*)&tmp, 1);
file.read(reinterpret_cast<char*>(&tmp), 1);
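// Scale the raw 0-255 pixel byte to a float in [0, 1].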
result[i * image_size + j] = tmp / 255.0;
}
}
@@ -69,37 +69,37 @@ std::vector<float> read_mnist_images(std::string full_path, int& number_of_image
std::vector<int32_t> read_mnist_labels(std::string full_path, int& number_of_labels)
{
auto reverseInt = [](int i) {
auto reverse_int = [](unsigned int i) {
unsigned char c1, c2, c3, c4;
c1 = i & 255;
c2 = (i >> 8) & 255;
c3 = (i >> 16) & 255;
c4 = (i >> 24) & 255;
return (static_cast<int>(c1) << 24) + (static_cast<int>(c2) << 16) +
(static_cast<int>(c3) << 8) + c4;
c1 = i & 255u;
c2 = (i >> 8u) & 255u;
c3 = (i >> 16u) & 255u;
c4 = (i >> 24u) & 255u;
return (static_cast<unsigned int>(c1) << 24u) + (static_cast<unsigned int>(c2) << 16u) +
(static_cast<unsigned int>(c3) << 8u) + c4;
};
typedef unsigned char uchar;
using uchar = unsigned char;
std::ifstream file(full_path, std::ios::binary);
if(file.is_open())
{
int magic_number = 0;
file.read((char*)&magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
magic_number = reverse_int(magic_number);
if(magic_number != 2049)
throw std::runtime_error("Invalid MNIST label file!");
file.read((char*)&number_of_labels, sizeof(number_of_labels)),
number_of_labels = reverseInt(number_of_labels);
file.read(reinterpret_cast<char*>(&number_of_labels), sizeof(number_of_labels));
number_of_labels = reverse_int(number_of_labels);
std::vector<int32_t> result(number_of_labels);
for(int i = 0; i < number_of_labels; i++)
{
uchar tmp;
file.read((char*)&tmp, 1);
file.read(reinterpret_cast<char*>(&tmp), 1);
result[i] = tmp;
}
return result;
@@ -137,23 +137,23 @@ int main(int argc, char const* argv[])
std::vector<int32_t> labels = read_mnist_labels(labelfile, nlabels);
std::string file = argv[1];
auto prog = rtg::parse_onnx(file);
prog.compile(rtg::cpu::cpu_target{});
auto prog = migraph::parse_onnx(file);
prog.compile(migraph::cpu::cpu_target{});
// auto s = prog.get_parameter_shape("Input3");
auto s = rtg::shape{rtg::shape::float_type, {1, 1, 28, 28}};
auto s = migraph::shape{migraph::shape::float_type, {1, 1, 28, 28}};
std::cout << s << std::endl;
auto ptr = input.data();
for (int i = 0; i < 20; i++)
{
printf("label: %d ----> ", labels[i]);
auto input3 = rtg::argument{s, &ptr[784*i]};
std::cout << "label: " << labels[i] << " ----> ";
auto input3 = migraph::argument{s, &ptr[784*i]};
auto result = prog.eval({{"Input3", input3}});
std::vector<float> logits;
result.visit([&](auto output) { logits.assign(output.begin(), output.end()); });
std::vector<float> probs = softmax(logits);
for (auto x : probs) printf("%8.4f ", x);
printf("\n");
for (auto x : probs) std::cout << x << " ";
std::cout << std::endl;
}
printf("\n");
std::cout << std::endl;
}
}
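The softmax helper called above is not part of this diff; a minimal numerically stable sketch consistent with the call site (the name and signature are inferred from the call, not confirmed by this commit, and <algorithm> and <cmath> are assumed to be included) would be:

std::vector<float> softmax(const std::vector<float>& logits)
{
// Shift by the max logit so std::exp cannot overflow.
float m = *std::max_element(logits.begin(), logits.end());
std::vector<float> e(logits.size());
float sum = 0.0f;
for(std::size_t i = 0; i < logits.size(); i++)
{
e[i] = std::exp(logits[i] - m);
sum += e[i];
}
for(auto& x : e)
x /= sum;
return e;
}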
@@ -7,12 +7,12 @@
#include <functional>
#include <array>
#include <rtg/fallthrough.hpp>
#include <rtg/program.hpp>
#include <rtg/operators.hpp>
#include <rtg/ranges.hpp>
#include <migraph/fallthrough.hpp>
#include <migraph/program.hpp>
#include <migraph/operators.hpp>
#include <migraph/ranges.hpp>
namespace rtg {
namespace migraph {
struct unknown
{
@@ -25,7 +25,10 @@ struct unknown
else
return input.front();
}
argument compute(shape, std::vector<argument>) const { RTG_THROW("not computable"); }
argument compute(context&, shape, std::vector<argument>) const
{
MIGRAPH_THROW("not computable");
}
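// unknown ops can flow through shape inference but must never be evaluated.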
friend std::ostream& operator<<(std::ostream& os, const unknown& x)
{
os << x.name();
@@ -198,7 +201,7 @@ struct onnx_parser
void parse_node(std::string name)
{
if(name.empty())
RTG_THROW("Onnx node must have a name");
MIGRAPH_THROW("Onnx node must have a name");
if(instructions.count(name) == 0)
{
auto&& node = nodes.at(name);
@@ -275,7 +278,7 @@ struct onnx_parser
case onnx::AttributeProto::TENSORS: return {};
case onnx::AttributeProto::GRAPHS: return {};
}
RTG_THROW("Invalid attribute type");
MIGRAPH_THROW("Invalid attribute type");
}
static literal parse_tensor(const onnx::TensorProto& t)
@@ -309,7 +312,7 @@ struct onnx_parser
case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
}
RTG_THROW("Invalid tensor type");
MIGRAPH_THROW("Invalid tensor type");
}
static shape parse_type(const onnx::TypeProto& t)
@@ -372,4 +375,4 @@ program parse_onnx(const std::string& name)
return std::move(parser.prog);
}
} // namespace rtg
} // namespace migraph
#include <rtg/onnx.hpp>
#include <rtg/cpu/cpu_target.hpp>
#include <rtg/generate.hpp>
#include <migraph/onnx.hpp>
int main(int argc, char const* argv[])
{
if(argc > 1)
{
std::string file = argv[1];
auto prog = rtg::parse_onnx(file);
prog.compile(rtg::cpu::cpu_target{});
auto s = prog.get_parameter_shape("Input3");
auto input3 = generate_argument(s);
auto out = prog.eval({{"Input3", input3}});
(void)out;
auto prog = migraph::parse_onnx(file);
std::cout << prog << std::endl;
}
}
#include <migraph/onnx.hpp>
#include <migraph/cpu/cpu_target.hpp>
#include <migraph/miopen/miopen_target.hpp>
#include <migraph/miopen/hip.hpp>
#include <migraph/generate.hpp>
#include <miopen/miopen.h>
#include <migraph/miopen/miopen.hpp>
migraph::argument run_cpu(std::string file)
{
auto p = migraph::parse_onnx(file);
p.compile(migraph::cpu::cpu_target{});
auto s = p.get_parameter_shape("Input3");
auto input3 = migraph::generate_argument(s);
auto out = p.eval({{"Input3", input3}});
std::cout << p << std::endl;
return out;
}
migraph::argument run_gpu(std::string file)
{
auto p = migraph::parse_onnx(file);
p.compile(migraph::miopen::miopen_target{});
auto s = p.get_parameter_shape("Input3");
auto input3 = migraph::miopen::to_gpu(migraph::generate_argument(s));
auto output =
migraph::miopen::to_gpu(migraph::generate_argument(p.get_parameter_shape("output")));
auto handle = migraph::miopen::make_obj<migraph::miopen::miopen_handle>(&miopenCreate);
auto out = p.eval({{"Input3", input3}, {"output", output}});
std::cout << p << std::endl;
return migraph::miopen::from_gpu(out);
}
int main(int argc, char const* argv[])
{
if(argc > 1)
{
std::string file = argv[1];
auto x = run_cpu(file);
auto y = run_gpu(file);
if(x == y)
{
std::cout << "Passed" << std::endl;
}
else
{
std::cout << "Not equal" << std::endl;
std::cout << x << std::endl;
std::cout << y << std::endl;
}
}
}
#include <rtg/program.hpp>
#include <rtg/stringutils.hpp>
#include <rtg/instruction.hpp>
#include <migraph/program.hpp>
#include <migraph/stringutils.hpp>
#include <migraph/instruction.hpp>
#include <iostream>
#include <algorithm>
namespace rtg {
namespace migraph {
struct program_impl
{
// A list is used to keep references to an instruction stable
std::list<instruction> instructions;
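// Context created by the target at compile time (see program::compile below).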
context ctx;
};
const operation& get_operation(instruction_ref ins) { return ins->op; }
@@ -109,9 +110,10 @@ instruction_ref program::validate() const
void program::compile(const target& t)
{
assert(this->validate() != impl->instructions.end());
this->impl->ctx = t.get_context();
t.apply(*this);
if(this->validate() == impl->instructions.end())
RTG_THROW("Invalid program from compilation");
MIGRAPH_THROW("Invalid program from compilation");
}
argument program::eval(std::unordered_map<std::string, argument> params) const
@@ -140,10 +142,7 @@ argument program::eval(std::unordered_map<std::string, argument> params) const
ins.arguments.end(),
values.begin(),
[&](instruction_ref i) { return results.at(std::addressof(*i)); });
result = ins.op.compute(ins.result, values);
if(result.get_shape().elements() > 0 and result.get_shape().packed() and
std::isnan(result.at<float>()))
std::cout << "Nan: " << ins.op.name() << std::endl;
result = ins.op.compute(this->impl->ctx, ins.result, values);
}
results.emplace(std::addressof(ins), result);
}
@@ -197,4 +196,4 @@ std::ostream& operator<<(std::ostream& os, const program& p)
return os;
}
} // namespace rtg
} // namespace migraph
#include <rtg/shape.hpp>
#include <rtg/stringutils.hpp>
#include <migraph/shape.hpp>
#include <migraph/stringutils.hpp>
#include <numeric>
#include <algorithm>
#include <functional>
#include <iostream>
namespace rtg {
namespace migraph {
shape::shape() : m_type(float_type), m_packed(false) {}
@@ -80,6 +80,16 @@ std::size_t shape::index(std::size_t i) const
});
}
bool shape::packed() const { return this->m_packed; }
bool shape::broadcasted() const
{
assert(this->lens().size() == this->strides().size());
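// A broadcast axis has stride 0, so the product of all strides is zero
// exactly when at least one dimension is broadcast.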
return std::accumulate(this->strides().begin(),
this->strides().end(),
std::size_t{1},
std::multiplies<std::size_t>()) == 0;
}
std::size_t shape::element_space() const
{
// TODO: Get rid of intermediate vector
@@ -99,13 +109,12 @@ std::string shape::type_string() const
{
switch(this->m_type)
{
case any_type: return "any";
#define RTG_SHAPE_TYPE_STRING_CASE(x, t) \
#define MIGRAPH_SHAPE_TYPE_STRING_CASE(x, t) \
case x: return #x;
RTG_SHAPE_VISIT_TYPES(RTG_SHAPE_TYPE_STRING_CASE)
#undef RTG_SHAPE_TYPE_STRING_CASE
MIGRAPH_SHAPE_VISIT_TYPES(MIGRAPH_SHAPE_TYPE_STRING_CASE)
#undef MIGRAPH_SHAPE_TYPE_STRING_CASE
}
RTG_THROW("Invalid type");
MIGRAPH_THROW("Invalid type");
}
bool operator==(const shape& x, const shape& y)
@@ -122,4 +131,4 @@ std::ostream& operator<<(std::ostream& os, const shape& x)
return os;
}
} // namespace rtg
} // namespace migraph
add_library(rtg_cpu
add_library(migraph_cpu
cpu_target.cpp
)
rocm_clang_tidy_check(rtg_cpu)
target_link_libraries(rtg_cpu rtg)
target_include_directories(rtg_cpu PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
rocm_clang_tidy_check(migraph_cpu)
target_link_libraries(migraph_cpu migraph)
target_include_directories(migraph_cpu PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
#include <rtg/cpu/cpu_target.hpp>
#include <rtg/instruction.hpp>
#include <rtg/dfor.hpp>
#include <rtg/operators.hpp>
#include <rtg/shape_for_each.hpp>
#include <migraph/cpu/cpu_target.hpp>
#include <migraph/instruction.hpp>
#include <migraph/dfor.hpp>
#include <migraph/operators.hpp>
#include <migraph/shape_for_each.hpp>
namespace rtg {
namespace migraph {
namespace cpu {
template <typename T>
@@ -20,7 +20,7 @@ struct cpu_convolution
std::string name() const { return "cpu::convolution"; }
shape compute_shape(std::vector<shape> inputs) const { return op.compute_shape(inputs); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0], args[1])([&](auto output, auto input, auto weights) {
@@ -86,7 +86,7 @@ struct cpu_pooling
std::string name() const { return "cpu::pooling_" + Op::name(); }
shape compute_shape(std::vector<shape> inputs) const { return op.compute_shape(inputs); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0])([&](auto output, auto input) {
@@ -134,7 +134,7 @@ struct cpu_transpose
std::string name() const { return "cpu::transpose"; }
shape compute_shape(std::vector<shape> inputs) const { return op.compute_shape(inputs); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
return {output_shape, std::move(args.front().data)};
}
@@ -145,7 +145,7 @@ struct cpu_contiguous
contiguous op;
std::string name() const { return "cpu::contiguous"; }
shape compute_shape(std::vector<shape> inputs) const { return op.compute_shape(inputs); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0])([&](auto output, auto input) {
@@ -163,7 +163,7 @@ struct cpu_reshape
std::string name() const { return "cpu::reshape"; }
shape compute_shape(std::vector<shape> inputs) const { return op.compute_shape(inputs); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
return {output_shape, std::move(args.front().data)};
}
@@ -175,7 +175,7 @@ struct cpu_gemm
std::string name() const { return "cpu::gemm"; }
shape compute_shape(std::vector<shape> inputs) const { return op.compute_shape(inputs); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0], args[1])([&](auto cmat, auto amat, auto bmat) {
@@ -334,7 +334,7 @@ struct cpu_unary
Op op;
std::string name() const { return op.name(); }
shape compute_shape(std::vector<shape> inputs) const { return inputs.front(); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
result.visit([&](auto output) {
@@ -350,7 +350,7 @@ struct softmax2d
{
std::string name() const { return "cpu::softmax2d"; }
shape compute_shape(std::vector<shape> inputs) const { return inputs.front(); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0])([&](auto output, auto input) {
@@ -426,7 +426,7 @@ struct cpu_binary
Op op;
std::string name() const { return op.name(); }
shape compute_shape(std::vector<shape> inputs) const { return inputs.front(); }
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, args[0], args[1])([&](auto output, auto input1, auto input2) {
@@ -544,4 +544,4 @@ void cpu_target::apply(program& p) const { cpu_apply{&p}.apply(); }
} // namespace cpu
} // namespace rtg
} // namespace migraph
#ifndef RTG_GUARD_RTGLIB_CPU_TARGET_HPP
#define RTG_GUARD_RTGLIB_CPU_TARGET_HPP
#ifndef MIGRAPH_GUARD_MIGRAPHLIB_CPU_TARGET_HPP
#define MIGRAPH_GUARD_MIGRAPHLIB_CPU_TARGET_HPP
#include <rtg/program.hpp>
#include <migraph/program.hpp>
namespace rtg {
namespace migraph {
namespace cpu {
struct cpu_target
{
std::string name() const;
void apply(program& p) const;
context get_context() const { return {}; }
};
} // namespace cpu
} // namespace rtg
} // namespace migraph
#endif
@@ -6,9 +6,10 @@ if(NOT TARGET MIOpen)
message(SEND_ERROR "Can't find miopen")
endif()
add_library(rtg_miopen
add_library(migraph_miopen
hip.cpp
miopen_target.cpp
)
rocm_clang_tidy_check(rtg_miopen)
target_link_libraries(rtg_miopen rtg MIOpen)
target_include_directories(rtg_miopen PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
rocm_clang_tidy_check(migraph_miopen)
target_link_libraries(migraph_miopen migraph MIOpen)
target_include_directories(migraph_miopen PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
#include <migraph/miopen/hip.hpp>
#include <migraph/manage_ptr.hpp>
#include <miopen/miopen.h>
#include <vector>
namespace migraph {
namespace miopen {
using hip_ptr = MIGRAPH_MANAGE_PTR(void, hipFree);
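// Owning handle for device memory; releases it with hipFree on destruction.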
hip_ptr allocate_gpu(std::size_t sz)
{
void* result;
// TODO: Check status
hipMalloc(&result, sz);
return hip_ptr{result};
}
// Defined below; forward-declare it so the templated overload can call it.
hip_ptr write_to_gpu(const void* x, std::size_t sz);
template <class T>
hip_ptr write_to_gpu(const T& x)
{
using type = typename T::value_type;
auto size = x.size() * sizeof(type);
return write_to_gpu(x.data(), size);
}
template <class T>
std::vector<T> read_from_gpu(const void* x, std::size_t sz)
{
std::vector<T> result(sz);
// TODO: Check status
hipMemcpy(result.data(), x, sz * sizeof(T), hipMemcpyDeviceToHost);
return result;
}
hip_ptr write_to_gpu(const void* x, std::size_t sz)
{
auto result = allocate_gpu(sz);
// TODO: Check status
hipMemcpy(result.get(), x, sz, hipMemcpyHostToDevice);
return result;
}
migraph::argument allocate_gpu(migraph::shape s)
{
auto p = share(allocate_gpu(s.bytes()));
return {s, [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
}
migraph::argument to_gpu(migraph::argument arg)
{
auto p = share(write_to_gpu(arg.data(), arg.get_shape().bytes()));
return {arg.get_shape(), [p]() mutable { return reinterpret_cast<char*>(p.get()); }};
}
migraph::argument from_gpu(migraph::argument arg)
{
migraph::argument result;
arg.visit([&](auto x) {
using type = typename decltype(x)::value_type;
auto v = read_from_gpu<type>(arg.data(), x.get_shape().bytes() / sizeof(type));
result = {x.get_shape(), [v]() mutable { return reinterpret_cast<char*>(v.data()); }};
});
return result;
}
} // namespace miopen
} // namespace migraph
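A round trip through these helpers looks like the following (an illustrative sketch; the shape is arbitrary, and generate_argument comes from <migraph/generate.hpp>, as used elsewhere in this commit):

migraph::shape s{migraph::shape::float_type, {2, 2}};
auto host = migraph::generate_argument(s);
auto device = migraph::miopen::to_gpu(host);
auto round_trip = migraph::miopen::from_gpu(device); // should compare equal to host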
#ifndef MIGRAPH_GUARD_MIGRAPHLIB_HIP_HPP
#define MIGRAPH_GUARD_MIGRAPHLIB_HIP_HPP
#include <migraph/operators.hpp>
namespace migraph {
namespace miopen {
migraph::argument allocate_gpu(migraph::shape s);
migraph::argument to_gpu(migraph::argument arg);
migraph::argument from_gpu(migraph::argument arg);
struct hip_allocate
{
std::string name() const { return "hip::allocate"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.front();
}
argument compute(context&, shape output_shape, std::vector<argument>) const
{
return allocate_gpu(output_shape);
}
};
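// The miopen target inserts hip_allocate to reserve device buffers for op outputs.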
} // namespace miopen
} // namespace migraph
#endif
#ifndef MIGRAPH_GUARD_MIGRAPHLIB_MIOPEN_HPP
#define MIGRAPH_GUARD_MIGRAPHLIB_MIOPEN_HPP
#include <migraph/manage_ptr.hpp>
#include <miopen/miopen.h>
namespace migraph {
namespace miopen {
using miopen_handle = MIGRAPH_MANAGE_PTR(miopenHandle_t, miopenDestroy);
using tensor_descriptor = MIGRAPH_MANAGE_PTR(miopenTensorDescriptor_t,
miopenDestroyTensorDescriptor);
using convolution_descriptor = MIGRAPH_MANAGE_PTR(miopenConvolutionDescriptor_t,
miopenDestroyConvolutionDescriptor);
using pooling_descriptor = MIGRAPH_MANAGE_PTR(miopenPoolingDescriptor_t,
miopenDestroyPoolingDescriptor);
using activation_descriptor = MIGRAPH_MANAGE_PTR(miopenActivationDescriptor_t,
miopenDestroyActivationDescriptor);
template <class Result, class F, class... Ts>
Result make_obj(F f, Ts... xs)
{
typename Result::pointer x = nullptr;
auto status = f(&x, xs...);
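// Wrap the raw handle before checking status so it is destroyed even if creation failed.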
Result r{x};
if(status != miopenStatusSuccess)
MIGRAPH_THROW("MIOpen call failed");
return r;
}
inline tensor_descriptor make_tensor(const migraph::shape& s)
{
auto t = make_obj<tensor_descriptor>(&miopenCreateTensorDescriptor);
// Convert to ints
std::vector<int> lens(s.lens().begin(), s.lens().end());
std::vector<int> strides(s.strides().begin(), s.strides().end());
miopenDataType_t d;
if(s.type() == shape::float_type)
d = miopenFloat;
else
MIGRAPH_THROW("Unsupported type");
miopenSetTensorDescriptor(t.get(), d, s.lens().size(), lens.data(), strides.data());
return t;
}
inline convolution_descriptor make_conv(const migraph::convolution& op)
{
auto c = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor);
miopenInitConvolutionDescriptor(c.get(),
miopenConvolution,
op.padding[0],
op.padding[1],
op.stride[0],
op.stride[1],
op.dilation[0],
op.dilation[1]);
return c;
}
inline pooling_descriptor make_pooling(const migraph::pooling& op)
{
miopenPoolingMode_t mode;
if(op.mode == "max")
mode = miopenPoolingMax;
else
mode = miopenPoolingAverage;
auto p = make_obj<pooling_descriptor>(&miopenCreatePoolingDescriptor);
miopenSet2dPoolingDescriptor(p.get(),
mode,
op.lengths[0],
op.lengths[1],
op.padding[0],
op.padding[1],
op.stride[0],
op.stride[1]);
return p;
}
inline activation_descriptor make_relu()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationRELU, 0, 0, 0);
return ad;
}
} // namespace miopen
} // namespace migraph
#endif
#ifndef RTG_GUARD_RTGLIB_MIOPEN_TARGET_HPP
#define RTG_GUARD_RTGLIB_MIOPEN_TARGET_HPP
#ifndef MIGRAPH_GUARD_MIGRAPHLIB_MIOPEN_TARGET_HPP
#define MIGRAPH_GUARD_MIGRAPHLIB_MIOPEN_TARGET_HPP
#include <rtg/program.hpp>
#include <migraph/program.hpp>
namespace rtg {
namespace migraph {
namespace miopen {
struct miopen_target
{
std::string name() const;
void apply(program& p) const;
context get_context() const;
};
} // namespace miopen
} // namespace rtg
} // namespace migraph
#endif
#include <rtg/miopen/miopen_target.hpp>
#include <rtg/manage_ptr.hpp>
#include <rtg/instruction.hpp>
#include <rtg/operators.hpp>
#include <migraph/miopen/miopen_target.hpp>
#include <migraph/manage_ptr.hpp>
#include <migraph/instruction.hpp>
#include <migraph/operators.hpp>
#include <migraph/shape_for_each.hpp>
#include <migraph/miopen/miopen.hpp>
#include <migraph/miopen/hip.hpp>
#include <migraph/dfor.hpp>
#include <miopen/miopen.h>
namespace rtg {
namespace migraph {
namespace miopen {
struct hip_allocate
struct miopen_context
{
std::string name() const { return "hip::allocate"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return inputs.front();
}
argument compute(shape output_shape, std::vector<argument>) const
{
char* data = nullptr;
// TODO: Check return status
hipMalloc(&data, output_shape.bytes());
return {output_shape, data};
}
shared<miopen_handle> handle;
};
struct hip_free
{
std::string name() const { return "hip::free"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(1);
return {};
}
argument compute(shape, std::vector<argument> args) const
{
// TODO: Check return status
hipFree(args.front().data());
return {};
}
};
using miopen_handle = RTG_MANAGE_PTR(miopenHandle_t, miopenDestroy);
using tensor_descriptor = RTG_MANAGE_PTR(miopenTensorDescriptor_t, miopenDestroyTensorDescriptor);
using convolution_descriptor = RTG_MANAGE_PTR(miopenConvolutionDescriptor_t,
miopenDestroyConvolutionDescriptor);
using activation_descriptor = RTG_MANAGE_PTR(miopenActivationDescriptor_t,
miopenDestroyActivationDescriptor);
template <class Result, class F, class... Ts>
Result make_obj(F f, Ts... xs)
{
typename Result::pointer x = nullptr;
auto status = f(&x, xs...);
Result r{x};
if(status != miopenStatusSuccess)
RTG_THROW("MIOpen call failed");
return r;
}
tensor_descriptor make_tensor(const rtg::shape& s)
{
auto t = make_obj<tensor_descriptor>(&miopenCreateTensorDescriptor);
// Convert to ints
std::vector<int> lens(s.lens().begin(), s.lens().end());
std::vector<int> strides(s.strides().begin(), s.strides().end());
miopenDataType_t d;
if(s.type() == shape::float_type)
d = miopenFloat;
else
RTG_THROW("Unsupported type");
miopenSetTensorDescriptor(t.get(), d, s.lens().size(), lens.data(), strides.data());
return t;
}
convolution_descriptor make_conv(const rtg::convolution& op)
{
auto c = make_obj<convolution_descriptor>(&miopenCreateConvolutionDescriptor);
miopenInitConvolutionDescriptor(c.get(),
miopenConvolution,
op.padding[0],
op.padding[1],
op.stride[0],
op.stride[1],
op.dilation[0],
op.dilation[1]);
return c;
}
activation_descriptor make_relu()
{
auto ad = make_obj<activation_descriptor>(&miopenCreateActivationDescriptor);
miopenSetActivationDescriptor(ad.get(), miopenActivationRELU, 0, 0, 0);
return ad;
}
struct miopen_convolution
{
convolution op;
@@ -103,46 +23,153 @@ struct miopen_convolution
std::string name() const { return "miopen::convolution"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(4);
return op.compute_shape({inputs.at(1), inputs.at(2)});
check_shapes{inputs, *this}.has(3);
return op.compute_shape({inputs.at(0), inputs.at(1)});
}
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context& gctx, shape output_shape, std::vector<argument> args) const
{
auto x_desc = make_tensor(args[1].get_shape());
auto w_desc = make_tensor(args[2].get_shape());
auto& ctx = any_cast<miopen_context>(gctx);
auto x_desc = make_tensor(args[0].get_shape());
auto w_desc = make_tensor(args[1].get_shape());
auto y_desc = make_tensor(output_shape);
float alpha = 1, beta = 0;
int algo_count;
miopenConvAlgoPerf_t perf;
miopenFindConvolutionForwardAlgorithm(args[0].implicit(),
miopenFindConvolutionForwardAlgorithm(ctx.handle.get(),
x_desc.get(),
args[1].implicit(),
args[0].implicit(),
w_desc.get(),
args[2].implicit(),
args[1].implicit(),
cd.get(),
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
1,
&algo_count,
&perf,
nullptr,
0,
false);
miopenConvolutionForward(args[0].implicit(),
miopenConvolutionForward(ctx.handle.get(),
&alpha,
x_desc.get(),
args[1].implicit(),
args[0].implicit(),
w_desc.get(),
args[2].implicit(),
args[1].implicit(),
cd.get(),
perf.fwd_algo,
&beta,
y_desc.get(),
args[3].implicit(),
args[2].implicit(),
nullptr,
0);
return args[3];
return args[2];
}
};
struct miopen_pooling
{
pooling op;
shared<pooling_descriptor> pd;
std::string name() const { return "miopen::pooling"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(2);
return op.compute_shape({inputs.at(1)});
}
argument compute(context& gctx, shape output_shape, std::vector<argument> args) const
{
auto& ctx = any_cast<miopen_context>(gctx);
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
float alpha = 1, beta = 0;
miopenPoolingForward(ctx.handle.get(),
pd.get(),
&alpha,
x_desc.get(),
args[0].implicit(),
&beta,
y_desc.get(),
args[1].implicit(),
false,
nullptr,
0);
return args[1];
}
};
struct miopen_add
{
std::string name() const { return "miopen::add"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(3);
return inputs.at(0);
}
argument compute(context& gctx, shape output_shape, std::vector<argument> args) const
{
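// Broadcast operands fall back to a host-side loop; the dense case runs on the GPU via miopenOpTensor.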
if(args[1].get_shape().broadcasted())
{
argument result{output_shape};
visit_all(result, from_gpu(args[0]), from_gpu(args[1]))(
[&](auto output, auto input1, auto input2) {
shape_for_each(output.get_shape(), [&](const auto& idx) {
output(idx.begin(), idx.end()) =
input1(idx.begin(), idx.end()) + input2(idx.begin(), idx.end());
});
});
return to_gpu(result);
}
else
{
auto& ctx = any_cast<miopen_context>(gctx);
float alpha = 1, beta = 0;
auto a_desc = make_tensor(args[0].get_shape());
auto b_desc = make_tensor(args[1].get_shape());
auto c_desc = make_tensor(output_shape);
miopenOpTensor(ctx.handle.get(),
miopenTensorOpAdd,
&alpha,
a_desc.get(),
args[0].implicit(),
&alpha,
b_desc.get(),
args[1].implicit(),
&beta,
c_desc.get(),
args[2].implicit());
return args[2];
}
}
};
struct miopen_gemm
{
gemm op;
std::string name() const { return "miopen::convolution"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs, *this}.has(3);
return op.compute_shape({inputs.at(0), inputs.at(1)});
}
argument compute(context&, shape output_shape, std::vector<argument> args) const
{
argument result{output_shape};
visit_all(result, from_gpu(args[0]), from_gpu(args[1]))(
[&](auto output, auto input1, auto input2) {
dfor(input1.get_shape().lens()[0],
input2.get_shape().lens()[1],
input2.get_shape().lens()[0])(
[&](auto i, auto j, auto k) { output(i, j) += input1(i, k) * input2(k, j); });
});
return to_gpu(result);
}
};
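// Note: this gemm computes on the host and copies through GPU memory; it is a correctness fallback, not a tuned kernel.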
@@ -152,36 +179,36 @@ struct miopen_relu
std::string name() const { return "miopen::relu"; }
shape compute_shape(std::vector<shape> inputs) const
{
check_shapes{inputs}.has(3);
check_shapes{inputs, *this}.has(2);
return inputs.at(1);
}
argument compute(shape output_shape, std::vector<argument> args) const
argument compute(context& gctx, shape output_shape, std::vector<argument> args) const
{
auto& ctx = any_cast<miopen_context>(gctx);
float alpha = 1, beta = 0;
auto x_desc = make_tensor(args[1].get_shape());
auto x_desc = make_tensor(args[0].get_shape());
auto y_desc = make_tensor(output_shape);
miopenActivationForward(args[0].implicit(),
miopenActivationForward(ctx.handle.get(),
ad.get(),
&alpha,
x_desc.get(),
args[1].implicit(),
args[0].implicit(),
&beta,
y_desc.get(),
args[2].implicit());
args[1].implicit());
return args[2];
return args[1];
}
};
struct miopen_apply
{
program* prog = nullptr;
instruction_ref handle{};
void apply()
{
handle = prog->add_parameter("handle", shape{shape::any_type});
prog->insert_instruction(prog->begin(), check_context<miopen_context>{});
for(auto it = prog->begin(); it != prog->end(); it++)
{
if(it->op.name() == "convolution")
@@ -192,6 +219,18 @@ struct miopen_apply
{
apply_activation(it);
}
else if(it->op.name() == "pooling")
{
apply_pooling(it);
}
else if(it->op.name() == "add")
{
apply_add(it);
}
else if(it->op.name() == "gemm")
{
apply_gemm(it);
}
}
}
@@ -205,7 +244,6 @@
{
auto is = prog->add_outline(s);
auto result = prog->insert_instruction(ins, hip_allocate{}, is);
prog->insert_instruction(++ins, hip_free{}, result);
return result;
}
}
@@ -218,12 +256,21 @@
prog->replace_instruction(ins,
miopen_convolution{op, std::move(cd)},
handle,
ins->arguments.at(0),
ins->arguments.at(1),
output);
}
void apply_pooling(instruction_ref ins)
{
auto&& op = any_cast<pooling>(ins->op);
auto pd = make_pooling(op);
auto output = insert_allocation(ins, ins->result);
prog->replace_instruction(
ins, miopen_pooling{op, std::move(pd)}, ins->arguments.at(0), output);
}
void apply_activation(instruction_ref ins)
{
auto&& op = any_cast<activation>(ins->op);
@@ -232,15 +279,35 @@
{
auto output = insert_allocation(ins, ins->result);
prog->replace_instruction(
ins, miopen_relu{std::move(ad)}, handle, ins->arguments.at(0), output);
ins, miopen_relu{std::move(ad)}, ins->arguments.at(0), output);
}
}
void apply_add(instruction_ref ins)
{
auto output = insert_allocation(ins, ins->result);
prog->replace_instruction(
ins, miopen_add{}, ins->arguments.at(0), ins->arguments.at(1), output);
}
void apply_gemm(instruction_ref ins)
{
auto&& op = any_cast<gemm>(ins->op);
auto output = insert_allocation(ins, ins->result);
prog->replace_instruction(
ins, miopen_gemm{op}, ins->arguments.at(0), ins->arguments.at(1), output);
}
};
std::string miopen_target::name() const { return "miopen"; }
void miopen_target::apply(program& p) const { miopen_apply{&p}.apply(); }
context miopen_target::get_context() const
{
return miopen_context{share(make_obj<miopen_handle>(&miopenCreate))};
}
} // namespace miopen
} // namespace rtg
} // namespace migraph
@@ -10,12 +10,12 @@ set(CTEST_PARALLEL_LEVEL ${N} CACHE STRING "CTest parallel level")
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -j ${CTEST_PARALLEL_LEVEL} -C ${CMAKE_CFG_INTDIR})
add_custom_target(tests)
find_program(RTG_GDB gdb)
find_program(MIGRAPH_GDB gdb)
if(RTG_GDB)
set(RTG_TEST_GDB On CACHE BOOL "")
if(MIGRAPH_GDB)
set(MIGRAPH_TEST_GDB On CACHE BOOL "")
else()
set(RTG_TEST_GDB Off CACHE BOOL "")
set(MIGRAPH_TEST_GDB Off CACHE BOOL "")
endif()
set(SKIP_TESTS)
@@ -34,8 +34,8 @@ function(add_test_command NAME EXE)
%1 ${ARGN}")
add_test(NAME ${NAME} COMMAND ${WINE_CMD} cmd /c "${CMAKE_CURRENT_BINARY_DIR}/test_${NAME}.cmd" $<TARGET_FILE:${EXE}>)
else()
if(RTG_TEST_GDB)
# add_test(NAME ${NAME} COMMAND ${RTG_GDB}
if(MIGRAPH_TEST_GDB)
# add_test(NAME ${NAME} COMMAND ${MIGRAPH_GDB}
# --batch
# --return-child-result
# -ex "set disable-randomization off"
@@ -52,7 +52,7 @@ function(add_test_command NAME EXE)
if(NOT RESULT EQUAL 0)
# TODO: check for core files based on pid when setting /proc/sys/kernel/core_uses_pid
if(EXISTS ${TEST_DIR}/core)
execute_process(COMMAND ${RTG_GDB} $<TARGET_FILE:${EXE}> ${TEST_DIR}/core -batch -ex bt)
execute_process(COMMAND ${MIGRAPH_GDB} $<TARGET_FILE:${EXE}> ${TEST_DIR}/core -batch -ex bt)
endif()
message(FATAL_ERROR \"Test failed\")
endif()
@@ -82,7 +82,7 @@ function(add_test_executable TEST_NAME)
add_dependencies(tests ${TEST_NAME})
add_dependencies(check ${TEST_NAME})
set_tests_properties(${TEST_NAME} PROPERTIES FAIL_REGULAR_EXPRESSION "FAILED")
target_link_libraries(${TEST_NAME} rtg rtg_cpu)
target_link_libraries(${TEST_NAME} migraph migraph_cpu)
target_include_directories(${TEST_NAME} PUBLIC include)
endfunction(add_test_executable)
@@ -93,13 +93,13 @@ foreach(TEST ${TESTS})
add_test_executable(test_${BASE_NAME} ${TEST})
endforeach()
if(RTG_ENABLE_MIOPEN)
if(MIGRAPH_ENABLE_MIOPEN)
# miopen tests
file(GLOB MIOPEN_TESTS miopen/*.cpp)
foreach(TEST ${MIOPEN_TESTS})
get_filename_component(BASE_NAME ${TEST} NAME_WE)
add_test_executable(test_miopen_${BASE_NAME} ${TEST})
target_link_libraries(test_miopen_${BASE_NAME} rtg_miopen)
target_link_libraries(test_miopen_${BASE_NAME} migraph_miopen)
endforeach()
endif()
#include <iostream>
#include <vector>
#include <rtg/literal.hpp>
#include <rtg/operators.hpp>
#include <rtg/cpu/cpu_target.hpp>
#include <migraph/literal.hpp>
#include <migraph/operators.hpp>
#include <migraph/cpu/cpu_target.hpp>
#include "test.hpp"
#include "verify.hpp"
void exp_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l = p.add_literal(rtg::literal{s, {-1, 0, 1}});
p.add_instruction(rtg::exp{}, l);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
p.add_instruction(migraph::exp{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -22,11 +22,11 @@ void exp_test()
void sin_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l = p.add_literal(rtg::literal{s, {-1, 0, 1}});
p.add_instruction(rtg::sin{}, l);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
p.add_instruction(migraph::sin{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -36,11 +36,11 @@ void sin_test()
void cos_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l = p.add_literal(rtg::literal{s, {-1, 0, 1}});
p.add_instruction(rtg::cos{}, l);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
p.add_instruction(migraph::cos{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -50,11 +50,11 @@ void cos_test()
void tan_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l = p.add_literal(rtg::literal{s, {-1, 0, 1}});
p.add_instruction(rtg::tan{}, l);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l = p.add_literal(migraph::literal{s, {-1, 0, 1}});
p.add_instruction(migraph::tan{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -64,12 +64,12 @@ void tan_test()
void add_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l1 = p.add_literal(rtg::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(rtg::literal{s, {1, 2, 3}});
p.add_instruction(rtg::add{}, l1, l2);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
p.add_instruction(migraph::add{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -79,16 +79,16 @@ void add_test()
void broadcast_test()
{
rtg::program p;
rtg::shape a_shape{rtg::shape::int32_type, {2, 2}};
migraph::program p;
migraph::shape a_shape{migraph::shape::int32_type, {2, 2}};
std::vector<int32_t> a_data{0, 0, 0, 0};
rtg::shape b_shape{rtg::shape::int32_type, {2}};
migraph::shape b_shape{migraph::shape::int32_type, {2}};
std::vector<int32_t> b_data{-2, -3};
uint64_t axis = 0;
auto l1 = p.add_literal(rtg::literal{a_shape, a_data});
auto l2 = p.add_literal(rtg::literal{b_shape, b_data});
p.add_instruction(rtg::broadcast{axis}, l1, l2);
p.compile(rtg::cpu::cpu_target{});
auto l1 = p.add_literal(migraph::literal{a_shape, a_data});
auto l2 = p.add_literal(migraph::literal{b_shape, b_data});
p.add_instruction(migraph::broadcast{axis}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
auto output = result.get<int32_t>();
EXPECT(output(0, 0) == -2);
@@ -98,17 +98,17 @@
}
void add_broadcast_test()
{
rtg::program p;
rtg::shape a_shape{rtg::shape::float_type, {2, 2, 3}};
migraph::program p;
migraph::shape a_shape{migraph::shape::float_type, {2, 2, 3}};
std::vector<float> a_data{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
rtg::shape b_shape{rtg::shape::float_type, {2, 2}};
migraph::shape b_shape{migraph::shape::float_type, {2, 2}};
std::vector<float> b_data{0, -1, -2, -3};
uint64_t axis = 0;
auto l1 = p.add_literal(rtg::literal{a_shape, a_data});
auto l2 = p.add_literal(rtg::literal{b_shape, b_data});
auto l3 = p.add_instruction(rtg::broadcast{axis}, l1, l2);
p.add_instruction(rtg::add{}, l1, l3);
p.compile(rtg::cpu::cpu_target{});
auto l1 = p.add_literal(migraph::literal{a_shape, a_data});
auto l2 = p.add_literal(migraph::literal{b_shape, b_data});
auto l3 = p.add_instruction(migraph::broadcast{axis}, l1, l2);
p.add_instruction(migraph::add{}, l1, l3);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
EXPECT(result.get_shape().packed());
std::vector<float> results_vector(12);
@@ -119,12 +119,12 @@
void sub_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l1 = p.add_literal(rtg::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(rtg::literal{s, {1, 2, 3}});
p.add_instruction(rtg::sub{}, l1, l2);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
p.add_instruction(migraph::sub{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -134,12 +134,12 @@
void mul_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l1 = p.add_literal(rtg::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(rtg::literal{s, {1, 2, 3}});
p.add_instruction(rtg::mul{}, l1, l2);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1, 0, 1}});
auto l2 = p.add_literal(migraph::literal{s, {1, 2, 3}});
p.add_instruction(migraph::mul{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -149,12 +149,12 @@
void div_test()
{
rtg::program p;
rtg::shape s{rtg::shape::float_type, {3}};
auto l1 = p.add_literal(rtg::literal{s, {-1.0f, 0.5f, 1.0f}});
auto l2 = p.add_literal(rtg::literal{s, {1.0f, 2.0f, 4.0f}});
p.add_instruction(rtg::div{}, l1, l2);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
migraph::shape s{migraph::shape::float_type, {3}};
auto l1 = p.add_literal(migraph::literal{s, {-1.0f, 0.5f, 1.0f}});
auto l2 = p.add_literal(migraph::literal{s, {1.0f, 2.0f, 4.0f}});
p.add_instruction(migraph::div{}, l1, l2);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -164,37 +164,37 @@
void reshape_test()
{
rtg::shape a_shape{rtg::shape::float_type, {24, 1, 1, 1}};
migraph::shape a_shape{migraph::shape::float_type, {24, 1, 1, 1}};
std::vector<float> data(24);
std::iota(data.begin(), data.end(), -3);
{
rtg::program p;
auto l = p.add_literal(rtg::literal{a_shape, data});
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> new_shape = {8, 3, 1, 1};
p.add_instruction(rtg::reshape{new_shape}, l);
p.compile(rtg::cpu::cpu_target{});
p.add_instruction(migraph::reshape{new_shape}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(test::verify_range(results_vector, data));
}
{
rtg::program p;
auto l = p.add_literal(rtg::literal{a_shape, data});
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> new_shape = {1, 3, 4, 2};
p.add_instruction(rtg::reshape{new_shape}, l);
p.compile(rtg::cpu::cpu_target{});
p.add_instruction(migraph::reshape{new_shape}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(test::verify_range(results_vector, data));
}
{
rtg::program p;
auto l = p.add_literal(rtg::literal{a_shape, data});
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> new_shape = {1, 3, 4, 2};
p.add_instruction(rtg::reshape{new_shape}, l);
p.compile(rtg::cpu::cpu_target{});
p.add_instruction(migraph::reshape{new_shape}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -204,7 +204,7 @@
void gemm_test()
{
rtg::program p;
migraph::program p;
std::vector<float> a = {-0.00925222, 0.56250403, 0.70107397, 0.75402161, -0.505885,
1.33628943, -0.11413, -0.31270559, 1.59336732, -0.19361027,
-0.91620867, 0.40108416, -0.06969921, 0.68483471, -0.39906632,
@@ -236,12 +236,12 @@
-1.29885596e+00,
2.16294914e+00,
-1.48101497e-01};
rtg::shape a_shape{rtg::shape::float_type, {4, 5}};
auto al = p.add_literal(rtg::literal{a_shape, a});
rtg::shape b_shape{rtg::shape::float_type, {5, 3}};
auto bl = p.add_literal(rtg::literal{b_shape, b});
p.add_instruction(rtg::gemm{}, al, bl);
p.compile(rtg::cpu::cpu_target{});
migraph::shape a_shape{migraph::shape::float_type, {4, 5}};
auto al = p.add_literal(migraph::literal{a_shape, a});
migraph::shape b_shape{migraph::shape::float_type, {5, 3}};
auto bl = p.add_literal(migraph::literal{b_shape, b});
p.add_instruction(migraph::gemm{}, al, bl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(12);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
......@@ -254,7 +254,7 @@ void gemm_test()
void maxpool_test()
{
rtg::program p;
migraph::program p;
std::vector<float> a = {
-2.1314404, -1.63041711, 1.54562736, 1.04625261, -1.42931843, -0.48703974, 0.4065806,
-0.1524526, 1.30775225, 0.45538983, -0.06631992, -1.75332725, 1.33493888, 0.47327688,
@@ -293,10 +293,10 @@
1.95433736, 2.46601582, 1.53285873, 1.95433736, 1.06763375, 1.4545635,
1.33624589, 1.16736257, 0.6126079, 1.36892557, 2.40126371, 1.53441942,
0.52119428, 2.07681108, 0.88494766, 1.51522756, 0.54275119, 0.6629802};
rtg::shape a_shape{rtg::shape::float_type, {2, 3, 6, 6}};
auto al = p.add_literal(rtg::literal{a_shape, a});
p.add_instruction(rtg::pooling{"max", {{0, 0}}, {{2, 2}}, {{3, 2}}}, al);
p.compile(rtg::cpu::cpu_target{});
migraph::shape a_shape{migraph::shape::float_type, {2, 3, 6, 6}};
auto al = p.add_literal(migraph::literal{a_shape, a});
p.add_instruction(migraph::pooling{"max", {{0, 0}}, {{2, 2}}, {{3, 2}}}, al);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::cout << result.get_shape() << std::endl;
std::vector<float> results_vector(36);
@@ -311,7 +311,7 @@
void softmax_test()
{
rtg::program p;
migraph::program p;
std::vector<float> a = {
-5.61869681e-01, 9.07827199e-01, 1.29255986e+00, 3.18533443e-02, -1.22183852e-03,
-2.83830553e-01, -1.03245842e+00, -9.28322077e-01, -8.82696748e-01, 1.11327164e-01,
@@ -358,10 +358,10 @@
0.17377149, 0.76075399, 0.20071237, 0.32632929, 0.36892858, 0.09416146, 0.26656723,
0.42914796};
rtg::shape a_shape{rtg::shape::float_type, {5, 3, 4, 2}};
auto al = p.add_literal(rtg::literal{a_shape, a});
p.add_instruction(rtg::softmax{}, al);
p.compile(rtg::cpu::cpu_target{});
migraph::shape a_shape{migraph::shape::float_type, {5, 3, 4, 2}};
auto al = p.add_literal(migraph::literal{a_shape, a});
p.add_instruction(migraph::softmax{}, al);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(120);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
@@ -370,7 +370,7 @@
void conv2d_test()
{
rtg::program p;
migraph::program p;
std::vector<float> a = {
2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712,
-0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606,
@@ -416,14 +416,14 @@
0.71606487,
-0.55201721,
-0.46427044};
rtg::shape a_shape{rtg::shape::float_type, {2, 3, 4, 4}};
auto al = p.add_literal(rtg::literal{a_shape, a});
migraph::shape a_shape{migraph::shape::float_type, {2, 3, 4, 4}};
auto al = p.add_literal(migraph::literal{a_shape, a});
rtg::shape c_shape{rtg::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(rtg::literal{c_shape, c});
migraph::shape c_shape{migraph::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(migraph::literal{c_shape, c});
p.add_instruction(rtg::convolution{}, al, cl);
p.compile(rtg::cpu::cpu_target{});
p.add_instruction(migraph::convolution{}, al, cl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(16);
@@ -433,7 +433,7 @@
void conv2d_padding_test()
{
rtg::program p;
migraph::program p;
std::vector<float> a = {
2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712,
-0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606,
@@ -472,14 +472,14 @@
-0.20369984, -0.83037728, -1.40423918, -0.46160448, -0.22944322, 0.36074194, 0.49579027,
0.46527559};
rtg::shape a_shape{rtg::shape::float_type, {2, 3, 4, 4}};
auto al = p.add_literal(rtg::literal{a_shape, a});
migraph::shape a_shape{migraph::shape::float_type, {2, 3, 4, 4}};
auto al = p.add_literal(migraph::literal{a_shape, a});
rtg::shape c_shape{rtg::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(rtg::literal{c_shape, c});
migraph::shape c_shape{migraph::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(migraph::literal{c_shape, c});
p.add_instruction(rtg::convolution{{{1, 1}}, {{1, 1}}}, al, cl);
p.compile(rtg::cpu::cpu_target{});
p.add_instruction(migraph::convolution{{{1, 1}}, {{1, 1}}}, al, cl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(64);
@@ -489,7 +489,7 @@
void conv2d_padding_stride_test()
{
rtg::program p;
migraph::program p;
std::vector<float> a = {
2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712,
-0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606,
@@ -533,14 +533,14 @@
-0.16138598,
0.79344082};
rtg::shape a_shape{rtg::shape::float_type, {2, 3, 4, 4}};
auto al = p.add_literal(rtg::literal{a_shape, a});
migraph::shape a_shape{migraph::shape::float_type, {2, 3, 4, 4}};
auto al = p.add_literal(migraph::literal{a_shape, a});
rtg::shape c_shape{rtg::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(rtg::literal{c_shape, c});
migraph::shape c_shape{migraph::shape::float_type, {2, 3, 3, 3}};
auto cl = p.add_literal(migraph::literal{c_shape, c});
p.add_instruction(rtg::convolution{{{1, 1}}, {{2, 2}}}, al, cl);
p.compile(rtg::cpu::cpu_target{});
p.add_instruction(migraph::convolution{{{1, 1}}, {{2, 2}}}, al, cl);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(16);
@@ -550,16 +550,16 @@
void transpose_test()
{
rtg::shape a_shape{rtg::shape::float_type, {1, 2, 2, 3}};
migraph::shape a_shape{migraph::shape::float_type, {1, 2, 2, 3}};
std::vector<float> data(12);
std::iota(data.begin(), data.end(), 0);
{
rtg::program p;
auto l = p.add_literal(rtg::literal{a_shape, data});
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> perm = {0, 3, 1, 2};
p.add_instruction(rtg::transpose{perm}, l);
p.compile(rtg::cpu::cpu_target{});
p.add_instruction(migraph::transpose{perm}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
result.visit([&](auto output) {
@@ -570,12 +570,12 @@
});
}
{
rtg::program p;
auto l = p.add_literal(rtg::literal{a_shape, data});
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
std::vector<int64_t> perm = {0, 3, 1, 2};
auto result = p.add_instruction(rtg::transpose{perm}, l);
p.add_instruction(rtg::contiguous{}, result);
p.compile(rtg::cpu::cpu_target{});
auto result = p.add_instruction(migraph::transpose{perm}, l);
p.add_instruction(migraph::contiguous{}, result);
p.compile(migraph::cpu::cpu_target{});
auto result2 = p.eval({});
std::vector<float> results_vector(12);
@@ -587,14 +587,14 @@
void contiguous_test()
{
rtg::shape a_shape{rtg::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
migraph::shape a_shape{migraph::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
std::vector<float> data(12);
std::iota(data.begin(), data.end(), 0);
rtg::program p;
auto l = p.add_literal(rtg::literal{a_shape, data});
p.add_instruction(rtg::contiguous{}, l);
p.compile(rtg::cpu::cpu_target{});
migraph::program p;
auto l = p.add_literal(migraph::literal{a_shape, data});
p.add_instruction(migraph::contiguous{}, l);
p.compile(migraph::cpu::cpu_target{});
auto result = p.eval({});
std::vector<float> results_vector(12);
......
#include <rtg/program.hpp>
#include <rtg/argument.hpp>
#include <rtg/shape.hpp>
#include <migraph/program.hpp>
#include <migraph/argument.hpp>
#include <migraph/shape.hpp>
#include <sstream>
#include "test.hpp"
struct sum_op
{
std::string name() const { return "sum"; }
rtg::argument compute(rtg::shape, std::vector<rtg::argument> args) const
migraph::argument
compute(migraph::context&, migraph::shape, std::vector<migraph::argument> args) const
{
rtg::argument result;
migraph::argument result;
if(args.size() != 2)
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
if(args[0].get_shape() != args[1].get_shape())
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
if(args[0].get_shape().lens().size() != 1)
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
if(args[0].get_shape().lens().front() != 1)
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
args[0].visit_at([&](auto x) {
args[1].visit_at([&](auto y) { result = rtg::literal{x + y}.get_argument(); });
args[1].visit_at([&](auto y) { result = migraph::literal{x + y}.get_argument(); });
});
return result;
}
rtg::shape compute_shape(std::vector<rtg::shape> inputs) const
migraph::shape compute_shape(std::vector<migraph::shape> inputs) const
{
if(inputs.size() != 2)
RTG_THROW("Wrong inputs");
MIGRAPH_THROW("Wrong inputs");
return inputs.front();
}
};
@@ -37,28 +38,29 @@ struct sum_op
struct minus_op
{
std::string name() const { return "minus"; }
rtg::argument compute(rtg::shape, std::vector<rtg::argument> args) const
migraph::argument
compute(migraph::context&, migraph::shape, std::vector<migraph::argument> args) const
{
rtg::argument result;
migraph::argument result;
if(args.size() != 2)
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
if(args[0].get_shape() != args[1].get_shape())
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
if(args[0].get_shape().lens().size() != 1)
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
if(args[0].get_shape().lens().front() != 1)
RTG_THROW("Wrong args");
MIGRAPH_THROW("Wrong args");
args[0].visit_at([&](auto x) {
args[1].visit_at([&](auto y) { result = rtg::literal{x - y}.get_argument(); });
args[1].visit_at([&](auto y) { result = migraph::literal{x - y}.get_argument(); });
});
return result;
}
rtg::shape compute_shape(std::vector<rtg::shape> inputs) const
migraph::shape compute_shape(std::vector<migraph::shape> inputs) const
{
if(inputs.size() != 2)
RTG_THROW("Wrong inputs");
MIGRAPH_THROW("Wrong inputs");
return inputs.front();
}
};
@@ -66,24 +68,25 @@ struct minus_op
struct id_target
{
std::string name() const { return "id"; }
void apply(rtg::program&) const {}
void apply(migraph::program&) const {}
migraph::context get_context() const { return {}; }
};
void literal_test1()
{
rtg::program p;
migraph::program p;
auto one = p.add_literal(1);
auto two = p.add_literal(2);
p.add_instruction(sum_op{}, one, two);
auto result = p.eval({});
EXPECT(result == rtg::literal{3});
EXPECT(result != rtg::literal{4});
EXPECT(result == migraph::literal{3});
EXPECT(result != migraph::literal{4});
}
void literal_test2()
{
rtg::program p;
migraph::program p;
auto one = p.add_literal(1);
auto two = p.add_literal(2);
@@ -91,15 +94,15 @@ void literal_test2()
p.add_instruction(sum_op{}, sum1, two);
auto result = p.eval({});
EXPECT(result == rtg::literal{5});
EXPECT(result != rtg::literal{3});
EXPECT(result == migraph::literal{5});
EXPECT(result != migraph::literal{3});
}
void print_test()
{
rtg::program p;
migraph::program p;
auto x = p.add_parameter("x", {rtg::shape::int64_type});
auto x = p.add_parameter("x", {migraph::shape::int64_type});
auto two = p.add_literal(2);
p.add_instruction(sum_op{}, x, two);
@@ -111,21 +114,21 @@ void print_test()
void param_test()
{
rtg::program p;
migraph::program p;
auto x = p.add_parameter("x", {rtg::shape::int64_type});
auto y = p.add_parameter("y", {rtg::shape::int64_type});
auto x = p.add_parameter("x", {migraph::shape::int64_type});
auto y = p.add_parameter("y", {migraph::shape::int64_type});
p.add_instruction(sum_op{}, x, y);
auto result =
p.eval({{"x", rtg::literal{1}.get_argument()}, {"y", rtg::literal{2}.get_argument()}});
EXPECT(result == rtg::literal{3});
EXPECT(result != rtg::literal{4});
auto result = p.eval(
{{"x", migraph::literal{1}.get_argument()}, {"y", migraph::literal{2}.get_argument()}});
EXPECT(result == migraph::literal{3});
EXPECT(result != migraph::literal{4});
}
void replace_test()
{
rtg::program p;
migraph::program p;
auto one = p.add_literal(1);
auto two = p.add_literal(2);
@@ -133,13 +136,13 @@ void replace_test()
p.replace_instruction(sum, minus_op{}, two, one);
auto result = p.eval({});
EXPECT(result == rtg::literal{1});
EXPECT(result != rtg::literal{3});
EXPECT(result == migraph::literal{1});
EXPECT(result != migraph::literal{3});
}
void insert_replace_test()
{
rtg::program p;
migraph::program p;
auto one = p.add_literal(1);
auto two = p.add_literal(2);
@@ -150,21 +153,21 @@ void insert_replace_test()
p.replace_instruction(sum1, minus_op{}, sum0, two);
auto result = p.eval({});
EXPECT(result == rtg::literal{4});
EXPECT(result != rtg::literal{5});
EXPECT(result == migraph::literal{4});
EXPECT(result != migraph::literal{5});
}
void target_test()
{
rtg::program p;
migraph::program p;
auto one = p.add_literal(1);
auto two = p.add_literal(2);
p.add_instruction(sum_op{}, one, two);
p.compile(id_target{});
auto result = p.eval({});
EXPECT(result == rtg::literal{3});
EXPECT(result != rtg::literal{4});
EXPECT(result == migraph::literal{3});
EXPECT(result != migraph::literal{4});
}
int main()
......
@@ -4,8 +4,8 @@
#include <cstdlib>
#include <iostream>
#ifndef RTG_GUARD_TEST_TEST_HPP
#define RTG_GUARD_TEST_TEST_HPP
#ifndef MIGRAPH_GUARD_TEST_TEST_HPP
#define MIGRAPH_GUARD_TEST_TEST_HPP
namespace test {
// NOLINTNEXTLINE
@@ -114,10 +114,11 @@ struct capture
};
template <class T, class F>
void failed(T x, const char* msg, const char* file, int line, F f)
void failed(T x, const char* msg, const char* func, const char* file, int line, F f)
{
if(!x.value())
{
std::cout << func << std::endl;
std::cout << file << ":" << line << ":" << std::endl;
std::cout << " FAILED: " << msg << " " << x << std::endl;
f();
@@ -162,11 +163,18 @@ void run_test()
} // namespace test
// NOLINTNEXTLINE
#define CHECK(...) \
test::failed(test::capture{}->*__VA_ARGS__, #__VA_ARGS__, __FILE__, __LINE__, [] {})
#define CHECK(...) \
test::failed( \
test::capture{}->*__VA_ARGS__, #__VA_ARGS__, __PRETTY_FUNCTION__, __FILE__, __LINE__, [] { \
})
// NOLINTNEXTLINE
#define EXPECT(...) \
test::failed(test::capture{}->*__VA_ARGS__, #__VA_ARGS__, __FILE__, __LINE__, &std::abort)
#define EXPECT(...) \
test::failed(test::capture{}->*__VA_ARGS__, \
#__VA_ARGS__, \
__PRETTY_FUNCTION__, \
__FILE__, \
__LINE__, \
&std::abort)
// NOLINTNEXTLINE
#define STATUS(...) EXPECT((__VA_ARGS__) == 0)
......
#ifndef RTG_GUARD_VERIFY_HPP
#define RTG_GUARD_VERIFY_HPP
#ifndef MIGRAPH_GUARD_VERIFY_HPP
#define MIGRAPH_GUARD_VERIFY_HPP
#include <algorithm>
#include <cmath>
......