"src/git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "2d22efb764328a3475cf71e60dd4821cda2f6885"
Unverified commit 1dd4e4d9, authored by Paul Fultz II and committed by GitHub

Refactor onnx parser (#699)



* Load op when serializing

* Formatting

* Add missing clip field

* Use make_op almost everywhere

* Formatting

* More make ops for rnns

* Get rid of spaces

* Formatting

* Remove operators headers

* Formatting

* Remove unused op headers

* Increase line threshold

* Refactor onnx_parser class

* Formatting

* Add op_parser

* Formatting

* Remove old onnx drivers

* Use file GLOB

* Parse arg ops

* Formatting

* Add pooling

* Formatting

* Add parse_batchnorm

* Add more operators

* Formatting

* Add more operators

* Formatting

* Add more operators

* Formatting

* Add more operators

* Add rnn operators

* Formatting

* Fix tidy issues

* Formatting

* Add back missing param

* Formatting

* Fix shadow variable

* Fix shadow in declaration

* Make global constant

* Formatting

* Add generic op

* Formatting

* Add binary op

* Formatting

* Add variadic op

* Formatting

* Remove unused fields and functions

* Set default values

* Formatting

* Remove unused member variable

* Add add_literal overload

* Use info.add_literal

* Formatting

* Call add_instruction through info class

* Fix tidy issues

* Formatting
Co-authored-by: mvermeulen <5479696+mvermeulen@users.noreply.github.com>
parent 69d2e38f
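The thrust of the refactor is visible in the diffs below: direct construction of operator structs (which required including each per-operator header) gives way to the string-based make_op factory, with attributes passed by name in a migraphx::value. A minimal sketch of the pattern; the alpha value here is invented for illustration:

#include <migraphx/make_op.hpp>
#include <migraphx/operation.hpp>

migraphx::operation make_elu_example()
{
    // Before: #include <migraphx/op/elu.hpp> and op::elu{0.5f}.
    // After: no per-op header; the attribute travels by name in a value.
    return migraphx::make_op("elu", {{"alpha", 0.5f}});
}

This is presumably also why the elu and leaky_relu structs below gain default member values: an operator built by name must first be default-constructible before its attributes are filled in.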
@@ -18,7 +18,7 @@ namespace op {
 struct elu
 {
     std::string name() const { return "elu"; }
-    float alpha;
+    float alpha = 1;
     shape compute_shape(std::vector<shape> inputs) const
     {
         check_shapes{inputs, *this}.has(1);
...
@@ -17,7 +17,7 @@ namespace op {
 struct leaky_relu
 {
-    float alpha;
+    float alpha = 0.01;
     template <class Self, class F>
     static auto reflect(Self& self, F f)
...
@@ -7,7 +7,9 @@ target_compile_options(onnx-proto PRIVATE -w)
 target_link_libraries(onnx-proto PRIVATE ${PROTOBUF_LIBRARY})
 set_target_properties(onnx-proto PROPERTIES POSITION_INDEPENDENT_CODE On)
-add_library(migraphx_onnx onnx.cpp)
+file(GLOB ONNX_SRCS *.cpp)
+add_library(migraphx_onnx ${ONNX_SRCS})
+target_include_directories(migraphx_onnx PRIVATE include)
 set_target_properties(migraphx_onnx PROPERTIES EXPORT_NAME onnx)
 rocm_set_soversion(migraphx_onnx ${MIGRAPHX_SO_VERSION})
 rocm_clang_tidy_check(migraphx_onnx)
@@ -17,13 +19,3 @@ target_link_libraries(migraphx_onnx PUBLIC migraphx)
 rocm_install_targets(
   TARGETS migraphx_onnx
 )
-if(MIGRAPHX_ENABLE_GPU)
-  add_executable(mnist mnist.cpp)
-  rocm_clang_tidy_check(mnist)
-  target_link_libraries(mnist migraphx_all_targets migraphx_onnx)
-  add_executable(cifar10 cifar10.cpp)
-  rocm_clang_tidy_check(cifar10)
-  target_link_libraries(cifar10 migraphx_all_targets migraphx_onnx)
-endif()
\ No newline at end of file
#include <migraphx/onnx/checks.hpp>
#include <migraphx/errors.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
void check_arg_empty(const argument& arg, const std::string& msg)
{
if(arg.empty())
{
MIGRAPHX_THROW(msg);
}
}
void check_attr_sizes(size_t kdims, size_t attr_size, const std::string& error_msg)
{
if(kdims != attr_size)
{
MIGRAPHX_THROW(error_msg + " k-dims: " + std::to_string(kdims) +
" attribute size: " + std::to_string(attr_size));
}
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
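A hypothetical call site for these helpers, in the style the pooling and convolution parsers in this commit use them (the function name and error message here are invented):

void check_kernel_attrs_sketch()
{
    // kdims comes from the input tensor rank, the attribute from the onnx node.
    std::size_t kdims = 2;
    std::vector<std::size_t> kernel_lens{3, 3, 3};
    check_attr_sizes(kdims, kernel_lens.size(), "PARSE_POOLING: inconsistent kernel_shape");
    // Throws "PARSE_POOLING: inconsistent kernel_shape k-dims: 2 attribute size: 3".
}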
#include <cstdio>
#include <string>
#include <fstream>
#include <numeric>
#include <stdexcept>
#include <migraphx/onnx.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/generate.hpp>
#include "softmax.hpp"
auto read_cifar10_images(const std::string& full_path)
{
std::ifstream file(full_path, std::ios::binary);
const size_t nimages = 10;
const size_t nbytes_per_image = 3072;
std::vector<uint8_t> raw_data(nimages * (nbytes_per_image + 1));
std::vector<uint8_t> labels(nimages);
std::vector<float> data(nimages * nbytes_per_image);
if(file.is_open())
{
file.read(reinterpret_cast<char*>(raw_data.data()),
(nbytes_per_image + 1) * nimages * sizeof(uint8_t));
uint8_t* pimage = raw_data.data();
for(size_t i = 0; i < nimages; i++, pimage += nbytes_per_image)
{
labels[i] = *pimage++;
for(size_t j = 0; j < nbytes_per_image; j++)
{
float v = float(*(pimage + j)) / 255.0f;
data[i * nbytes_per_image + j] = v;
}
}
return std::make_pair(labels, data);
}
else
{
throw std::runtime_error("Cannot open file `" + full_path + "`!");
}
}
int main(int argc, char const* argv[])
{
if(argc < 4)
{
throw std::runtime_error("Usage: cifar10 [gpu | ref] <onnx file> <cifar10 data file>");
}
std::string gpu_ref = argv[1];
std::string file = argv[2];
std::string datafile = argv[3];
auto prog = migraphx::parse_onnx(file);
std::cout << prog << std::endl;
auto imageset = read_cifar10_images(datafile);
if(gpu_ref == "gpu")
{
// GPU target
prog.compile(migraphx::gpu::target{});
migraphx::parameter_map m;
auto s = migraphx::shape{migraphx::shape::float_type, {1, 3, 32, 32}};
for(auto&& x : prog.get_parameter_shapes())
{
m[x.first] = migraphx::gpu::to_gpu(migraphx::generate_argument(x.second));
}
auto labels = imageset.first;
auto input = imageset.second;
auto* ptr = input.data();
for(int i = 0; i < 10; i++)
{
std::cout << "label: " << static_cast<uint32_t>(labels[i]) << " ----> ";
m["0"] = migraphx::gpu::to_gpu(migraphx::argument{s, &ptr[3072 * i]});
auto gpu_result = prog.eval(m).back();
auto result = migraphx::gpu::from_gpu(gpu_result);
std::vector<float> logits;
result.visit([&](auto output) { logits.assign(output.begin(), output.end()); });
std::vector<float> probs = softmax<float>(logits);
for(auto x : probs)
std::cout << x << " ";
std::cout << std::endl << std::endl;
}
}
else
{
// Reference (CPU) target
prog.compile(migraphx::ref::target{});
auto s = migraphx::shape{migraphx::shape::float_type, {1, 3, 32, 32}};
auto labels = imageset.first;
auto input = imageset.second;
auto* ptr = input.data();
for(int i = 0; i < 10; i++)
{
std::cout << "label: " << static_cast<uint32_t>(labels[i]) << " ----> ";
auto input3 = migraphx::argument{s, &ptr[3072 * i]};
auto result = prog.eval({{"0", input3}}).back();
std::vector<float> logits;
result.visit([&](auto output) { logits.assign(output.begin(), output.end()); });
std::vector<float> probs = softmax<float>(logits);
for(auto x : probs)
std::cout << x << " ";
std::cout << std::endl;
}
}
}
#include <migraphx/onnx/conv.hpp>
#include <algorithm>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
void recalc_conv_attributes(value& v, size_t kdims)
{
if(v["padding"].size() != kdims)
{
v["padding"].resize(kdims);
std::fill_n(v["padding"].begin(), kdims, 0);
}
if(v["stride"].size() != kdims)
{
v["stride"].resize(kdims);
std::fill_n(v["stride"].begin(), kdims, 1);
}
if(v["dilation"].size() != kdims)
{
v["dilation"].resize(kdims);
std::fill_n(v["dilation"].begin(), kdims, 1);
}
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
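For intuition, a sketch of what recalc_conv_attributes normalizes, with invented inputs (the vector-assignment style mirrors padding.cpp later in this commit):

void recalc_conv_attributes_sketch()
{
    migraphx::value v;
    v["padding"]  = std::vector<std::size_t>{};     // absent -> filled with 0s
    v["stride"]   = std::vector<std::size_t>{2, 2}; // already kdims long -> kept
    v["dilation"] = std::vector<std::size_t>{};     // absent -> filled with 1s
    recalc_conv_attributes(v, 2);
    // Now v["padding"] == {0, 0}, v["stride"] == {2, 2}, v["dilation"] == {1, 1}.
}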
#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_CHECKS_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_CHECKS_HPP
#include <migraphx/config.hpp>
#include <migraphx/argument.hpp>
#include <string>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
void check_arg_empty(const argument& arg, const std::string& msg);
void check_attr_sizes(size_t kdims, size_t attr_size, const std::string& error_msg);
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_CONV_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_CONV_HPP
#include <migraphx/config.hpp>
#include <migraphx/value.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
void recalc_conv_attributes(value& v, size_t kdims);
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_MAP_ACTIVATION_FUNCTIONS_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_MAP_ACTIVATION_FUNCTIONS_HPP
#include <migraphx/config.hpp>
#include <migraphx/operation.hpp>
#include <unordered_map>
#include <string>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
const std::unordered_map<std::string, operation>& map_activation_functions();
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_PARSER_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_PARSER_HPP
#include <migraphx/config.hpp>
#include <migraphx/program.hpp>
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <onnx.pb.h>
#include <unordered_map>
#include <functional>
#include <utility>
#include <vector>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
namespace onnx = onnx_for_migraphx;
struct onnx_parser
{
std::string filename;
std::string path = ".";
using attribute_map = std::unordered_map<std::string, onnx::AttributeProto>;
struct node_info
{
attribute_map attributes{};
std::size_t num_outputs = 1;
std::string name = "";
module* mm = nullptr;
instruction_ref make_contiguous(instruction_ref ins) const;
instruction_ref add_bias(const std::vector<instruction_ref>& args,
instruction_ref curr_ins,
uint64_t axis) const;
instruction_ref add_broadcastable_binary_op(const std::string& op_name,
instruction_ref arg0,
instruction_ref arg1) const;
instruction_ref add_instruction(const operation& op,
const std::vector<instruction_ref>& args) const;
template <class... Ts>
instruction_ref add_instruction(const operation& op, Ts... xs) const
{
return add_instruction(op, {xs...});
}
instruction_ref add_literal(literal l) const;
template <class... Ts>
instruction_ref add_literal(Ts&&... xs) const
{
return add_literal(literal{std::forward<Ts>(xs)...});
}
};
using node_map = std::unordered_map<std::string, onnx::NodeProto>;
using op_func = std::function<std::vector<instruction_ref>(
const onnx_parser&, const node_info&, std::vector<instruction_ref>)>;
node_map nodes;
std::unordered_map<std::string, instruction_ref> instructions;
program prog = program();
std::size_t default_dim_value = 1;
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims;
bool skip_unknown_operators = false;
std::unordered_map<std::string, op_func> ops;
onnx_parser();
operation load(const std::string& name, const node_info& info) const;
void parse_undefined(module* mm, const std::string& name);
void parse_from(std::istream& is, std::string name = "");
void parse_from(const void* data, std::size_t size);
void parse_graph(const onnx::GraphProto& graph);
literal parse_value(const onnx::AttributeProto& attr) const;
literal parse_tensor(const onnx::TensorProto& t) const;
shape parse_type(const onnx::TypeProto& t, const std::vector<std::size_t>& input_dims) const;
};
shape::type_t get_type(int dtype);
std::vector<std::size_t> compute_broadcasted_lens(std::vector<std::size_t> s0,
std::vector<std::size_t> s1);
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
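For orientation, a minimal sketch (not part of this diff) of driving onnx_parser directly with the fields declared above; normal callers go through migraphx::parse_onnx instead, and the file name here is an example:

#include <fstream>

migraphx::program parse_model_sketch()
{
    migraphx::onnx::onnx_parser parser;
    parser.default_dim_value = 4;         // substituted for dims <= 0 in the model
    parser.skip_unknown_operators = true; // insert op::unknown instead of throwing
    std::ifstream is("model.onnx", std::ios::binary);
    parser.parse_from(is, "model.onnx");
    return std::move(parser.prog);
}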
#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_REGISTER_OP_PARSER_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_REGISTER_OP_PARSER_HPP
#include <migraphx/config.hpp>
#include <migraphx/auto_register.hpp>
#include <migraphx/onnx/onnx_parser.hpp>
#include <cstring>
#include <vector>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct op_desc
{
std::string onnx_name = "";
std::string op_name = "";
};
void register_op_parser(const std::string& name, onnx_parser::op_func f);
onnx_parser::op_func get_op_parser(const std::string& name);
std::vector<std::string> get_op_parsers();
inline std::vector<instruction_ref> implicit_multi_op(std::vector<instruction_ref> inss)
{
return inss;
}
inline std::vector<instruction_ref> implicit_multi_op(instruction_ref ins) { return {ins}; }
template <class T>
void register_op_parser()
{
T parser;
for(auto&& opd : parser.operators())
register_op_parser(opd.onnx_name, [opd, parser](auto&&... xs) {
return implicit_multi_op(parser.parse(opd, xs...));
});
}
struct register_op_parser_action
{
template <class T>
static void apply()
{
register_op_parser<T>();
}
};
template <class T>
using op_parser = auto_register<register_op_parser_action, T>;
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
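A sketch of the plugin pattern this header enables, mirroring parse_arg_op later in this commit; the Identity mapping is only an example (the commit's generic-op parser covers it in practice):

// Lives in namespace migraphx::onnx and needs <migraphx/make_op.hpp>.
// Deriving from op_parser<T> self-registers the parser at load time via
// auto_register, so no central dispatch table has to be edited.
struct parse_identity : op_parser<parse_identity>
{
    std::vector<op_desc> operators() const { return {{"Identity", "identity"}}; }

    instruction_ref parse(const op_desc& opd,
                          const onnx_parser& /*parser*/,
                          const onnx_parser::node_info& info,
                          const std::vector<instruction_ref>& args) const
    {
        return info.add_instruction(make_op(opd.op_name), args);
    }
};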
#ifndef MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_PADDING_HPP
#define MIGRAPHX_GUARD_AMDMIGRAPHX_ONNX_PADDING_HPP
#include <migraphx/config.hpp>
#include <migraphx/onnx/onnx_parser.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
bool is_asym_padding(const std::vector<int64_t>& padding);
void cal_auto_padding_size(onnx_parser::node_info info,
value& v,
const std::vector<std::size_t>& k_lens,
const std::vector<std::size_t>& dilation,
const std::vector<std::size_t>& in_lens,
std::vector<int64_t>& paddings);
void check_padding_mode(const onnx_parser::node_info& info, const std::string& op_name);
void tune_padding_size(const value& v,
std::vector<int64_t>& padding,
int count_include_pad,
std::vector<int64_t>& s_start);
void check_asym_padding(const onnx_parser::node_info& info,
instruction_ref& ins,
const std::vector<int64_t>& padding,
value& v,
int count_include_pad = 0,
float pad_val = 0);
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
#include <migraphx/onnx/map_activation_functions.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
const std::unordered_map<std::string, operation>& map_activation_functions()
{
static const std::unordered_map<std::string, operation> m = {
{"tanh", make_op("tanh")},
{"relu", make_op("relu")},
{"sigmoid", make_op("sigmoid")},
{"leakyrelu", make_op("leaky_relu")},
{"elu", make_op("elu")}};
return m;
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
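Example lookup, as the RNN parsers added in this commit consume it; the keys are the lowercased ONNX activation names:

migraphx::operation lookup_activation_sketch()
{
    const auto& acts = map_activation_functions();
    // Yields make_op("leaky_relu"); unknown names throw std::out_of_range.
    return acts.at("leakyrelu");
}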
#include <cstdio>
#include <string>
#include <fstream>
#include <numeric>
#include <stdexcept>
#include <migraphx/onnx.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/generate.hpp>
#include "softmax.hpp"
auto reverse_int(unsigned int i)
{
unsigned char c1;
unsigned char c2;
unsigned char c3;
unsigned char c4;
c1 = i & 255u;
c2 = (i >> 8u) & 255u;
c3 = (i >> 16u) & 255u;
c4 = (i >> 24u) & 255u;
return (static_cast<unsigned int>(c1) << 24u) + (static_cast<unsigned int>(c2) << 16u) +
(static_cast<unsigned int>(c3) << 8u) + c4;
};
std::vector<float>
read_mnist_images(const std::string& full_path, int& number_of_images, int& image_size)
{
using uchar = unsigned char;
std::ifstream file(full_path, std::ios::binary);
if(file.is_open())
{
int magic_number = 0;
int n_rows = 0;
int n_cols = 0;
file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
magic_number = reverse_int(magic_number);
if(magic_number != 2051)
throw std::runtime_error("Invalid MNIST image file!");
file.read(reinterpret_cast<char*>(&number_of_images), sizeof(number_of_images));
number_of_images = reverse_int(number_of_images);
file.read(reinterpret_cast<char*>(&n_rows), sizeof(n_rows));
n_rows = reverse_int(n_rows);
file.read(reinterpret_cast<char*>(&n_cols), sizeof(n_cols));
n_cols = reverse_int(n_cols);
image_size = n_rows * n_cols;
std::vector<float> result(number_of_images * image_size);
for(int i = 0; i < number_of_images; i++)
{
for(int j = 0; j < image_size; j++)
{
uchar tmp;
file.read(reinterpret_cast<char*>(&tmp), 1);
result[i * image_size + j] = tmp / 255.0;
}
}
return result;
}
else
{
throw std::runtime_error("Cannot open file `" + full_path + "`!");
}
}
std::vector<int32_t> read_mnist_labels(const std::string& full_path, int& number_of_labels)
{
using uchar = unsigned char;
std::ifstream file(full_path, std::ios::binary);
if(file.is_open())
{
int magic_number = 0;
file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
magic_number = reverse_int(magic_number);
if(magic_number != 2049)
throw std::runtime_error("Invalid MNIST label file!");
file.read(reinterpret_cast<char*>(&number_of_labels), sizeof(number_of_labels));
number_of_labels = reverse_int(number_of_labels);
std::vector<int32_t> result(number_of_labels);
for(int i = 0; i < number_of_labels; i++)
{
uchar tmp;
file.read(reinterpret_cast<char*>(&tmp), 1);
result[i] = tmp;
}
return result;
}
else
{
throw std::runtime_error("Unable to open file `" + full_path + "`!");
}
}
int main(int argc, char const* argv[])
{
if(argc > 3)
{
std::string datafile = argv[2];
std::string labelfile = argv[3];
int nimages = -1;
int image_size = -1;
int nlabels = -1;
std::vector<float> input = read_mnist_images(datafile, nimages, image_size);
std::vector<int32_t> labels = read_mnist_labels(labelfile, nlabels);
std::string file = argv[1];
auto prog = migraphx::parse_onnx(file);
std::cout << prog << std::endl << std::endl;
prog.compile(migraphx::gpu::target{});
auto s = migraphx::shape{migraphx::shape::float_type, {1, 1, 28, 28}};
std::cout << s << std::endl;
auto* ptr = input.data();
migraphx::parameter_map m;
m["output"] =
migraphx::gpu::to_gpu(migraphx::generate_argument(prog.get_parameter_shape("output")));
for(int i = 0; i < 20; i++)
{
std::cout << "label: " << labels[i] << " ----> ";
m["0"] = migraphx::gpu::to_gpu(migraphx::argument{s, &ptr[784 * i]});
auto results = prog.eval(m).back();
auto result = migraphx::gpu::from_gpu(results);
std::vector<float> logits;
result.visit([&](auto output) { logits.assign(output.begin(), output.end()); });
std::vector<float> probs = softmax(logits);
for(auto x : probs)
std::cout << x << " ";
std::cout << std::endl;
}
std::cout << std::endl;
}
}
This diff is collapsed.
#include <migraphx/onnx/onnx_parser.hpp>
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/fallthrough.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pad_calc.hpp>
#include <migraphx/type_traits.hpp>
#include <migraphx/float_equal.hpp>
#include <migraphx/file_buffer.hpp>
#include <migraphx/filesystem.hpp>
#include <migraphx/op/unknown.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
static onnx_parser::attribute_map get_attributes(const onnx::NodeProto& node)
{
std::unordered_map<std::string, onnx::AttributeProto> result;
for(auto&& attr : node.attribute())
{
result[attr.name()] = attr;
}
return result;
}
static literal
create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, const char* data)
{
// in case of scalar constants in the onnx file, use dims = 1 to fill the initializer data
if(dims.empty())
return literal{{shape_type}, data};
return literal{{shape_type, dims}, data};
}
template <class T, MIGRAPHX_REQUIRES(not std::is_pointer<T>{})>
static literal create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, T data)
{
if(dims.empty())
return literal{{shape_type}, data.begin(), data.end()};
return literal{{shape_type, dims}, data.begin(), data.end()};
}
template <class T>
static literal from_repeated(shape::type_t t, const T& r)
{
std::size_t size = r.size();
return literal{{t, {size}}, r.begin(), r.end()};
}
instruction_ref onnx_parser::node_info::make_contiguous(instruction_ref ins) const
{
if(ins->get_shape().standard())
{
return ins;
}
return add_instruction(make_op("contiguous"), ins);
}
instruction_ref onnx_parser::node_info::add_bias(const std::vector<instruction_ref>& args,
instruction_ref curr_ins,
uint64_t axis) const
{
if(args.size() == 3)
{
auto bias_bcast = mm->add_instruction(
make_op("broadcast", {{"axis", axis}, {"dims", curr_ins->get_shape().lens()}}),
args[2]);
return mm->add_instruction(make_op("add"), curr_ins, bias_bcast);
}
return curr_ins;
}
std::vector<std::size_t> compute_broadcasted_lens(std::vector<std::size_t> s0,
std::vector<std::size_t> s1)
{
// Example:
// s0 = (3,2,4,5) and s1 = (2,1,1)
//
// In this case we need to broadcast (:,1,1) portion of
// s1 plus broadcast the 1st dimension of s1
// giving output_lens = (3,2,4,5)
//
// Another example:
// s0 = (3,2,1,5) and s1 = (2,7,5)
// In this case we need to broadcast the (:,:,1:,:) axis
// of s0 plus the 1st dimension of s1 giving
// output_lens = (3,2,7,5)
if(s0.size() > s1.size())
{
s0.swap(s1);
}
std::vector<std::size_t> out_lens(s1);
auto offset = s1.size() - s0.size();
std::transform(
s0.begin(), s0.end(), s1.begin() + offset, out_lens.begin() + offset, [&](auto a, auto b) {
if(a != b and a != 1 and b != 1)
{
MIGRAPHX_THROW("COMPUTE_BROADCASTLEN: shape {" + to_string_range(s0) + "} and {" +
to_string_range(s1) + "} mismatch!");
}
return std::max(a, b);
});
return out_lens;
}
instruction_ref onnx_parser::node_info::add_broadcastable_binary_op(const std::string& op_name,
instruction_ref arg0,
instruction_ref arg1) const
{
if(arg0->get_shape().lens() != arg1->get_shape().lens())
{
// Get lengths for both arguments
auto s0 = arg0->get_shape().lens();
auto s1 = arg1->get_shape().lens();
auto out_lens = compute_broadcasted_lens(s0, s1);
auto l0 = arg0;
if(arg0->get_shape().lens() != out_lens)
l0 = add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}), arg0);
auto l1 = arg1;
if(arg1->get_shape().lens() != out_lens)
l1 = add_instruction(make_op("multibroadcast", {{"output_lens", out_lens}}), arg1);
return add_instruction(make_op(op_name), l0, l1);
}
else
{
return add_instruction(make_op(op_name), {arg0, arg1});
}
}
instruction_ref
onnx_parser::node_info::add_instruction(const operation& op,
const std::vector<instruction_ref>& args) const
{
return mm->add_instruction(op, args);
}
instruction_ref onnx_parser::node_info::add_literal(literal l) const
{
return mm->add_literal(std::move(l));
}
onnx_parser::onnx_parser()
{
// Add all registered op parsers
for(auto&& name : get_op_parsers())
ops.emplace(name, get_op_parser(name));
}
operation onnx_parser::load(const std::string& name, const node_info& info) const
{
auto op = make_op(name);
auto v = op.to_value();
for(auto&& x : v)
{
if(info.attributes.count(x.get_key()) == 0)
continue;
literal s = parse_value(info.attributes.at(x.get_key()));
if(x.is_array())
{
std::vector<value> values;
s.visit([&](auto y) {
std::transform(y.begin(), y.end(), std::back_inserter(values), [](auto z) {
return value(z);
});
});
x = values;
}
else
{
s.visit([&](auto y) { x = y.front(); });
}
}
op.from_value(v);
return op;
}
void onnx_parser::parse_undefined(module* mm, const std::string& name)
{
if(!contains(instructions, name))
{
auto ins = mm->add_instruction(make_op("undefined"));
instructions[name] = ins;
}
}
void onnx_parser::parse_from(std::istream& is, std::string name)
{
this->filename = std::move(name);
auto parent_path = fs::path(this->filename).parent_path();
if(not parent_path.empty())
this->path = parent_path;
onnx::ModelProto model;
if(model.ParseFromIstream(&is))
{
if(model.has_graph())
{
this->parse_graph(model.graph());
}
}
else
{
MIGRAPHX_THROW("Failed reading onnx file.");
}
}
void onnx_parser::parse_from(const void* data, std::size_t size)
{
onnx::ModelProto model;
if(model.ParseFromArray(data, size))
{
if(model.has_graph())
{
this->parse_graph(model.graph());
}
}
else
{
MIGRAPHX_THROW("Failed reading onnx file.");
}
}
void onnx_parser::parse_graph(const onnx::GraphProto& graph)
{
module* mm = prog.get_main_module();
for(auto&& f : graph.initializer())
{
instructions[f.name()] = mm->add_literal(parse_tensor(f));
}
for(auto&& input : graph.input())
{
const std::string& name = input.name();
// input not in initializer_data, so it is a real input
if(!contains(instructions, name))
{
std::vector<std::size_t> dims;
if(map_input_dims.count(name) > 0)
{
dims = map_input_dims.at(name);
}
shape s = parse_type(input.type(), dims);
instructions[name] = mm->add_parameter(name, s);
}
}
for(auto&& node : graph.node())
{
std::vector<instruction_ref> args;
for(auto&& input : node.input())
{
if(input.empty())
{
this->parse_undefined(mm, input);
}
if(instructions.count(input) == 0)
{
MIGRAPHX_THROW("PARSE_GRAPH: invalid onnx file. Input \"" + input +
"\" is unavailable due to unordered nodes!");
}
args.push_back(instructions.at(input));
}
std::vector<instruction_ref> result;
std::size_t output_num = static_cast<std::size_t>(node.output().size());
if(ops.count(node.op_type()) == 0)
{
if(skip_unknown_operators)
result.push_back(mm->add_instruction(op::unknown{node.op_type()}, args));
else
MIGRAPHX_THROW("Unknown operator: " + node.op_type());
}
else
{
result = ops[node.op_type()](
*this, {get_attributes(node), output_num, node.op_type(), mm}, args);
}
output_num = std::min<std::size_t>(output_num, result.size());
std::transform(node.output().begin(),
node.output().begin() + output_num,
result.begin(),
std::inserter(instructions, instructions.end()),
[](auto&& x, auto&& y) { return std::make_pair(x, y); });
}
// Find instructions corresponding to the output
auto prog_output = graph.output();
std::vector<std::string> all_output_names;
std::vector<std::string> prog_output_names;
std::transform(prog_output.begin(),
prog_output.end(),
std::back_inserter(all_output_names),
[](auto& node) { return node.name(); });
std::copy_if(
all_output_names.begin(),
all_output_names.end(),
std::back_inserter(prog_output_names),
[&](const auto& name) { return !(name.empty() or instructions.count(name) == 0); });
std::vector<instruction_ref> output_ins;
std::transform(prog_output_names.begin(),
prog_output_names.end(),
std::back_inserter(output_ins),
[&](const auto& name) { return instructions[name]; });
// add the return instruction
mm->add_return(output_ins);
}
literal onnx_parser::parse_value(const onnx::AttributeProto& attr) const
{
switch(attr.type())
{
case onnx::AttributeProto::FLOAT: return literal{attr.f()};
case onnx::AttributeProto::INT: return literal{attr.i()};
case onnx::AttributeProto::TENSOR: return parse_tensor(attr.t());
case onnx::AttributeProto::FLOATS: return from_repeated(shape::float_type, attr.floats());
case onnx::AttributeProto::INTS: return from_repeated(shape::int64_type, attr.ints());
case onnx::AttributeProto::UNDEFINED:
case onnx::AttributeProto::GRAPH:
case onnx::AttributeProto::STRING:
case onnx::AttributeProto::STRINGS:
case onnx::AttributeProto::TENSORS:
case onnx::AttributeProto::SPARSE_TENSOR:
case onnx::AttributeProto::SPARSE_TENSORS:
case onnx::AttributeProto::GRAPHS: return {};
}
MIGRAPHX_THROW("PARSE_VALUE: Invalid attribute type " + std::to_string(attr.type()));
}
literal onnx_parser::parse_tensor(const onnx::TensorProto& t) const
{
std::vector<std::size_t> dims(t.dims().begin(), t.dims().end());
if(not t.external_data().empty())
{
const std::string& data_file = t.external_data().at(0).value();
auto raw_buffer = read_buffer(path + "/" + data_file);
std::string s(raw_buffer.begin(), raw_buffer.end());
auto type = get_type(t.data_type());
return create_literal(type, dims, s.data());
}
if(t.has_raw_data())
{
const std::string& s = t.raw_data();
auto type = get_type(t.data_type());
return create_literal(type, dims, s.data());
}
switch(t.data_type())
{
case onnx::TensorProto::BOOL: return create_literal(shape::bool_type, dims, t.int32_data());
case onnx::TensorProto::INT8: return create_literal(shape::int8_type, dims, t.int32_data());
case onnx::TensorProto::UINT8: return create_literal(shape::uint8_type, dims, t.int32_data());
case onnx::TensorProto::INT16: return create_literal(shape::int16_type, dims, t.int32_data());
case onnx::TensorProto::UINT16: return create_literal(shape::uint16_type, dims, t.int32_data());
case onnx::TensorProto::INT32: return create_literal(shape::int32_type, dims, t.int32_data());
case onnx::TensorProto::UINT32:
return create_literal(shape::uint32_type, dims, t.uint64_data());
case onnx::TensorProto::INT64: return create_literal(shape::int64_type, dims, t.int64_data());
case onnx::TensorProto::UINT64:
return create_literal(shape::uint64_type, dims, t.uint64_data());
case onnx::TensorProto::FLOAT16:
{
std::vector<uint16_t> data_uint16(t.int32_data().begin(), t.int32_data().end());
std::vector<half> data_half;
std::transform(data_uint16.begin(),
data_uint16.end(),
std::back_inserter(data_half),
[](uint16_t raw_val) { return *reinterpret_cast<half*>(&raw_val); });
return create_literal(shape::half_type, dims, data_half);
}
case onnx::TensorProto::DOUBLE:
return create_literal(shape::double_type, dims, t.double_data());
case onnx::TensorProto::FLOAT: return create_literal(shape::float_type, dims, t.float_data());
case onnx::TensorProto::UNDEFINED:
case onnx::TensorProto::STRING:
case onnx::TensorProto::COMPLEX64:
case onnx::TensorProto::COMPLEX128: throw std::runtime_error("");
}
MIGRAPHX_THROW("PARSE_TENSOR: Invalid tensor type");
}
shape onnx_parser::parse_type(const onnx::TypeProto& t,
const std::vector<std::size_t>& input_dims) const
{
shape::type_t shape_type = get_type(t.tensor_type().elem_type());
if(!input_dims.empty())
{
return {shape_type, input_dims};
}
std::vector<std::size_t> dims;
auto&& tensor_dims = t.tensor_type().shape().dim();
std::transform(tensor_dims.begin(),
tensor_dims.end(),
std::back_inserter(dims),
[&](auto&& d) -> std::size_t {
if(d.has_dim_value())
{
if(static_cast<int>(d.dim_value()) <= 0)
{
return default_dim_value;
}
return d.dim_value();
}
else
{
return default_dim_value;
}
});
if(dims.empty())
return {shape_type};
return {shape_type, dims};
}
shape::type_t get_type(int dtype)
{
switch(dtype)
{
case 1: return shape::float_type;
case 2: return shape::uint8_type;
case 3: return shape::int8_type;
case 4: return shape::uint16_type;
case 5: return shape::int16_type;
case 6: return shape::int32_type;
case 7: return shape::int64_type;
case 9: return shape::bool_type;
case 10: return shape::half_type;
case 11: return shape::double_type;
case 12: return shape::uint32_type;
case 13: return shape::uint64_type;
default: { MIGRAPHX_THROW("Prototensor data type " + std::to_string(dtype) + " not supported");
}
}
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
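A worked example matching the comment block inside compute_broadcasted_lens above:

void broadcast_lens_example()
{
    std::vector<std::size_t> out = compute_broadcasted_lens({3, 2, 1, 5}, {2, 7, 5});
    // out == {3, 2, 7, 5}: trailing dims align right, 1s stretch, and the
    // missing leading dim comes from the longer shape. A non-1 mismatch
    // such as {3, 4} vs {5} throws via MIGRAPHX_THROW.
    (void)out;
}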
#include <migraphx/onnx/op_parser.hpp>
#include <utility>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
std::unordered_map<std::string, onnx_parser::op_func>& op_parser_map()
{
static std::unordered_map<std::string, onnx_parser::op_func> m; // NOLINT
return m;
}
void register_op_parser(const std::string& name, onnx_parser::op_func f)
{
op_parser_map()[name] = std::move(f);
}
onnx_parser::op_func get_op_parser(const std::string& name) { return op_parser_map().at(name); }
std::vector<std::string> get_op_parsers()
{
std::vector<std::string> result;
std::transform(op_parser_map().begin(),
op_parser_map().end(),
std::back_inserter(result),
[&](auto&& p) { return p.first; });
return result;
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/onnx/padding.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/pad_calc.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
void cal_auto_padding_size(onnx_parser::node_info info,
value& v,
const std::vector<std::size_t>& k_lens,
const std::vector<std::size_t>& dilation,
const std::vector<std::size_t>& in_lens,
std::vector<int64_t>& paddings)
{
size_t kdims = in_lens.size() - 2;
assert(k_lens.size() == kdims and dilation.size() == kdims);
if(!contains(info.attributes, "auto_pad"))
{
return;
}
auto auto_pad = info.attributes["auto_pad"].s();
if(auto_pad.find("SAME") != std::string::npos)
{
bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
paddings.resize(2 * kdims);
for(size_t i = 0; i < paddings.size() / 2; i++)
{
calculate_padding(i,
paddings,
in_lens[i + 2],
v["stride"][i].to<int64_t>(),
dilation[i],
k_lens[i],
is_same_upper);
}
}
}
bool is_asym_padding(const std::vector<int64_t>& padding)
{
assert(padding.size() % 2 == 0);
size_t pad_ndims = padding.size() / 2;
for(size_t i = 0; i < pad_ndims; i++)
{
if(padding[i] != padding[i + pad_ndims])
{
return true;
}
}
return false;
}
void check_padding_mode(const onnx_parser::node_info& info, const std::string& op_name)
{
// ensure pads are specified only when auto_pad is "NOTSET"
if(contains(info.attributes, "pads") and contains(info.attributes, "auto_pad"))
{
auto s = info.attributes.at("auto_pad").s();
if(to_upper(s) != "NOTSET")
{
MIGRAPHX_THROW("PARSE_" + op_name +
": auto_pad and padding cannot be specified simultaneously");
}
}
}
static void
tune_padding_to_symmetric(int64_t& left, int64_t& right, const int stride, int64_t& s_start)
{
s_start = 0;
if(left > right)
{
right = left;
}
else if(left < right)
{
auto diff = right - left;
s_start = (diff + stride - 1) / stride;
left = left + s_start * stride;
right = left;
}
}
void tune_padding_size(const value& v,
std::vector<int64_t>& padding,
int count_include_pad,
std::vector<int64_t>& s_start)
{
// for max pooling, or when count_include_pad is 1, no change is required.
if(v.at("mode").to<std::string>() == "max" or count_include_pad == 1)
{
return;
}
// if padding is symmetric, return directly
if(!is_asym_padding(padding))
{
return;
}
// asymmetric padding, make it symmetric
std::size_t n_dims = padding.size() / 2;
s_start.resize(n_dims);
for(std::size_t i = 0; i < n_dims; ++i)
{
tune_padding_to_symmetric(
padding[i], padding[i + n_dims], v.at("stride")[i].to<int64_t>(), s_start[i]);
}
}
void check_asym_padding(const onnx_parser::node_info& info,
instruction_ref& ins,
const std::vector<int64_t>& padding,
value& v,
int count_include_pad,
float pad_val)
{
size_t pad_ndims = padding.size() / 2;
auto left_pad_it = padding.begin();
auto right_pad_it = left_pad_it + pad_ndims;
if(is_asym_padding(padding) or count_include_pad == 1)
{
std::vector<int64_t> asym_pads{0, 0, 0, 0}; // don't pad N and C
// add left pads
asym_pads.insert(asym_pads.begin() + 2, left_pad_it, right_pad_it);
// add right pads
asym_pads.insert(asym_pads.begin() + pad_ndims + 4, right_pad_it, padding.end());
ins = info.add_instruction(make_op("pad", {{"pads", asym_pads}, {"value", pad_val}}), ins);
}
else
{
v["padding"] = std::vector<size_t>(left_pad_it, right_pad_it);
}
}
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
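To make the symmetrization concrete, one invented case worked through tune_padding_size:

void tune_padding_example()
{
    // Average pooling, stride {1, 1}, count_include_pad = 0.
    migraphx::value v;
    v["mode"]   = std::string{"average"};
    v["stride"] = std::vector<std::size_t>{1, 1};
    std::vector<int64_t> padding{0, 0, 1, 1}; // left pads {0, 0}, right pads {1, 1}
    std::vector<int64_t> s_start;
    tune_padding_size(v, padding, /*count_include_pad=*/0, s_start);
    // Each dim has left = 0 < right = 1, so s_start[i] = ceil(1 / 1) = 1 and the
    // left pad grows by stride: padding == {1, 1, 1, 1}, s_start == {1, 1}.
    // The caller can then slice the output starting at s_start to undo the
    // extra left padding.
}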
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
struct parse_arg_op : op_parser<parse_arg_op>
{
std::vector<op_desc> operators() const { return {{"ArgMax", "argmax"}, {"ArgMin", "argmin"}}; }
instruction_ref parse(const op_desc& opd,
const onnx_parser& parser,
onnx_parser::node_info info,
const std::vector<instruction_ref>& args) const
{
int64_t axis = 0;
if(contains(info.attributes, "axis"))
{
axis = static_cast<int64_t>(parser.parse_value(info.attributes.at("axis")).at<int>());
}
int keep_dims = 1;
if(contains(info.attributes, "keepdims"))
{
keep_dims = parser.parse_value(info.attributes.at("keepdims")).at<int>();
}
if(keep_dims == 0)
{
auto ins = info.add_instruction(make_op(opd.op_name, {{"axis", axis}}), args);
return info.add_instruction(make_op("squeeze", {{"axes", {axis}}}), ins);
}
else
{
return info.add_instruction(make_op(opd.op_name, {{"axis", axis}}), args);
}
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/onnx/op_parser.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace onnx {
enum class reduce_mode_t
{
sum = 0,
mean = 1,
max = 2
};
struct parse_aten : op_parser<parse_aten>
{
std::vector<op_desc> operators() const { return {{"ATen"}}; }
instruction_ref parse(const op_desc& /*opd*/,
const onnx_parser& /*parser*/,
onnx_parser::node_info info,
std::vector<instruction_ref> args) const
{
if(contains(info.attributes, "operator"))
{
auto op_name = info.attributes.at("operator").s();
if(op_name.find("embedding_bag") != std::string::npos)
{
return parse_embedding_bag(info, std::move(args));
}
}
MIGRAPHX_THROW("PARSE_ATEN: unsupported custom operator");
}
instruction_ref parse_embedding_bag(onnx_parser::node_info info,
std::vector<instruction_ref> args) const
{
if(args[2]->get_shape().elements() != 1)
MIGRAPHX_THROW("PARSE_EMBEDDING_BAG: MIGraphX only supports offsets of size 1");
reduce_mode_t reduce_mode = reduce_mode_t::sum;
if(contains(info.attributes, "mode"))
{
reduce_mode = static_cast<reduce_mode_t>(info.attributes.at("mode").i());
}
auto l0 = info.add_instruction(make_op("gather"), args[0], args[1]);
switch(reduce_mode)
{
case reduce_mode_t::sum:
l0 = info.add_instruction(make_op("reduce_sum", {{"axes", {0}}}), l0);
break;
case reduce_mode_t::mean:
l0 = info.add_instruction(make_op("reduce_mean", {{"axes", {0}}}), l0);
break;
case reduce_mode_t::max:
l0 = info.add_instruction(make_op("reduce_max", {{"axes", {0}}}), l0);
break;
}
return l0;
}
};
} // namespace onnx
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx