Commit 433f2cdb authored by Khalique

formatting

parent 74343e23
@@ -1392,28 +1392,21 @@ struct onnx_parser
         {
         case onnx::TensorProto::UNDEFINED: throw std::runtime_error("");
         case onnx::TensorProto::FLOAT:
-            return create_literal(
-                shape::float_type, dims, t.float_data());
+            return create_literal(shape::float_type, dims, t.float_data());
         case onnx::TensorProto::UINT8: throw std::runtime_error("");
         case onnx::TensorProto::INT8:
-            return create_literal(
-                shape::int32_type, dims, t.int32_data());
+            return create_literal(shape::int32_type, dims, t.int32_data());
         case onnx::TensorProto::UINT16:
-            return create_literal(
-                shape::int32_type, dims, t.int32_data());
+            return create_literal(shape::int32_type, dims, t.int32_data());
         case onnx::TensorProto::INT16:
-            return create_literal(
-                shape::int32_type, dims, t.int32_data());
+            return create_literal(shape::int32_type, dims, t.int32_data());
         case onnx::TensorProto::INT32:
-            return create_literal(
-                shape::int32_type, dims, t.int32_data());
+            return create_literal(shape::int32_type, dims, t.int32_data());
         case onnx::TensorProto::INT64:
-            return create_literal(
-                shape::int64_type, dims, t.int64_data());
+            return create_literal(shape::int64_type, dims, t.int64_data());
         case onnx::TensorProto::STRING: throw std::runtime_error("");
         case onnx::TensorProto::BOOL:
-            return create_literal(
-                shape::int32_type, dims, t.int32_data());
+            return create_literal(shape::int32_type, dims, t.int32_data());
         case onnx::TensorProto::FLOAT16:
         {
             std::vector<uint16_t> data_uint16(t.int32_data().begin(), t.int32_data().end());
@@ -1425,8 +1418,7 @@ struct onnx_parser
             return create_literal(shape::half_type, dims, data_half);
         }
         case onnx::TensorProto::DOUBLE:
-            return create_literal(
-                shape::double_type, dims, t.double_data());
+            return create_literal(shape::double_type, dims, t.double_data());
         case onnx::TensorProto::UINT32: throw std::runtime_error("");
         case onnx::TensorProto::UINT64: throw std::runtime_error("");
         case onnx::TensorProto::COMPLEX64: throw std::runtime_error("");
@@ -1445,8 +1437,7 @@ struct onnx_parser
     }
     template <class T, MIGRAPHX_REQUIRES(not std::is_pointer<T>{})>
-    static literal
-    create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, T data)
+    static literal create_literal(shape::type_t shape_type, const std::vector<size_t>& dims, T data)
     {
         if(dims.empty())
             return literal{{shape_type, {1}, {0}}, data.begin(), data.end()};
...
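
Note on the FLOAT16 case touched above: ONNX stores half-precision tensor payloads in TensorProto::int32_data, one element per int32, with the fp16 bit pattern in the low 16 bits, which is why the parser copies that range into a std::vector<uint16_t> before building the half-typed literal. Below is a minimal standalone sketch of that unpacking step under those assumptions; it deliberately uses no MIGraphX types, and half_bits_to_float is a hypothetical helper written here only to decode the recovered bits for display.

// Standalone sketch (not MIGraphX code): recover fp16 bits from an
// int32_data-style buffer and decode them to float for inspection.
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper: decode an IEEE 754 binary16 bit pattern to float.
float half_bits_to_float(uint16_t h)
{
    const uint16_t sign = (h >> 15) & 0x1;
    const uint16_t exp  = (h >> 10) & 0x1f;
    const uint16_t frac = h & 0x3ff;
    float value;
    if(exp == 0)
        value = std::ldexp(static_cast<float>(frac), -24); // subnormal
    else if(exp == 31)
        value = (frac == 0) ? INFINITY : NAN; // inf / nan
    else
        value = std::ldexp(static_cast<float>(frac | 0x400), exp - 25); // normal
    return sign ? -value : value;
}

int main()
{
    // Stand-in for a FLOAT16 TensorProto's int32_data: 1.0, -2.0, 0.5 as fp16 bits.
    std::vector<int32_t> int32_data = {0x3c00, 0xc000, 0x3800};

    // Same narrowing step the parser uses: converting int32 -> uint16 keeps the
    // low 16 bits, i.e. the raw half-precision bit pattern of each element.
    std::vector<uint16_t> data_uint16(int32_data.begin(), int32_data.end());

    for(uint16_t bits : data_uint16)
        std::cout << half_bits_to_float(bits) << '\n'; // prints 1, -2, 0.5
}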