Unverified Commit 55daacd0 authored by Shucai Xiao's avatar Shucai Xiao Committed by GitHub
Browse files

upgrade onnx.proto and protobuf (#446)

parent 1b692d0f
...@@ -1897,6 +1897,8 @@ struct onnx_parser ...@@ -1897,6 +1897,8 @@ struct onnx_parser
case onnx::AttributeProto::STRING: case onnx::AttributeProto::STRING:
case onnx::AttributeProto::STRINGS: case onnx::AttributeProto::STRINGS:
case onnx::AttributeProto::TENSORS: case onnx::AttributeProto::TENSORS:
case onnx::AttributeProto::SPARSE_TENSOR:
case onnx::AttributeProto::SPARSE_TENSORS:
case onnx::AttributeProto::GRAPHS: return {}; case onnx::AttributeProto::GRAPHS: return {};
} }
MIGRAPHX_THROW("Invalid attribute type"); MIGRAPHX_THROW("Invalid attribute type");
......
...@@ -3,24 +3,42 @@ ...@@ -3,24 +3,42 @@
// //
// Copyright (c) Facebook Inc. and Microsoft Corporation. // Copyright (c) ONNX Project Contributors.
// Licensed under the MIT license. // Licensed under the MIT license.
syntax = "proto2"; syntax = "proto2";
package onnx; package onnx;
// Note [Release] // Overview
//
// ONNX is an open specification that is comprised of the following components:
//
// 1) A definition of an extensible computation graph model.
// 2) Definitions of standard data types.
// 3) Definitions of built-in operators.
//
// This document describes the syntax of models and their computation graphs,
// as well as the standard data types. Together, they are referred to as the ONNX
// Intermediate Representation, or 'IR' for short.
//
// The normative semantic specification of the ONNX IR is found in docs/IR.md.
// Definitions of the built-in neural network operators may be found in docs/Operators.md.
// Notes
//
// Release
//
// We are still in the very early stage of defining ONNX. The current // We are still in the very early stage of defining ONNX. The current
// version of ONNX is a starting point. While we are actively working // version of ONNX is a starting point. While we are actively working
// towards a complete spec, we would like to get the community involved // towards a complete spec, we would like to get the community involved
// by sharing our working version of ONNX. // by sharing our working version of ONNX.
//
// Note [Protobuf compatibility] // Protobuf compatibility
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
// Based on experience working with downstream vendors, we generally can't // To simplify framework compatibility, ONNX is defined using the subset of protobuf
// assume recent versions of protobufs. This means that we do not use any // that is compatible with both protobuf v2 and v3. This means that we do not use any
// protobuf features that are only available in proto3. // protobuf features that are only available in one of the two versions.
// //
// Here are the most notable contortions we have to carry out to work around // Here are the most notable contortions we have to carry out to work around
// these limitations: // these limitations:
...@@ -29,30 +47,11 @@ package onnx; ...@@ -29,30 +47,11 @@ package onnx;
// of key-value pairs, where order does not matter and duplicates // of key-value pairs, where order does not matter and duplicates
// are not allowed. // are not allowed.
// Note [Namespaces]
// ~~~~~~~~~~~~~~~~~ // Versioning
// ONNX gives explicit names to graphs, intermediate values and
// serialized tensors. To make it easier to generate names, we organize
// these into separate namespaces (so, e.g., a graph can have the same
// name as a serialized tensor.) The namespaces are as follows:
//
// - Node: These names identify specific nodes in the graph (but not, necessarily
// any particular input or output of the node.
// - Graph: These names identify graphs in the protobuf.
// - Attribute: These names identify attribute names for extra attributes that
// are passed to operators.
// - Operator: These names identify particular operators.
// - Value: These names identify intermediate values (typically tensors) flowing through
// the computation of a graph.
// - Shape: These names represent parameters for unknown shape dimensions.
// //
// We specify the namespace of a name in ONNX as comments in the form // ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
// of "namespace {Node,Graph,Operator,Attribute,Value,Shape}". Framework is responsible
// for supporting the namespaces.
// //
// Naming things is hard. Every element with a name has an optional doc_string associated
// with it, providing a human-readable description in text markdown.
// To be compatible with both proto2 and proto3, we will use a version number // To be compatible with both proto2 and proto3, we will use a version number
// that is not defined by the default value but an explicit enum number. // that is not defined by the default value but an explicit enum number.
enum Version { enum Version {
...@@ -61,26 +60,53 @@ enum Version { ...@@ -61,26 +60,53 @@ enum Version {
_START_VERSION = 0; _START_VERSION = 0;
// The version field is always serialized and we will use it to store the // The version field is always serialized and we will use it to store the
// version that the graph is generated from. This helps us set up version // version that the graph is generated from. This helps us set up version
// control. We should use version as // control.
// xx(major) - xx(minor) - xxxx(bugfix) // For the IR, we are using simple numbers starting with 0x00000001,
// and we are starting with 0x00000001 (0.0.1), which was the // which was the version we published on Oct 10, 2017.
// version we published on Oct 10, 2017. IR_VERSION_2017_10_10 = 0x0000000000000001;
IR_VERSION_2017_10_10 = 0x00000001;
// IR_VERSION 0.0.2 published on Oct 30, 2017 // IR_VERSION 2 published on Oct 30, 2017
// - Added type discriminator to AttributeProto to support proto3 users // - Added type discriminator to AttributeProto to support proto3 users
IR_VERSION_2017_10_30 = 0x00000002; IR_VERSION_2017_10_30 = 0x0000000000000002;
// IR VERSION 0.0.3 published on Nov 3, 2017 // IR VERSION 3 published on Nov 3, 2017
// - For operator versioning: // - For operator versioning:
// - Added new message OperatorSetIdProto // - Added new message OperatorSetIdProto
// - Added opset_import in ModelProto // - Added opset_import in ModelProto
// - For vendor extensions, added domain in NodeProto // - For vendor extensions, added domain in NodeProto
IR_VERSION = 0x00000003; IR_VERSION_2017_11_3 = 0x0000000000000003;
// IR VERSION 4 published on Jan 22, 2019
// - Relax constraint that initializers should be a subset of graph inputs
// - Add type BFLOAT16
IR_VERSION_2019_1_22 = 0x0000000000000004;
// IR VERSION 5 published on March 18, 2019
// - Add message TensorAnnotation.
// - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters.
IR_VERSION_2019_3_18 = 0x0000000000000005;
// IR VERSION 6 published on Sep 19, 2019
// - Add support for sparse tensor constants stored in model.
// - Add message SparseTensorProto
// - Add sparse initializers
IR_VERSION_2019_9_19 = 0x0000000000000006;
// IR VERSION 7 published on <TBD>
// - Add a list to promote inference graph's initializers to global and
// mutable variables. Global variables are visible in all graphs of the
// stored models.
// - Add message TrainingInfoProto to store initialization
// method and training algorithm. The execution of TrainingInfoProto
// can modify the values of mutable variables.
// - Make inference graph callable from TrainingInfoProto via GraphCall operator.
IR_VERSION = 0x0000000000000007;
} }
// A named attribute containing either singular float, integer, string // Attributes
// and tensor values, or repeated float, integer, string and tensor values. //
// A named attribute containing either singular float, integer, string, graph,
// and tensor values, or repeated float, integer, string, graph, and tensor values.
// An AttributeProto MUST contain the name field, and *only one* of the // An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent. // following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto { message AttributeProto {
...@@ -94,26 +120,34 @@ message AttributeProto { ...@@ -94,26 +120,34 @@ message AttributeProto {
STRING = 3; STRING = 3;
TENSOR = 4; TENSOR = 4;
GRAPH = 5; GRAPH = 5;
SPARSE_TENSOR = 11;
FLOATS = 6; FLOATS = 6;
INTS = 7; INTS = 7;
STRINGS = 8; STRINGS = 8;
TENSORS = 9; TENSORS = 9;
GRAPHS = 10; GRAPHS = 10;
SPARSE_TENSORS = 12;
} }
// The name field MUST be present for this version of the IR. // The name field MUST be present for this version of the IR.
optional string name = 1; // namespace Attribute optional string name = 1; // namespace Attribute
// if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
// In this case, this AttributeProto does not contain data, and it's a reference of attribute
// in parent scope.
// NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
optional string ref_attr_name = 21;
// A human-readable documentation for this attribute. Markdown is allowed. // A human-readable documentation for this attribute. Markdown is allowed.
optional string doc_string = 13; optional string doc_string = 13;
// The type field MUST be present for this version of the IR. // The type field MUST be present for this version of the IR.
// For 0.0.1 versions of the IR, this field was not defined, and // For 0.0.1 versions of the IR, this field was not defined, and
// implementations needed to use has_field hueristics to determine // implementations needed to use has_field heuristics to determine
// which value field was in use. For IR_VERSION 0.0.2 or later, this // which value field was in use. For IR_VERSION 0.0.2 or later, this
// field MUST be set and match the f|i|s|t|... field in use. This // field MUST be set and match the f|i|s|t|... field in use. This
// change was made to accomodate proto3 implementations. // change was made to accommodate proto3 implementations.
optional AttributeType type = 20; // discriminator that indicates which field below is in use optional AttributeType type = 20; // discriminator that indicates which field below is in use
// Exactly ONE of the following fields must be present for this version of the IR // Exactly ONE of the following fields must be present for this version of the IR
...@@ -122,6 +156,7 @@ message AttributeProto { ...@@ -122,6 +156,7 @@ message AttributeProto {
optional bytes s = 4; // UTF-8 string optional bytes s = 4; // UTF-8 string
optional TensorProto t = 5; // tensor value optional TensorProto t = 5; // tensor value
optional GraphProto g = 6; // graph optional GraphProto g = 6; // graph
optional SparseTensorProto sparse_tensor = 22; // sparse tensor value
// Do not use field below, it's deprecated. // Do not use field below, it's deprecated.
// optional ValueProto v = 12; // value - subsumes everything but graph // optional ValueProto v = 12; // value - subsumes everything but graph
...@@ -130,6 +165,7 @@ message AttributeProto { ...@@ -130,6 +165,7 @@ message AttributeProto {
repeated bytes strings = 9; // list of UTF-8 strings repeated bytes strings = 9; // list of UTF-8 strings
repeated TensorProto tensors = 10; // list of tensors repeated TensorProto tensors = 10; // list of tensors
repeated GraphProto graphs = 11; // list of graph repeated GraphProto graphs = 11; // list of graph
repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors
} }
// Defines information on value, including the name, the type, and // Defines information on value, including the name, the type, and
...@@ -137,16 +173,20 @@ message AttributeProto { ...@@ -137,16 +173,20 @@ message AttributeProto {
message ValueInfoProto { message ValueInfoProto {
// This field MUST be present in this version of the IR. // This field MUST be present in this version of the IR.
optional string name = 1; // namespace Value optional string name = 1; // namespace Value
// This field MUST be present in this version of the IR. // This field MUST be present in this version of the IR for
// inputs and outputs of the top-level graph.
optional TypeProto type = 2; optional TypeProto type = 2;
// A human-readable documentation for this value. Markdown is allowed. // A human-readable documentation for this value. Markdown is allowed.
optional string doc_string = 3; optional string doc_string = 3;
} }
// NodeProto stores a node that is similar to the notion of "layer" // Nodes
// or "operator" in many deep learning frameworks. For example, it can be a //
// node of type "Conv" that takes in an image, a filter tensor and a bias // Computation graphs are made up of a DAG of nodes, which represent what is
// tensor, and produces the convolved output. // commonly called a "layer" or "pipeline stage" in machine learning frameworks.
//
// For example, it can be a node of type "Conv" that takes in an image, a filter
// tensor and a bias tensor, and produces the convolved output.
message NodeProto { message NodeProto {
repeated string input = 1; // namespace Value repeated string input = 1; // namespace Value
repeated string output = 2; // namespace Value repeated string output = 2; // namespace Value
...@@ -161,18 +201,125 @@ message NodeProto { ...@@ -161,18 +201,125 @@ message NodeProto {
optional string domain = 7; // namespace Domain optional string domain = 7; // namespace Domain
// Additional named attributes. // Additional named attributes.
// NOTE: Simply using ValueProto.NameValuePairProto is the most general
// solution. I kept AttributeProto to minimize churn on CI results.
repeated AttributeProto attribute = 5; repeated AttributeProto attribute = 5;
// A human-readable documentation for this node. Markdown is allowed. // A human-readable documentation for this node. Markdown is allowed.
optional string doc_string = 6; optional string doc_string = 6;
} }
// ModelProto is a top-level file/container format for bundling a ML model. // Training information
// The semantics of the model are described by the GraphProto that represents // TrainingInfoProto stores information for training a model.
// a parameterized computation graph against a set of named operators that are // In particular, this defines two functionalities: an initialization-step
// defined independently from the graph. // and a training-algorithm-step. Initialization resets the model
// back to its original state as if no training has been consumed.
// Training algorithm improves the model based on input data.
//
// The semantics of the initialization-step is that the initializers
// in ModelProto.graph and in TrainingInfoProto.algorithm are first
// initialized as specified by the initializers in the graph, and then
// updated by the "initialization_binding" in every instance in
// ModelProto.training_info.
//
// The field "algorithm" defines a computation graph which represents a
// training algorithm's step. After the execution of a
// TrainingInfoProto.algorithm, the initializers specified by "update_binding"
// may be immediately updated. If the targeted training algorithm contains
// consecutive update stages (such as block coordinate descent methods),
// the user needs to create a TrainingInfoProto for each stage.
message TrainingInfoProto {
// This field describes a graph to compute the initial tensors
// upon starting the training process. Initialization graph has no input
// and can have multiple outputs. Usually, trainable tensors in neural
// networks are randomly initialized. To achieve that, for each tensor,
// the user can put a random number operator such as RandomNormal or
// RandomUniform in TrainingInfoProto.initialization.node and assign its
// random output to the specific tensor using "initialization_binding".
// This graph can also set the initializers in "algorithm" in the same
// TrainingInfoProto; a use case is resetting the number of training
// iteration to zero.
//
// By default, this field is an empty graph and its evaluation does not
// produce any output.
optional GraphProto initialization = 1;
// This field represents a training algorithm step. Given required inputs,
// it computes outputs to update initializers in its own or inference graph's
// initializer lists. In general, this graph contains loss node, gradient node,
// optimizer node, increment of iteration count, and some calls to the inference
// graph.
//
// The field algorithm.node is the only place the user can use GraphCall
// operator. The only callable graph is the one stored in ModelProto.graph.
//
// By default, this field is an empty graph and its evaluation does not
// produce any output.
optional GraphProto algorithm = 2;
// This field specifies the bindings from the outputs of "initialization" to
// some initializers in "ModelProto.graph.initializer" and
// the "algorithm.initializer" in the same TrainingInfoProto.
// See "update_binding" below for details.
//
// By default, this field is empty and no initializer would be changed
// by the execution of "initialization".
repeated StringStringEntryProto initialization_binding = 3;
// Gradient-based training is usually an iterative procedure. In one gradient
// descent iteration, we apply
//
// x = x - r * g
//
// where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
// gradient of "x" with respect to a chosen loss. To avoid adding assignments
// into the training graph, we split the update equation into
//
// y = x - r * g
// x = y
//
// The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
// tell that "y" should be assigned to "x", the field "update_binding" may
// contain a key-value pair of strings, "x" (key of StringStringEntryProto)
// and "y" (value of StringStringEntryProto).
// For a neural network with multiple trainable (mutable) tensors, there can
// be multiple key-value pairs in "update_binding".
//
// The initializers appearing as keys in "update_binding" are considered
// mutable and globally-visible variables. This implies some behaviors
// as described below.
//
// 1. We have only unique keys in all "update_binding"s so that two global
// variables may not have the same name. This ensures that one
// global variable is assigned up to once.
// 2. The keys must appear in names of "ModelProto.graph.initializer" or
// "TrainingInfoProto.algorithm.initializer".
// 3. The values must be output names of "algorithm".
// 4. If an optional input of a graph is omitted when using GraphCall, the
// global variable with the same name may be used.
// 5. When using GraphCall, the users always can pass values to optional
// inputs of the called graph even if the associated initializers appear
// as keys in "update_binding"s.
// 6. The graphs in TrainingInfoProto's can use global variables as
// their operator inputs.
// 7. Mutable variables are initialized to the value specified by the
// corresponding initializer, and then potentially updated by
// "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
//
// This field usually contains names of trainable tensors
// (in ModelProto.graph), optimizer states such as momentums in advanced
// stochastic gradient methods (in TrainingInfoProto.graph),
// and number of training iterations (in TrainingInfoProto.graph).
//
// By default, this field is empty and no initializer would be changed
// by the execution of "algorithm".
repeated StringStringEntryProto update_binding = 4;
}
// Models
//
// ModelProto is a top-level file/container format for bundling a ML model and
// associating its computation graph with metadata.
//
// The semantics of the model are described by the associated GraphProto's.
message ModelProto { message ModelProto {
// The version of the IR this model targets. See Version enum above. // The version of the IR this model targets. See Version enum above.
// This field MUST be present. // This field MUST be present.
...@@ -217,6 +364,17 @@ message ModelProto { ...@@ -217,6 +364,17 @@ message ModelProto {
// Named metadata values; keys should be distinct. // Named metadata values; keys should be distinct.
repeated StringStringEntryProto metadata_props = 14; repeated StringStringEntryProto metadata_props = 14;
// Training-specific information. Sequentially executing all stored
// `TrainingInfoProto.algorithm`s and assigning their outputs following
// the corresponding `TrainingInfoProto.update_binding`s is one training
// iteration. Similarly, to initialize the model
// (as if training hasn't happened), the user should sequentially execute
// all stored `TrainingInfoProto.initialization`s and assigns their outputs
// using `TrainingInfoProto.initialization_binding`s.
//
// If this field is empty, the training behavior of the model is undefined.
repeated TrainingInfoProto training_info = 20;
}; };
// StringStringEntryProto follows the pattern for cross-proto-version maps. // StringStringEntryProto follows the pattern for cross-proto-version maps.
...@@ -226,25 +384,38 @@ message StringStringEntryProto { ...@@ -226,25 +384,38 @@ message StringStringEntryProto {
optional string value= 2; optional string value= 2;
}; };
// GraphProto defines a parameterized series of nodes to form a directed acyclic graph. message TensorAnnotation {
// This is the equivalent of the "network" and "graph" in many deep learning optional string tensor_name = 1;
// <key, value> pairs to annotate tensor specified by <tensor_name> above.
// The keys used in the mapping below must be pre-defined in ONNX spec.
// For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as
// quantization parameter keys.
repeated StringStringEntryProto quant_parameter_tensor_names = 2;
}
// Graphs
//
// A graph defines the computational logic of a model and is comprised of a parameterized
// list of nodes that form a directed acyclic graph based on their inputs and outputs.
// This is the equivalent of the "network" or "graph" in many deep learning
// frameworks. // frameworks.
message GraphProto { message GraphProto {
// The nodes in the graph. // The nodes in the graph, sorted topologically.
repeated NodeProto node = 1; repeated NodeProto node = 1;
// The name of the graph. // The name of the graph.
optional string name = 2; // namespace Graph optional string name = 2; // namespace Graph
// A list of named tensor values (constants), used to specify default // A list of named tensor values, used to specify constant inputs of the graph.
// values for some of the inputs of the graph.
// Each TensorProto entry must have a distinct name (within the list) that // Each TensorProto entry must have a distinct name (within the list) that
// also appears in the input list. // MAY also appear in the input list.
// In an evaluation, the default value specified here is used if and only if
// user specifies no value for the corresponding input parameter.
// May be used to pass serialized parameters for networks.
repeated TensorProto initializer = 5; repeated TensorProto initializer = 5;
// Initializers (see above) stored in sparse format.
repeated SparseTensorProto sparse_initializer = 15;
// A human-readable documentation for this graph. Markdown is allowed. // A human-readable documentation for this graph. Markdown is allowed.
optional string doc_string = 10; optional string doc_string = 10;
...@@ -256,7 +427,13 @@ message GraphProto { ...@@ -256,7 +427,13 @@ message GraphProto {
// must be distinct. It is optional for a value to appear in value_info list. // must be distinct. It is optional for a value to appear in value_info list.
repeated ValueInfoProto value_info = 13; repeated ValueInfoProto value_info = 13;
// DO NOT USE the following fields, they were deprecated before // This field carries information to indicate the mapping among a tensor and its
// quantization parameter tensors. For example:
// For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
// which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
repeated TensorAnnotation quantization_annotation = 14;
// DO NOT USE the following fields, they were deprecated from earlier versions.
// repeated string input = 3; // repeated string input = 3;
// repeated string output = 4; // repeated string output = 4;
// optional int64 ir_version = 6; // optional int64 ir_version = 6;
...@@ -265,7 +442,9 @@ message GraphProto { ...@@ -265,7 +442,9 @@ message GraphProto {
// optional string domain = 9; // optional string domain = 9;
} }
// A message defined to store a tensor in its serialized format. // Tensors
//
// A serialized tensor value.
message TensorProto { message TensorProto {
enum DataType { enum DataType {
UNDEFINED = 0; UNDEFINED = 0;
...@@ -280,13 +459,21 @@ message TensorProto { ...@@ -280,13 +459,21 @@ message TensorProto {
STRING = 8; // string STRING = 8; // string
BOOL = 9; // bool BOOL = 9; // bool
// Advanced types // IEEE754 half-precision floating-point format (16 bits wide).
// This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
FLOAT16 = 10; FLOAT16 = 10;
DOUBLE = 11; DOUBLE = 11;
UINT32 = 12; UINT32 = 12;
UINT64 = 13; UINT64 = 13;
COMPLEX64 = 14; // complex with float32 real and imaginary components COMPLEX64 = 14; // complex with float32 real and imaginary components
COMPLEX128 = 15; // complex with float64 real and imaginary components COMPLEX128 = 15; // complex with float64 real and imaginary components
// Non-IEEE floating-point format based on IEEE754 single-precision
// floating-point number truncated to 16 bits.
// This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
BFLOAT16 = 16;
// Future extensions go here. // Future extensions go here.
} }
...@@ -294,7 +481,8 @@ message TensorProto { ...@@ -294,7 +481,8 @@ message TensorProto {
repeated int64 dims = 1; repeated int64 dims = 1;
// The data type of the tensor. // The data type of the tensor.
optional DataType data_type = 2; // This field MUST have a valid TensorProto.DataType value
optional int32 data_type = 2;
// For very large tensors, we may want to store them in chunks, in which // For very large tensors, we may want to store them in chunks, in which
// case the following fields will specify the segment that is stored in // case the following fields will specify the segment that is stored in
...@@ -305,7 +493,7 @@ message TensorProto { ...@@ -305,7 +493,7 @@ message TensorProto {
} }
optional Segment segment = 3; optional Segment segment = 3;
// Tensor content must be in the row major order. // Tensor content must be organized in row-major order.
// //
// Depending on the data_type field, exactly one of the fields below with // Depending on the data_type field, exactly one of the fields below with
// name ending in _data is used to store the elements of the tensor. // name ending in _data is used to store the elements of the tensor.
...@@ -313,7 +501,7 @@ message TensorProto { ...@@ -313,7 +501,7 @@ message TensorProto {
// For float and complex64 values // For float and complex64 values
// Complex64 tensors are encoded as a single array of floats, // Complex64 tensors are encoded as a single array of floats,
// with the real components appearing in odd numbered positions, // with the real components appearing in odd numbered positions,
// and the corresponding imaginary component apparing in the // and the corresponding imaginary component appearing in the
// subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
// is encoded as [1.0, 2.0 ,3.0 ,4.0] // is encoded as [1.0, 2.0 ,3.0 ,4.0]
// When this field is present, the data_type field MUST be FLOAT or COMPLEX64. // When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
...@@ -323,7 +511,7 @@ message TensorProto { ...@@ -323,7 +511,7 @@ message TensorProto {
// float16 values must be bit-wise converted to an uint16_t prior // float16 values must be bit-wise converted to an uint16_t prior
// to writing to the buffer. // to writing to the buffer.
// When this field is present, the data_type field MUST be // When this field is present, the data_type field MUST be
// INT32, INT16, INT8, UINT16, INT8, BOOL, or FLOAT32 // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16
repeated int32 int32_data = 5 [packed = true]; repeated int32 int32_data = 5 [packed = true];
// For strings. // For strings.
...@@ -360,10 +548,32 @@ message TensorProto { ...@@ -360,10 +548,32 @@ message TensorProto {
// When this field is present, the data_type field MUST NOT be STRING or UNDEFINED // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
optional bytes raw_data = 9; optional bytes raw_data = 9;
// Data can be stored inside the protobuf file using type-specific fields or raw_data.
// Alternatively, raw bytes data can be stored in an external file, using the external_data field.
// external_data stores key-value pairs describing data location. Recognized keys are:
// - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
// protobuf model was stored
// - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
// Offset values SHOULD be multiples of 4096 (page size) to enable mmap support.
// - "length" (optional) - number of bytes containing data. Integer stored as string.
// - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
repeated StringStringEntryProto external_data = 13;
// Location of the data for this tensor. MUST be one of:
// - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field.
// - EXTERNAL - data stored in an external location as described by external_data field.
enum DataLocation {
DEFAULT = 0;
EXTERNAL = 1;
}
// If value not set, data is stored in raw_data (if set) otherwise in type-specified field.
optional DataLocation data_location = 14;
// For double // For double
// Complex64 tensors are encoded as a single array of doubles, // Complex128 tensors are encoded as a single array of doubles,
// with the real components appearing in odd numbered positions, // with the real components appearing in odd numbered positions,
// and the corresponding imaginary component apparing in the // and the corresponding imaginary component appearing in the
// subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
// is encoded as [1.0, 2.0 ,3.0 ,4.0] // is encoded as [1.0, 2.0 ,3.0 ,4.0]
// When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
...@@ -375,6 +585,28 @@ message TensorProto { ...@@ -375,6 +585,28 @@ message TensorProto {
repeated uint64 uint64_data = 11 [packed = true]; repeated uint64 uint64_data = 11 [packed = true];
} }
// A serialized sparse-tensor value
message SparseTensorProto {
// The sequence of non-default values is encoded as a tensor of shape [NNZ].
// The default-value is zero for numeric tensors, and empty-string for string tensors.
optional TensorProto values = 1;
// The indices of the non-default values, which may be stored in one of two formats.
// (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
// corresponding to the j-th index of the i-th value (in the values tensor).
// (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
// must be the linearized-index of the i-th value (in the values tensor).
// The linearized-index can be converted into an index tuple (k_1,...,k_rank)
// using the shape provided below.
// The indices must appear in ascending order without duplication.
// In the first format, the ordering is lexicographic-ordering:
// e.g., index-value [1,4] must appear before [2,1]
optional TensorProto indices = 2;
// The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
repeated int64 dims = 3;
}
// Defines a tensor shape. A dimension can be either an integer value // Defines a tensor shape. A dimension can be either an integer value
// or a symbolic variable. A symbolic variable represents an unknown // or a symbolic variable. A symbolic variable represents an unknown
// dimension. // dimension.
...@@ -384,28 +616,73 @@ message TensorShapeProto { ...@@ -384,28 +616,73 @@ message TensorShapeProto {
int64 dim_value = 1; int64 dim_value = 1;
string dim_param = 2; // namespace Shape string dim_param = 2; // namespace Shape
}; };
// Standard denotation can optionally be used to denote tensor
// dimensions with standard semantic descriptions to ensure
// that operations are applied to the correct axis of a tensor.
// Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition
// for pre-defined dimension denotations.
optional string denotation = 3;
}; };
repeated Dimension dim = 1; repeated Dimension dim = 1;
} }
// Define the types. // Types
//
// The standard ONNX data types.
message TypeProto { message TypeProto {
message Tensor { message Tensor {
// This field MUST NOT have the value of UNDEFINED // This field MUST NOT have the value of UNDEFINED
// This field MUST have a valid TensorProto.DataType value
// This field MUST be present for this version of the IR. // This field MUST be present for this version of the IR.
optional TensorProto.DataType elem_type = 1; optional int32 elem_type = 1;
optional TensorShapeProto shape = 2; optional TensorShapeProto shape = 2;
} }
// repeated T
message Sequence {
// The type and optional shape of each element of the sequence.
// This field MUST be present for this version of the IR.
optional TypeProto elem_type = 1;
};
// map<K,V>
message Map {
// This field MUST have a valid TensorProto.DataType value
// This field MUST be present for this version of the IR.
// This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING
optional int32 key_type = 1;
// This field MUST be present for this version of the IR.
optional TypeProto value_type = 2;
};
oneof value { oneof value {
// The type of a tensor. // The type of a tensor.
Tensor tensor_type = 1; Tensor tensor_type = 1;
// NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values
// as input and output to graphs and nodes. These types are needed to naturally
// support classical ML operators. DNN operators SHOULD restrict their input
// and output types to tensors.
// The type of a sequence.
Sequence sequence_type = 4;
// The type of a map.
Map map_type = 5;
} }
// An optional denotation can be used to denote the whole
// type with a standard semantic description as to what is
// stored inside. Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition
// for pre-defined type denotations.
optional string denotation = 6;
} }
// Operator Sets
//
// OperatorSets are uniquely identified by a (domain, opset_version) pair. // OperatorSets are uniquely identified by a (domain, opset_version) pair.
message OperatorSetIdProto { message OperatorSetIdProto {
// The domain of the operator set being identified. // The domain of the operator set being identified.
...@@ -418,3 +695,8 @@ message OperatorSetIdProto { ...@@ -418,3 +695,8 @@ message OperatorSetIdProto {
// This field MUST be present in this version of the IR. // This field MUST be present in this version of the IR.
optional int64 version = 2; optional int64 version = 2;
} }
// For using protobuf-lite
option optimize_for = LITE_RUNTIME;
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment