syntax = "proto2";

package object_detection.protos;

// Configuration proto for the convolution op hyperparameters to use in the
// object detection pipeline.
message Hyperparams {

  // Operations affected by hyperparameters.
  enum Op {
    // Convolution, Separable Convolution, Convolution transpose.
    CONV = 1;

    // Fully connected.
    FC = 2;
  }
  optional Op op = 1 [default = CONV];

  // Regularizer for the weights of the convolution op.
  optional Regularizer regularizer = 2;

  // Initializer for the weights of the convolution op.
  optional Initializer initializer = 3;

  // Type of activation to apply after convolution.
  enum Activation {
    // Use None (no activation)
    NONE = 0;

    // Use tf.nn.relu
    RELU = 1;

    // Use tf.nn.relu6
    RELU_6 = 2;

    // Use tf.nn.swish
    SWISH = 3;
  }
  optional Activation activation = 4 [default = RELU];

  oneof normalizer_oneof {
    // Note that if none of the options below is selected, no normalization is
    // applied.

    // BatchNorm hyperparameters.
    BatchNorm batch_norm = 5;
    // SyncBatchNorm hyperparameters (KerasLayerHyperparams only).
    BatchNorm sync_batch_norm = 9;
    // GroupNorm hyperparameters. This is only supported on a subset of models.
    // Note that the current implementation of group norm, instantiated via
    // tf.contrib.layers.group_norm(), only supports fixed_size_resizer for
    // image preprocessing.
    GroupNorm group_norm = 7;
  }

  // Whether depthwise convolutions should be regularized. If this parameter is
  // NOT set, the conv hyperparams default to those of the parent scope.
  optional bool regularize_depthwise = 6 [default = false];

  // By default, use_bias is set to False if batch_norm is not None and
  // batch_norm.center is True. When force_use_bias is set to True, this
  // behavior is overridden and use_bias is set to True regardless of the
  // batch norm parameters. Note that this only applies to
  // KerasLayerHyperparams.
  optional bool force_use_bias = 8 [default = false];
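
  // For example (illustrative, derived from the comment above): with
  //   batch_norm { center: true }
  // a Keras conv layer is built with use_bias=False; additionally setting
  //   force_use_bias: true
  // builds it with use_bias=True despite the batch norm centering.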
}
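
// A minimal illustrative Hyperparams message in proto text format, e.g. as it
// might appear under a conv_hyperparams field of a pipeline config. All
// values are examples, not recommendations:
//
//   op: CONV
//   activation: RELU_6
//   regularizer {
//     l2_regularizer {
//       weight: 0.0004
//     }
//   }
//   initializer {
//     truncated_normal_initializer {
//       stddev: 0.03
//     }
//   }
//   batch_norm {
//     decay: 0.999
//     scale: true
//   }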

// Proto with one-of field for regularizers.
message Regularizer {
  oneof regularizer_oneof {
    L1Regularizer l1_regularizer = 1;
    L2Regularizer l2_regularizer = 2;
  }
}

// Configuration proto for L1 Regularizer.
// See https://www.tensorflow.org/api_docs/python/tf/contrib/layers/l1_regularizer
message L1Regularizer {
  optional float weight = 1 [default = 1.0];
}

// Configuration proto for L2 Regularizer.
// See https://www.tensorflow.org/api_docs/python/tf/contrib/layers/l2_regularizer
message L2Regularizer {
  optional float weight = 1 [default = 1.0];
}
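
// Illustrative text-format usage of the Regularizer one-of; the weight shown
// is an example value only:
//
//   regularizer {
//     l1_regularizer {
//       weight: 0.00004
//     }
//   }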

// Proto with one-of field for initializers.
message Initializer {
  oneof initializer_oneof {
    TruncatedNormalInitializer truncated_normal_initializer = 1;
    VarianceScalingInitializer variance_scaling_initializer = 2;
    RandomNormalInitializer random_normal_initializer = 3;
  }
}
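
// Illustrative text-format usage of the Initializer one-of (example values):
//
//   initializer {
//     random_normal_initializer {
//       mean: 0.0
//       stddev: 0.01
//     }
//   }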

// Configuration proto for truncated normal initializer. See
// https://www.tensorflow.org/api_docs/python/tf/truncated_normal_initializer
message TruncatedNormalInitializer {
  optional float mean = 1 [default = 0.0];
  optional float stddev = 2 [default = 1.0];
}

// Configuration proto for variance scaling initializer. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/
// variance_scaling_initializer
message VarianceScalingInitializer {
  optional float factor = 1 [default = 2.0];
  optional bool uniform = 2 [default = false];
  enum Mode {
    FAN_IN = 0;
    FAN_OUT = 1;
    FAN_AVG = 2;
  }
  optional Mode mode = 3 [default = FAN_IN];
}
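
// With the defaults above (factor: 2.0, mode: FAN_IN, uniform: false), this
// draws weights with variance 2 / fan_in, i.e. He/MSRA initialization. An
// illustrative text-format snippet for Glorot/Xavier-style scaling instead:
//
//   variance_scaling_initializer {
//     factor: 1.0
//     mode: FAN_AVG
//     uniform: true
//   }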

// Configuration proto for random normal initializer. See
// https://www.tensorflow.org/api_docs/python/tf/random_normal_initializer
message RandomNormalInitializer {
  optional float mean = 1 [default = 0.0];
  optional float stddev = 2 [default = 1.0];
}

// Configuration proto for batch norm to apply after convolution op. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/batch_norm
message BatchNorm {
  optional float decay = 1 [default = 0.999];
  optional bool center = 2 [default = true];
  optional bool scale = 3 [default = false];
  optional float epsilon = 4 [default = 0.001];
  // Whether to train the batch norm variables. If this is set to false during
  // training, the current values of the batch norm variables are used for the
  // forward pass but are never updated.
  optional bool train = 5 [default = true];
}
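
// Illustrative text-format snippet that freezes the batch norm variables
// during training (example values):
//
//   batch_norm {
//     decay: 0.997
//     center: true
//     scale: true
//     epsilon: 0.001
//     train: false
//   }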

// Configuration proto for group normalization to apply after convolution op.
// https://arxiv.org/abs/1803.08494
message GroupNorm {
}
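
// Illustrative: group norm is selected through the normalizer one-of in
// Hyperparams. GroupNorm currently has no tunable fields, so an empty
// message suffices:
//
//   group_norm {
//   }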