syntax = "proto2";

package object_detection.protos;

// Messages for configuring the optimization strategy used to train object
// detection models.

// Top level optimizer message.
message Optimizer {
  oneof optimizer {
    RMSPropOptimizer rms_prop_optimizer = 1;
    MomentumOptimizer momentum_optimizer = 2;
    AdamOptimizer adam_optimizer = 3;
  }
  optional bool use_moving_average = 4 [default = true];
  optional float moving_average_decay = 5 [default = 0.9999];
}
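
// Example of this message in protobuf text format (a sketch with illustrative
// values, e.g. as it might appear under train_config in a pipeline config;
// only the moving-average fields below repeat the defaults declared above).
// Exactly one optimizer from the oneof may be set:
//
//   optimizer {
//     rms_prop_optimizer {
//       learning_rate {
//         exponential_decay_learning_rate {
//           initial_learning_rate: 0.004
//           decay_steps: 200000
//           decay_factor: 0.95
//         }
//       }
//       momentum_optimizer_value: 0.9
//       decay: 0.9
//       epsilon: 1.0
//     }
//     use_moving_average: true
//     moving_average_decay: 0.9999
//   }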

// Configuration message for the RMSPropOptimizer
// See: https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
message RMSPropOptimizer {
  optional LearningRate learning_rate = 1;
  optional float momentum_optimizer_value = 2 [default = 0.9];
  optional float decay = 3 [default = 0.9];
  optional float epsilon = 4 [default = 1.0];
}
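
// Example text-format sketch (illustrative values) of an RMSPropOptimizer
// block using a constant learning rate:
//
//   rms_prop_optimizer {
//     learning_rate {
//       constant_learning_rate {
//         learning_rate: 0.004
//       }
//     }
//     momentum_optimizer_value: 0.9
//     decay: 0.9
//     epsilon: 1.0
//   }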

// Configuration message for the MomentumOptimizer
// See: https://www.tensorflow.org/api_docs/python/tf/train/MomentumOptimizer
message MomentumOptimizer {
  optional LearningRate learning_rate = 1;
  optional float momentum_optimizer_value = 2 [default = 0.9];
}
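
// Example text-format sketch (illustrative values) of a MomentumOptimizer
// block with a cosine-decayed learning rate:
//
//   momentum_optimizer {
//     learning_rate {
//       cosine_decay_learning_rate {
//         learning_rate_base: 0.04
//         total_steps: 25000
//         warmup_learning_rate: 0.013
//         warmup_steps: 2000
//       }
//     }
//     momentum_optimizer_value: 0.9
//   }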

// Configuration message for the AdamOptimizer
// See: https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
message AdamOptimizer {
  optional LearningRate learning_rate = 1;
}
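
// Example text-format sketch (illustrative values) of an AdamOptimizer block
// with a manually stepped learning rate:
//
//   adam_optimizer {
//     learning_rate {
//       manual_step_learning_rate {
//         initial_learning_rate: 0.0002
//         schedule {
//           step: 90000
//           learning_rate: 0.00002
//         }
//       }
//     }
//   }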

// Configuration message for optimizer learning rate.
message LearningRate {
  oneof learning_rate {
    ConstantLearningRate constant_learning_rate = 1;
    ExponentialDecayLearningRate exponential_decay_learning_rate = 2;
    ManualStepLearningRate manual_step_learning_rate = 3;
    CosineDecayLearningRate cosine_decay_learning_rate = 4;
  }
}
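
// Because the schedules above live in a oneof, exactly one variant may be set
// per LearningRate message, for example (illustrative value):
//
//   learning_rate {
//     constant_learning_rate {
//       learning_rate: 0.002
//     }
//   }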

// Configuration message for a constant learning rate.
message ConstantLearningRate {
  optional float learning_rate = 1 [default = 0.002];
}
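
// Example text-format usage (the value shown is the declared default):
//
//   constant_learning_rate {
//     learning_rate: 0.002
//   }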

// Configuration message for an exponentially decaying learning rate.
// See https://www.tensorflow.org/versions/master/api_docs/python/train/ \
//     decaying_the_learning_rate#exponential_decay
message ExponentialDecayLearningRate {
  optional float initial_learning_rate = 1 [default = 0.002];
  optional uint32 decay_steps = 2 [default = 4000000];
  optional float decay_factor = 3 [default = 0.95];
  optional bool staircase = 4 [default = true];
}
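
// Example text-format sketch (illustrative values). With staircase = true the
// rate drops by decay_factor at discrete multiples of decay_steps rather than
// decaying continuously:
//
//   exponential_decay_learning_rate {
//     initial_learning_rate: 0.004
//     decay_steps: 100000
//     decay_factor: 0.95
//     staircase: true
//   }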

// Configuration message for a manually defined learning rate schedule.
message ManualStepLearningRate {
  optional float initial_learning_rate = 1 [default = 0.002];
  message LearningRateSchedule {
    optional uint32 step = 1;
    optional float learning_rate = 2 [default = 0.002];
  }
  repeated LearningRateSchedule schedule = 2;
}
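
// Example text-format sketch (illustrative values). Training starts at
// initial_learning_rate and switches to each schedule entry's learning_rate
// once the corresponding step is reached:
//
//   manual_step_learning_rate {
//     initial_learning_rate: 0.0003
//     schedule {
//       step: 900000
//       learning_rate: 0.00003
//     }
//     schedule {
//       step: 1200000
//       learning_rate: 0.000003
//     }
//   }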

// Configuration message for a cosine decaying learning rate as defined in
// object_detection/utils/learning_schedules.py
message CosineDecayLearningRate {
  optional float learning_rate_base = 1 [default = 0.002];
  optional uint32 total_steps = 2 [default = 4000000];
  optional float warmup_learning_rate = 3 [default = 0.0002];
  optional uint32 warmup_steps = 4 [default = 10000];
}
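
// Example text-format sketch (illustrative values). The rate warms up from
// warmup_learning_rate to learning_rate_base over warmup_steps, then follows
// a cosine decay toward zero at total_steps, per
// object_detection/utils/learning_schedules.py:
//
//   cosine_decay_learning_rate {
//     learning_rate_base: 0.01
//     total_steps: 50000
//     warmup_learning_rate: 0.001
//     warmup_steps: 1000
//   }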