# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Semantic segmentation configuration definition."""
import dataclasses
import math
import os
from typing import List, Optional, Union

from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling.hyperparams import config_definitions as cfg
from official.vision.beta.configs import backbones
from official.vision.beta.configs import common
from official.vision.beta.configs import decoders


@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for training."""
  output_size: List[int] = dataclasses.field(default_factory=list)
  # If crop_size is specified, the image is first resized to output_size and
  # then a crop of size crop_size is taken.
  crop_size: List[int] = dataclasses.field(default_factory=list)
  input_path: str = ''
  global_batch_size: int = 0
  is_training: bool = True
  dtype: str = 'float32'
  shuffle_buffer_size: int = 1000
  cycle_length: int = 10
  # If resize_eval_groundtruth is set to False, original image sizes are used
  # for eval. In that case, groundtruth_padded_size must also be specified to
  # allow batching images of variable sizes.
  resize_eval_groundtruth: bool = True
  groundtruth_padded_size: List[int] = dataclasses.field(default_factory=list)
  aug_scale_min: float = 1.0
  aug_scale_max: float = 1.0
  aug_rand_hflip: bool = True
  drop_remainder: bool = True
  file_type: str = 'tfrecord'
  decoder: Optional[common.DataDecoder] = common.DataDecoder()


@dataclasses.dataclass
class SegmentationHead(hyperparams.Config):
  """Segmentation head config."""
  level: int = 3
  num_convs: int = 2
  num_filters: int = 256
  use_depthwise_convolution: bool = False
  prediction_kernel_size: int = 1
  upsample_factor: int = 1
  feature_fusion: Optional[str] = None  # None, deeplabv3plus, or pyramid_fusion
  # deeplabv3plus feature fusion params
  low_level: int = 2
  low_level_num_filters: int = 48
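  # In 'deeplabv3plus' fusion, the level-`low_level` backbone features are
  # projected to `low_level_num_filters` channels and concatenated with the
  # upsampled decoder output before the head convolutions.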


@dataclasses.dataclass
class SemanticSegmentationModel(hyperparams.Config):
  """Semantic segmentation model config."""
  num_classes: int = 0
  input_size: List[int] = dataclasses.field(default_factory=list)
  min_level: int = 3
  max_level: int = 6
  head: SegmentationHead = SegmentationHead()
  backbone: backbones.Backbone = backbones.Backbone(
      type='resnet', resnet=backbones.ResNet())
  decoder: decoders.Decoder = decoders.Decoder(type='identity')
  norm_activation: common.NormActivation = common.NormActivation()


@dataclasses.dataclass
class Losses(hyperparams.Config):
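  """Loss function config."""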
  label_smoothing: float = 0.0
  ignore_label: int = 255
  class_weights: List[float] = dataclasses.field(default_factory=list)
  l2_weight_decay: float = 0.0
  use_groundtruth_dimension: bool = True
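  # If top_k_percent_pixels < 1.0, only the top k percent of pixel losses
  # contribute to the loss (pixel-level online hard example mining).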
  top_k_percent_pixels: float = 1.0


@dataclasses.dataclass
class Evaluation(hyperparams.Config):
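  """Evaluation config."""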
  report_per_class_iou: bool = True
  report_train_mean_iou: bool = True  # Turning this off can speed up training.


@dataclasses.dataclass
class SemanticSegmentationTask(cfg.TaskConfig):
  """The model config."""
  model: SemanticSegmentationModel = SemanticSegmentationModel()
  train_data: DataConfig = DataConfig(is_training=True)
  validation_data: DataConfig = DataConfig(is_training=False)
  losses: Losses = Losses()
  evaluation: Evaluation = Evaluation()
  train_input_partition_dims: List[int] = dataclasses.field(
      default_factory=list)
  eval_input_partition_dims: List[int] = dataclasses.field(
      default_factory=list)
  init_checkpoint: Optional[str] = None
  init_checkpoint_modules: Union[
      str, List[str]] = 'all'  # all, backbone, and/or decoder


@exp_factory.register_config_factory('semantic_segmentation')
def semantic_segmentation() -> cfg.ExperimentConfig:
  """Semantic segmentation general."""
  return cfg.ExperimentConfig(
      task=SemanticSegmentationTask(),
      trainer=cfg.TrainerConfig(),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
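
# A minimal sketch of consuming a registered experiment: configs can be
# fetched by name and overridden (the input path and batch size below are
# placeholders):
#
#   config = exp_factory.get_exp_config('semantic_segmentation')
#   config.override({
#       'task': {
#           'train_data': {
#               'input_path': '/path/to/train*',
#               'global_batch_size': 64,
#           }
#       }
#   }, is_strict=False)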

# PASCAL VOC 2012 Dataset
PASCAL_TRAIN_EXAMPLES = 10582
PASCAL_VAL_EXAMPLES = 1449
PASCAL_INPUT_PATH_BASE = 'pascal_voc_seg'


@exp_factory.register_config_factory('seg_deeplabv3_pascal')
def seg_deeplabv3_pascal() -> cfg.ExperimentConfig:
  """Image segmentation on imagenet with resnet deeplabv3."""
  train_batch_size = 16
  eval_batch_size = 8
  steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
  output_stride = 16
  aspp_dilation_rates = [6, 12, 18]  # [12, 24, 36] if output_stride = 8
  multigrid = [1, 2, 4]
  stem_type = 'v1'
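  # The decoder/head consume features at the level matching output_stride:
  # output_stride = 16 -> level log2(16) = 4 (1/16 input resolution).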
  level = int(math.log2(output_stride))
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              num_classes=21,
              input_size=[None, None, 3],
              backbone=backbones.Backbone(
                  type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
                      model_id=101, output_stride=output_stride,
                      multigrid=multigrid, stem_type=stem_type)),
              decoder=decoders.Decoder(
                  type='aspp', aspp=decoders.ASPP(
                      level=level, dilation_rates=aspp_dilation_rates)),
              head=SegmentationHead(level=level, num_convs=0),
              norm_activation=common.NormActivation(
                  activation='swish',
                  norm_momentum=0.9997,
                  norm_epsilon=1e-3,
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
              # TODO(arashwan): test changing size to 513 to match deeplab.
              output_size=[512, 512],
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.5,
              aug_scale_max=2.0),
          validation_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
              output_size=[512, 512],
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=False,
              groundtruth_padded_size=[512, 512],
              drop_remainder=False),
          # resnet101
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
          init_checkpoint_modules='backbone'),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=45 * steps_per_epoch,
          validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.007,
                      'decay_steps': 45 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])

  return config


@exp_factory.register_config_factory('seg_deeplabv3plus_pascal')
def seg_deeplabv3plus_pascal() -> cfg.ExperimentConfig:
  """Image segmentation on imagenet with resnet deeplabv3+."""
  train_batch_size = 16
  eval_batch_size = 8
  steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
  output_stride = 16
  aspp_dilation_rates = [6, 12, 18]
  multigrid = [1, 2, 4]
  stem_type = 'v1'
  level = int(math.log2(output_stride))
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              num_classes=21,
              input_size=[None, None, 3],
              backbone=backbones.Backbone(
                  type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
                      model_id=101, output_stride=output_stride,
                      stem_type=stem_type, multigrid=multigrid)),
              decoder=decoders.Decoder(
                  type='aspp',
                  aspp=decoders.ASPP(
                      level=level, dilation_rates=aspp_dilation_rates)),
              head=SegmentationHead(
                  level=level,
                  num_convs=2,
                  feature_fusion='deeplabv3plus',
                  low_level=2,
                  low_level_num_filters=48),
              norm_activation=common.NormActivation(
                  activation='swish',
                  norm_momentum=0.9997,
                  norm_epsilon=1e-3,
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
              output_size=[512, 512],
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.5,
              aug_scale_max=2.0),
          validation_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
              output_size=[512, 512],
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=False,
              groundtruth_padded_size=[512, 512],
              drop_remainder=False),
          # resnet101
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
          init_checkpoint_modules='backbone'),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=45 * steps_per_epoch,
          validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.007,
                      'decay_steps': 45 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])

  return config


@exp_factory.register_config_factory('seg_resnetfpn_pascal')
def seg_resnetfpn_pascal() -> cfg.ExperimentConfig:
  """Image segmentation on imagenet with resnet-fpn."""
  train_batch_size = 256
  eval_batch_size = 32
  steps_per_epoch = PASCAL_TRAIN_EXAMPLES // train_batch_size
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              num_classes=21,
              input_size=[512, 512, 3],
              min_level=3,
              max_level=7,
              backbone=backbones.Backbone(
                  type='resnet', resnet=backbones.ResNet(model_id=50)),
              decoder=decoders.Decoder(type='fpn', fpn=decoders.FPN()),
              head=SegmentationHead(level=3, num_convs=3),
              norm_activation=common.NormActivation(
                  activation='swish',
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'train_aug*'),
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.2,
              aug_scale_max=1.5),
          validation_data=DataConfig(
              input_path=os.path.join(PASCAL_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=False,
              groundtruth_padded_size=[512, 512],
              drop_remainder=False),
      ),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=450 * steps_per_epoch,
          validation_steps=PASCAL_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.007,
                      'decay_steps': 450 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])

  return config


# Cityscapes Dataset (Download and process the dataset yourself)
CITYSCAPES_TRAIN_EXAMPLES = 2975
CITYSCAPES_VAL_EXAMPLES = 500
CITYSCAPES_INPUT_PATH_BASE = 'cityscapes'


@exp_factory.register_config_factory('seg_deeplabv3plus_cityscapes')
def seg_deeplabv3plus_cityscapes() -> cfg.ExperimentConfig:
  """Image segmentation on imagenet with resnet deeplabv3+."""
  train_batch_size = 16
  eval_batch_size = 16
  steps_per_epoch = CITYSCAPES_TRAIN_EXAMPLES // train_batch_size
  output_stride = 16
  aspp_dilation_rates = [6, 12, 18]
  multigrid = [1, 2, 4]
  stem_type = 'v1'
  level = int(math.log2(output_stride))
  config = cfg.ExperimentConfig(
      task=SemanticSegmentationTask(
          model=SemanticSegmentationModel(
              # Cityscapes uses only 19 semantic classes for training and
              # evaluation; the void (background) class is ignored in both.
              num_classes=19,
              input_size=[None, None, 3],
              backbone=backbones.Backbone(
                  type='dilated_resnet', dilated_resnet=backbones.DilatedResNet(
                      model_id=101, output_stride=output_stride,
                      stem_type=stem_type, multigrid=multigrid)),
              decoder=decoders.Decoder(
                  type='aspp',
                  aspp=decoders.ASPP(
                      level=level, dilation_rates=aspp_dilation_rates,
                      pool_kernel_size=[512, 1024])),
              head=SegmentationHead(
                  level=level,
                  num_convs=2,
                  feature_fusion='deeplabv3plus',
                  low_level=2,
                  low_level_num_filters=48),
              norm_activation=common.NormActivation(
                  activation='swish',
                  norm_momentum=0.99,
                  norm_epsilon=1e-3,
                  use_sync_bn=True)),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE,
                                      'train_fine**'),
              crop_size=[512, 1024],
              output_size=[1024, 2048],
              is_training=True,
              global_batch_size=train_batch_size,
              aug_scale_min=0.5,
              aug_scale_max=2.0),
          validation_data=DataConfig(
              input_path=os.path.join(CITYSCAPES_INPUT_PATH_BASE, 'val_fine*'),
              output_size=[1024, 2048],
              is_training=False,
              global_batch_size=eval_batch_size,
              resize_eval_groundtruth=True,
              drop_remainder=False),
          # resnet101
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/deeplab/deeplab_resnet101_imagenet/ckpt-62400',
          init_checkpoint_modules='backbone'),
      trainer=cfg.TrainerConfig(
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          train_steps=500 * steps_per_epoch,
          validation_steps=CITYSCAPES_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'polynomial',
                  'polynomial': {
                      'initial_learning_rate': 0.01,
                      'decay_steps': 500 * steps_per_epoch,
                      'end_learning_rate': 0.0,
                      'power': 0.9
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 5 * steps_per_epoch,
                      'warmup_learning_rate': 0
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])

  return config
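

# A sketch of launching one of the registered experiments with the Model
# Garden train driver (flag names assume official/vision/beta/train.py;
# model_dir and the override value are placeholders):
#
#   python -m official.vision.beta.train \
#     --experiment=seg_deeplabv3plus_cityscapes \
#     --mode=train_and_eval \
#     --model_dir=/tmp/seg_model \
#     --params_override='task.train_data.input_path=cityscapes/train_fine*'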