# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM common training driver."""

from absl import app
from absl import flags
import gin

from official.common import distribute_utils
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.nlp import continuous_finetune_lib

FLAGS = flags.FLAGS

# Extra flag used only by the 'continuous_train_and_eval' mode: the total
# number of pretraining steps of the upstream job whose checkpoints this
# finetuning job consumes (forwarded to run_continuous_finetune below).
flags.DEFINE_integer(
    'pretrain_steps',
    default=None,
    help='The number of total training steps for the pretraining job.')

Le Hou's avatar
Le Hou committed
39
40
41
42
43
44
45
46
47
48

def main(_):
  """Runs a TFM training/evaluation experiment configured via absl flags.

  Reads the experiment configuration from `FLAGS` (gin files/params,
  experiment params, `mode`, `model_dir`), then either launches the
  continuous-finetune loop or a regular `train_lib.run_experiment` under
  the configured distribution strategy.

  Args:
    _: Unused positional argument supplied by `app.run`.
  """
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
  params = train_utils.parse_configuration(FLAGS)
  model_dir = FLAGS.model_dir
  if 'train' in FLAGS.mode:
    # Pure eval modes do not output yaml files. Otherwise continuous eval job
    # may race against the train job for writing the same file.
    train_utils.serialize_config(params, model_dir)

  if FLAGS.mode == 'continuous_train_and_eval':
    continuous_finetune_lib.run_continuous_finetune(
        FLAGS.mode, params, model_dir, pretrain_steps=FLAGS.pretrain_steps)
  else:
    # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
    # can have significant impact on model speeds by utilizing float16 in case
    # of GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only
    # when dtype is float16.
    if params.runtime.mixed_precision_dtype:
      performance.set_mixed_precision_policy(
          params.runtime.mixed_precision_dtype)
    distribution_strategy = distribute_utils.get_distribution_strategy(
        distribution_strategy=params.runtime.distribution_strategy,
        all_reduce_alg=params.runtime.all_reduce_alg,
        num_gpus=params.runtime.num_gpus,
        tpu_address=params.runtime.tpu,
        **params.runtime.model_parallelism())
    # Task (and hence model) construction must happen inside the strategy
    # scope so variables are created on the right devices.
    with distribution_strategy.scope():
      task = task_factory.get_task(params.task, logging_dir=model_dir)

    train_lib.run_experiment(
        distribution_strategy=distribution_strategy,
        task=task,
        mode=FLAGS.mode,
        params=params,
        model_dir=model_dir)

  # Persist the operative gin config alongside the model artifacts.
  train_utils.save_gin_config(FLAGS.mode, model_dir)
Le Hou's avatar
Le Hou committed
76

Le Hou's avatar
Le Hou committed
77
78
  train_utils.save_gin_config(FLAGS.mode, model_dir)

Le Hou's avatar
Le Hou committed
79
80
if __name__ == '__main__':
  # Register the common TFM command-line flags before parsing.
  tfm_flags.define_flags()
  flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
  app.run(main)