# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause

model:
  arch: albef_pretrain

  model_type: base
  load_pretrained: False
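  # No pretrained ALBEF checkpoint is loaded; the encoders still initialize
  # from their backbone defaults (assumed LAVIS behavior).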

  queue_size: 65536
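  # MoCo-style momentum queue of negative features for the image-text
  # contrastive loss; 65536 follows the ALBEF paper. Implementations of this
  # queue typically require queue_size to be divisible by the global batch size.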

  image_size: 256
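  # Input resolution fed to the vision encoder; keep consistent with the
  # vis_processor image_size entries under datasets below.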


datasets:
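  # COCO, CC3M, CC12M, Visual Genome, and SBU together approximate the
  # 14M-image pretraining corpus from the ALBEF paper; all five builders use
  # the same 256px BLIP-style train processors.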
  coco_caption:
    vis_processor:
        train:
          name: "blip_image_train"
          image_size: 256
    text_processor:
        train:
          name: "blip_caption"
  conceptual_caption_3m: # name of the dataset builder
    vis_processor:
        train:
          name: "blip_image_train"
          image_size: 256
    text_processor:
        train:
          name: "blip_caption"
  conceptual_caption_12m: # name of the dataset builder
    vis_processor:
        train:
          name: "blip_image_train"
          image_size: 256
    text_processor:
        train:
          name: "blip_caption"
  vg_caption: # name of the dataset builder
    vis_processor:
        train:
          name: "blip_image_train"
          image_size: 256
    text_processor:
        train:
          name: "blip_caption"
  sbu_caption: # name of the dataset builder
    vis_processor:
        train:
          name: "blip_image_train"
          image_size: 256
    text_processor:
        train:
          name: "blip_caption"

run:
  task: image_text_pretrain
  # optimizer
  lr_sched: "linear_warmup_step_lr"
  # lr_sched: "linear_warmup_cosine_lr"
  init_lr: 3e-4
  min_lr: 1e-6
  warmup_lr: 1e-6
  lr_decay_rate: 0.9
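  # With linear_warmup_step_lr (as implemented in LAVIS), the LR rises linearly
  # from warmup_lr to init_lr over warmup_steps, then decays by lr_decay_rate
  # per epoch, floored at min_lr; lr_decay_rate is unused by the cosine schedule.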

  weight_decay: 0.05
  max_epoch: 20
  batch_size_train: 64
  batch_size_eval: 64
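  # Batch sizes are per GPU (assumed LAVIS convention); the effective global
  # batch is batch_size_train * world_size.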
  num_workers: 4
  warmup_steps: 3000

  seed: 42
  output_dir: "output/ALBEF/Pretrain"

  amp: False
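  # Set True to enable torch.cuda.amp mixed precision during training.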
  resume_ckpt_path: null

  evaluate: False
  train_splits: ["train"]

  device: "cuda"
  world_size: 1
  dist_url: "env://"
  distributed: True
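
# Example launch (paths and GPU count are assumptions; adjust for your setup):
#   python -m torch.distributed.run --nproc_per_node=8 train.py \
#     --cfg-path lavis/projects/albef/train/pretrain.yaml
# Individual keys can be overridden on the command line via --options, e.g.:
#   ... --options run.batch_size_train=32 run.init_lr=1e-4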