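# molmo-o-lora.yaml
# LoRA fine-tuning config for allenai/Molmo-7B-O-0924 on the pdelfin PDF batch data
# (data sources and hyperparameters are defined in the sections below).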
model:
  name_or_path: allenai/Molmo-7B-O-0924
  arch: causal
  use_flash_attn: true

wandb:
  project: pdelfin
  entity: ai2-llm

generate:
  max_length: 4096

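# Each source pairs response files (response_glob_path) with rendering options:
# target_longest_image_dim presumably resizes each page image so its longest side is 1024 px,
# and target_anchor_text_len presumably caps the anchor text at 6000 characters.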
train_data:
  seed: 1337
  cache_location: /data/jakep/pdfdata/pdelfin_cache
  sources:
    - name: openai_batch_data_v5_1_train
      response_glob_path: /data/jakep/pdfdata/openai_batch_data_v5_1_train_done/*.json
      target_longest_image_dim: [1024]
      target_anchor_text_len: [6000]
    - name: openai_batch_data_v5_1_iabooks_train
      response_glob_path: /data/jakep/pdfdata/openai_batch_data_v5_1_iabooks_train_done/*.json
      target_longest_image_dim: [1024]
      target_anchor_text_len: [6000]

valid_data:
  cache_location: /data/jakep/pdfdata/pdelfin_cache
  metric_for_best_model: openai_batch_data_v5_1_eval_loss
  sources:
    # These tend to be small, so loading them directly from S3 is fine
    - name: openai_batch_data_v5_1_eval
      response_glob_path: s3://ai2-oe-data/jakep/pdfdata/openai_batch_done_v5_1_eval/*.json
      target_longest_image_dim: [1024]
      target_anchor_text_len: [6000]
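# Note: metric_for_best_model appears to be "<source name>_loss" for the eval source above.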

# Mostly pulled from https://github.com/QwenLM/Qwen2/blob/main/examples/sft/finetune.sh
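# With batch_size 1 and gradient_accumulation_steps 4, the effective batch is
# 4 sequences per device (times the number of GPUs when training distributed).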
hparams:
  batch_size: 1
  eval_batch_size: 1
  gradient_accumulation_steps: 4
  gradient_checkpointing: true
  find_unused_parameters: true
  clip_grad_norm: 1.0
  learning_rate: 1e-4
  max_steps: 10000
  pad_multiple_of: 16
  log_every_steps: 10
  eval_every_steps: 100
  optim: adamw_torch
  lr_scheduler: cosine
  weight_decay: 0.01
  warmup_ratio: 0.03

# From https://github.com/QwenLM/Qwen2/blob/main/examples/sft/finetune.py
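# rank 32 with alpha 32 gives the standard LoRA scaling factor alpha/rank = 1.0.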
lora:
  rank: 32
  alpha: 32
  dropout: 0.05
  task_type: CAUSAL_LM
  target_modules:
      # attention layers in main transformer
      - att_proj
      - ff_proj
      - attn_out
      - ff_out
      # vision transformer attention and FF
      - attention.wq
      - attention.wk
      - attention.wv
      - attention.wo
      - feed_forward.w1
      - feed_forward.w2
      # vision image projector
      - vision_backbone.image_projector.w1
      - vision_backbone.image_projector.w2
      - vision_backbone.image_projector.w3

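# Checkpoints are written to S3 every 1000 steps, i.e. roughly 10 over a 10000-step run.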
save:
  path: s3://ai2-oe-data/jakep/experiments/molmo-o-0924/v1/models/
  save_every_steps: 1000

max_workers: 10