# train_colossalai_teyvat.yaml
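# Fine-tunes a Stable Diffusion 2.x latent diffusion model on the Teyvat
# (Genshin Impact) image-caption dataset with the ColossalAI strategy.
# Assumed launch, following the stable-diffusion main.py convention
# (check the example's README for the exact path and flags):
#   python main.py --logdir /tmp -t -b train_colossalai_teyvat.yaml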
model:
  base_learning_rate: 1.0e-4
  params:
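    # "v" enables the v-prediction objective used by SD 2.x checkpoints
    # (the model predicts v = alpha_t * eps - sigma_t * x rather than eps)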
    parameterization: "v"
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    ckpt: None # replace None with a checkpoint path to initialize from pretrained weights
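    # image_size/channels below are latent-space dims: a 512x512 RGB input
    # maps to a 64x64x4 latent through the f=8 VAE (first_stage_config)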
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: txt
    image_size: 64
    channels: 4
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
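      # LambdaLinearScheduler-style params: the LR multiplier ramps linearly
      # from f_start to f_max over warm_up_steps, then decays toward f_min;
      # each factor scales base_learning_rate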
      warm_up_steps: [ 1 ] # NOTE: 1 is for resuming; use 10000 when starting from scratch
      cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
      f_start: [ 1.e-6 ]
      f_max: [ 1.e-4 ]
      f_min: [ 1.e-10 ]

    unet_config:
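      # SD 2.x UNet backbone: 320 base channels, cross-attention at the
      # 4x/2x/1x downsample levels, context_dim 1024 to match the text
      # encoder's embedding width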
      use_checkpoint: True
      use_fp16: True
      image_size: 32 # unused
      in_channels: 4
      out_channels: 4
      model_channels: 320
      attention_resolutions: [ 4, 2, 1 ]
      num_res_blocks: 2
      channel_mult: [ 1, 2, 4, 4 ]
      num_head_channels: 64 # head dim must be a fixed size for flash attention
      use_spatial_transformer: True
      use_linear_in_transformer: True
      transformer_depth: 1
      context_dim: 1024
      legacy: False

    first_stage_config:
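      # KL-regularized VAE: ch_mult [1,2,4,4] gives f=8 downsampling into
      # 4-channel latents; the bare lossconfig appears intentional, as the
      # VAE stays frozen during latent-diffusion fine-tuning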
      embed_dim: 4
      monitor: val/rec_loss
      ddconfig:
        #attn_type: "vanilla-xformers"
        double_z: true
        z_channels: 4
        resolution: 256
        in_channels: 3
        out_ch: 3
        ch: 128
        ch_mult:
        - 1
        - 2
        - 4
        - 4
        num_res_blocks: 2
        attn_resolutions: []
        dropout: 0.0
      lossconfig:

    cond_stage_config:
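      # frozen OpenCLIP-style text encoder; "penultimate" takes hidden
      # states from the second-to-last transformer layer (SD 2.x convention)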
      freeze: True
      layer: "penultimate"

data:
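  # loads the Fazzie/Teyvat image-caption dataset from the Hugging Face Hub;
  # images are resized and random-cropped to 512x512 with horizontal flips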
  batch_size: 16
  num_workers: 4
  train:
    target: ldm.data.teyvat.hf_dataset
    params:
      path: Fazzie/Teyvat
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512
      - target: torchvision.transforms.RandomHorizontalFlip

lightning:
  trainer:
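    # 2-GPU mixed-precision (fp16) run; these are pytorch-lightning 1.x
    # trainer flags, and max_epochs: 2 is a demo-scale setting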
    accelerator: 'gpu'
    devices: 2
    log_gpu_memory: all
    max_epochs: 2
    precision: 16
    auto_select_gpus: False
    strategy:
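      # ColossalAI Gemini/ZeRO settings: chunk-based memory management with
      # parameters kept on GPU; switch placement_policy to cpu or auto to
      # offload when GPU memory is tight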
      use_chunk: True
      enable_distributed_storage: True
      placement_policy: cuda
      force_outputs_fp32: true
      min_chunk_size: 64

    log_every_n_steps: 2
    logger: True
    default_root_dir: "/tmp/diff_log/"
    # profiler: pytorch

  logger_config:
    wandb:
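      # name, id, and offline look like placeholders resolved by the launch
      # script at runtime (nowname / opt.debug are variables in main.py)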
      name: nowname
      save_dir: "/tmp/diff_log/"
      offline: opt.debug
      id: nowname