# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict

import mmengine
import torch
from mmengine.runner import CheckpointLoader


def convert_clip(ckpt):
    new_ckpt = OrderedDict()

    for k, v in list(ckpt.items()):
        new_v = v
        if k.startswith('visual.conv1'):
            new_k = k.replace('conv1', 'patch_embed.projection')
        elif k.startswith('visual.positional_embedding'):
            new_k = k.replace('positional_embedding', 'pos_embed')
            new_v = v.unsqueeze(dim=0)
        elif k.startswith('visual.class_embedding'):
            new_k = k.replace('class_embedding', 'cls_token')
            new_v = v.unsqueeze(dim=0).unsqueeze(dim=0)
        elif k.startswith('visual.ln_pre'):
            new_k = k.replace('ln_pre', 'pre_norm')
        elif k.startswith('visual.transformer.resblocks'):
            new_k = k.replace('transformer.resblocks', 'layers')
            if 'ln_1' in k:
                new_k = new_k.replace('ln_1', 'ln1')
            elif 'ln_2' in k:
                new_k = new_k.replace('ln_2', 'ln2')
            elif 'mlp.c_fc' in k:
                new_k = new_k.replace('mlp.c_fc', 'ffn.layers.0.0')
            elif 'mlp.c_proj' in k:
                new_k = new_k.replace('mlp.c_proj', 'ffn.layers.1')
            elif 'attn.in_proj_weight' in k:
                new_k = new_k.replace('in_proj_weight', 'qkv.weight')
            elif 'attn.in_proj_bias' in k:
                new_k = new_k.replace('in_proj_bias', 'qkv.bias')
            elif 'attn.out_proj' in k:
                new_k = new_k.replace('out_proj', 'proj')
        elif k.startswith('visual.ln_post'):
            new_k = k.replace('ln_post', 'ln1')
        elif k.startswith('visual.proj'):
            new_k = k.replace('visual.proj', 'visual_proj.proj')
        else:
            new_k = k

        new_ckpt[new_k] = new_v
    return new_ckpt


def main():
    """CLI entry point: load a CLIP checkpoint, convert it, save it.

    Reads the source checkpoint path/URL and destination path from
    ``sys.argv`` and writes the converted state dict to the destination.
    """
    parser = argparse.ArgumentParser(
        description='Convert keys in pretrained clip '
        'models to mmpretrain style.')
    parser.add_argument('src', help='src model path or url')
    # The dst path must be a full path of the new checkpoint.
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()

    checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')

    # Some checkpoints nest the weights under a 'state_dict' key.
    state_dict = (
        checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint)

    converted = convert_clip(state_dict)

    # Make sure the destination directory exists before saving.
    mmengine.mkdir_or_exist(osp.dirname(args.dst))
    torch.save(converted, args.dst)

    print('Done!!')


# Run the converter only when executed as a script, not on import.
if __name__ == '__main__':
    main()