# Convert the bert-base-chinese weights from Hugging Face
# Weights: https://huggingface.co/bert-base-chinese
# The checkpoint keys are not fully aligned with this framework's keys: the LayerNorm
# weights and biases are stored under LayerNorm.gamma and LayerNorm.beta.
import torch

state_dict = torch.load('F:/Projects/pretrain_ckpt/bert/[huggingface_torch_base]--bert-base-chinese/pytorch_model.bin')
state_dict_new = {}

for k, v in state_dict.items():
    # Rename LayerNorm.gamma/beta to the LayerNorm.weight/bias keys expected here;
    # all other keys are copied through unchanged
    if 'LayerNorm.gamma' in k:
        k = k.replace('LayerNorm.gamma', 'LayerNorm.weight')
    elif 'LayerNorm.beta' in k:
        k = k.replace('LayerNorm.beta', 'LayerNorm.bias')
    state_dict_new[k] = v

torch.save(state_dict_new, 'F:/Projects/pretrain_ckpt/bert/[huggingface_torch_base]--bert-base-chinese/bert4torch_pytorch_model.bin')

# config
'''
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "directionality": "bidi",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "type_vocab_size": 2,
  "vocab_size": 21128
}
'''
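
# Sanity check (a minimal sketch added for illustration, not part of the original
# conversion; it reuses the same local paths assumed above): reload both files and
# confirm the renames took effect without altering any tensor values.
old_sd = torch.load('F:/Projects/pretrain_ckpt/bert/[huggingface_torch_base]--bert-base-chinese/pytorch_model.bin')
new_sd = torch.load('F:/Projects/pretrain_ckpt/bert/[huggingface_torch_base]--bert-base-chinese/bert4torch_pytorch_model.bin')

# No gamma/beta LayerNorm keys should survive the conversion
assert not any('LayerNorm.gamma' in k or 'LayerNorm.beta' in k for k in new_sd)

# Same number of entries, and every tensor is unchanged under the key mapping
assert len(old_sd) == len(new_sd)
for k, v in old_sd.items():
    k2 = k.replace('LayerNorm.gamma', 'LayerNorm.weight').replace('LayerNorm.beta', 'LayerNorm.bias')
    assert torch.equal(new_sd[k2], v)
print('conversion verified')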