Commit c007ba1a authored by sunzhq2's avatar sunzhq2 Committed by xuxo
Browse files

update

parents
Pipeline #3464 failed with stages
in 0 seconds
#! -*- coding:utf-8 -*-
# 以文本分类(情感分类)为例的半监督学习UDA策略,https://arxiv.org/abs/1904.12848
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.losses import UDALoss
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import random
maxlen = 256         # maximum sequence length after tokenization
batch_size = 16      # number of supervised samples per batch
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)  # fix all random seeds for reproducibility

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Sentiment dataset: one `text<TAB>label` pair per line, with each text
    split into segments no longer than maxlen."""

    @staticmethod
    def load_data(filenames):
        """Read the files and split every text into pieces of at most
        maxlen-2 tokens (leaving room for [CLS]/[SEP])."""
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        samples = []
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    samples.extend((seg, int(label)) for seg in text_segmentate(text, maxlen - 2, seps, strips))
        return samples
train_dataset = MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data'])
valid_dataset = MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data'])
test_dataset = MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data'])

# Ideally domain-specific unlabeled data would be collected here; as a demo,
# all labeled texts are reused as the unsupervised corpus.
unsup_dataset = [sen for sen, _ in (train_dataset.data + valid_dataset.data + test_dataset.data)]
def collate_fn(batch):
    """Build a UDA batch: supervised token ids, then unsupervised token ids,
    then noise-augmented copies of the unsupervised ids; labels cover only
    the supervised part."""

    def add_noise(token_ids, del_ratio=0.3):
        """Random token deletion as a simple augmentation; real applications
        can mix insertion/deletion/substitution noise."""
        n = len(token_ids)
        keep_mask = np.random.rand(n) > del_ratio
        if sum(keep_mask) == 0:
            keep_mask[np.random.choice(n)] = True  # guarantee that at least one word remains
        return list(np.array(token_ids)[keep_mask])

    sup_ids, unsup_ids, aug_ids, labels = [], [], [], []
    for text, label in batch:
        ids, _ = tokenizer.encode(text, maxlen=maxlen)
        sup_ids.append(ids)
        labels.append([label])
        # Unsupervised part: a randomly drawn text plus its augmented version
        u_ids, _ = tokenizer.encode(random.choice(unsup_dataset), maxlen=maxlen)
        unsup_ids.append(u_ids)
        # Keep [CLS]/[SEP] intact, perturb only the interior tokens
        aug_ids.append(u_ids[:1] + add_noise(u_ids[1:-1]) + u_ids[-1:])

    all_ids = sup_ids + unsup_ids + aug_ids
    all_ids = torch.tensor(sequence_padding(all_ids), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.long, device=device)
    return all_ids, labels.flatten()
# Build the dataloaders
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT encoder + dropout + linear head for binary sentiment classification."""

    def __init__(self, pool_method='cls'):
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)

    def forward(self, token_ids):
        hidden_states, pooling = self.bert([token_ids])
        # Non-padding positions (token id > 0) form the attention mask for pooling
        sent_emb = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
        return self.dense(self.dropout(sent_emb))
model = Model().to(device)

class Loss(UDALoss):
    """UDA loss wrapper: reports total, supervised and unsupervised parts."""
    def forward(self, y_pred, y_true_sup):
        loss, loss_sup, loss_unsup = super().forward(y_pred, y_true_sup, model.global_step, model.total_steps)
        return {'loss': loss, 'loss_sup': loss_sup, 'loss_unsup': loss_unsup}

# Configure loss and optimizer (both customizable)
model.compile(
    loss=Loss(tsa_schedule='linear_schedule', start_p=0.8),  # other TSA schedules can be used here
    optimizer=optim.Adam(model.parameters(), lr=2e-5),
    metrics=['loss_sup', 'loss_unsup']  # keys returned by Loss are logged automatically; listing them is optional
)
class Evaluator(Callback):
    """Per-epoch evaluation callback that tracks the best validation accuracy."""

    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')

    def evaluate(self, data):
        """Accuracy over `data`, scoring only the supervised rows of each batch."""
        correct, seen = 0., 0.
        for token_ids, y_true in data:
            # The first len(y_true) rows are supervised; the rest is UDA data
            preds = model.predict(token_ids[:y_true.size(0)]).argmax(axis=1)
            seen += len(y_true)
            correct += (y_true == preds).sum().item()
        return correct / seen
if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, load the best checkpoint instead of training
    model.load_weights('best_model.pt')
#! -*- coding:utf-8 -*-
# 通过对抗训练/梯度惩罚增强模型的泛化性能,包含fgm, pgd, vat,梯度惩罚
# 数据集:情感分类数据集
# 对抗训练:https://kexue.fm/archives/7234
# 虚拟对抗训练:https://kexue.fm/archives/7466
# 梯度惩罚:https://kexue.fm/archives/7234
from bert4torch.models import build_transformer_model, BaseModel
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from bert4torch.snippets import sequence_padding, Callback, ListDataset, text_segmentate, get_pool_emb, seed_everything
from bert4torch.tokenizers import Tokenizer
import sys
maxlen = 256         # maximum sequence length after tokenization
batch_size = 16
# BERT base
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)  # fix all random seeds for reproducibility

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Sentiment dataset: one `text<TAB>label` pair per line, with each text
    split into segments no longer than maxlen."""

    @staticmethod
    def load_data(filenames):
        """Read the files and split every text into pieces of at most
        maxlen-2 tokens (leaving room for [CLS]/[SEP])."""
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        samples = []
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    samples.extend((seg, int(label)) for seg in text_segmentate(text, maxlen - 2, seps, strips))
        return samples
def collate_fn(batch):
    """Convert a list of (text, label) pairs into padded id/label tensors on `device`."""
    token_id_seqs, labels = [], []
    for text, label in batch:
        token_id_seqs.append(tokenizer.encode(text, maxlen=maxlen)[0])
        labels.append([label])
    id_tensor = torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device)
    label_tensor = torch.tensor(labels, dtype=torch.long, device=device)
    return id_tensor, label_tensor.flatten()
# Build the dataloaders
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT encoder + dropout + linear head for binary sentiment classification."""

    def __init__(self, pool_method='cls'):
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)

    def forward(self, token_ids):
        hidden_states, pooling = self.bert([token_ids])
        # Non-padding positions (token id > 0) form the attention mask for pooling
        sent_emb = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
        return self.dense(self.dropout(sent_emb))
model = Model().to(device)

# Adversarial-training mode is taken from the command line, e.g. `python xx.py fgm`.
# Supported options (parameter details live in bert4torch.models / bert4torch.snippets):
#   fgm              - fast gradient method
#   pgd              - projected gradient descent
#   gradient_penalty - gradient penalty
#   vat              - virtual adversarial training (supervised-data demo only)
VALID_MODES = ('fgm', 'pgd', 'gradient_penalty', 'vat')
# Robustness fix: the original `sys.argv[1]` raised a bare IndexError when no
# argument was supplied and silently accepted unknown mode names.
mode = sys.argv[1] if len(sys.argv) > 1 else 'fgm'  # default to fgm when no arg is given
if mode not in VALID_MODES:
    raise ValueError(f'Unknown adversarial mode {mode!r}; choose one of {VALID_MODES}')
adversarial_train = {'name': mode}
print(f'Using {mode}'.center(60, '='))

# Configure loss, optimizer and the adversarial-training trick
model.compile(loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5),
              metrics=['accuracy'], adversarial_train=adversarial_train)
class Evaluator(Callback):
    """Per-epoch evaluation callback that tracks the best validation accuracy."""

    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')

    def evaluate(self, data):
        """Plain accuracy over every batch in `data`."""
        correct, seen = 0., 0.
        for x_true, y_true in data:
            preds = model.predict(x_true).argmax(axis=1)
            seen += len(y_true)
            correct += (y_true == preds).sum().item()
        return correct / seen
if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, load the best checkpoint instead of training
    model.load_weights('best_model.pt')
#! -*- coding:utf-8 -*-
# 情感分类任务, 指数滑动平均
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.optimizers import extend_with_exponential_moving_average
import torch.nn as nn
import torch
import torch.optim as optim
import random, os, numpy as np
from torch.utils.data import DataLoader
maxlen = 256         # maximum sequence length after tokenization
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)  # fix all random seeds for reproducibility

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Sentiment dataset: one `text<TAB>label` pair per line, with each text
    split into segments no longer than maxlen."""

    @staticmethod
    def load_data(filenames):
        """Read the files and split every text into pieces of at most
        maxlen-2 tokens (leaving room for [CLS]/[SEP])."""
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        samples = []
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    samples.extend((seg, int(label)) for seg in text_segmentate(text, maxlen - 2, seps, strips))
        return samples
def collate_fn(batch):
    """Convert a list of (text, label) pairs into padded id/label tensors on `device`."""
    token_id_seqs, labels = [], []
    for text, label in batch:
        token_id_seqs.append(tokenizer.encode(text, maxlen=maxlen)[0])
        labels.append([label])
    id_tensor = torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device)
    label_tensor = torch.tensor(labels, dtype=torch.long, device=device)
    return id_tensor, label_tensor.flatten()
# Build the dataloaders
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT encoder + dropout + linear head for binary sentiment classification."""

    def __init__(self, pool_method='cls'):
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)

    def forward(self, token_ids):
        hidden_states, pooling = self.bert([token_ids])
        # Non-padding positions (token id > 0) form the attention mask for pooling
        sent_emb = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
        return self.dense(self.dropout(sent_emb))
model = Model().to(device)
# Maintain an exponential moving average of the model weights
ema_schedule = extend_with_exponential_moving_average(model, decay=0.99)

# Configure loss and optimizer (both customizable)
model.compile(
    loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),
    scheduler=ema_schedule,
    metrics=['accuracy']
)
class Evaluator(Callback):
    """Per-epoch evaluation callback.

    Evaluation runs with the EMA (exponential moving average) weights applied;
    the raw training weights are restored afterwards.
    """

    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')

    def evaluate(self, data):
        """Compute accuracy on `data` using the EMA weights."""
        ema_schedule.apply_ema_weights()  # switch to the smoothed weights
        try:
            total, right = 0., 0.
            for x_true, y_true in data:
                y_pred = model.predict(x_true).argmax(axis=1)
                total += len(y_true)
                right += (y_true == y_pred).sum().item()
        finally:
            # Robustness fix: always restore the raw weights, even if
            # prediction raises, so training never continues on EMA weights.
            ema_schedule.restore_raw_weights()
        return right / total
if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, load the best checkpoint instead of training
    model.load_weights('best_model.pt')
#! -*- coding:utf-8 -*-
# 情感分类任务, 指数滑动平均ema+warmup两种策略
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset, seed_everything, get_pool_emb
from bert4torch.optimizers import extend_with_exponential_moving_average, get_linear_schedule_with_warmup
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256         # maximum sequence length after tokenization
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)  # fix all random seeds for reproducibility

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Sentiment dataset: one `text<TAB>label` pair per line, with each text
    split into segments no longer than maxlen."""

    @staticmethod
    def load_data(filenames):
        """Read the files and split every text into pieces of at most
        maxlen-2 tokens (leaving room for [CLS]/[SEP])."""
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        samples = []
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    samples.extend((seg, int(label)) for seg in text_segmentate(text, maxlen - 2, seps, strips))
        return samples
def collate_fn(batch):
    """Convert a list of (text, label) pairs into padded id/label tensors on `device`."""
    token_id_seqs, labels = [], []
    for text, label in batch:
        token_id_seqs.append(tokenizer.encode(text, maxlen=maxlen)[0])
        labels.append([label])
    id_tensor = torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device)
    label_tensor = torch.tensor(labels, dtype=torch.long, device=device)
    return id_tensor, label_tensor.flatten()
# Build the dataloaders
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT encoder + dropout + linear head for binary sentiment classification."""

    def __init__(self, pool_method='cls'):
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)

    def forward(self, token_ids):
        hidden_states, pooling = self.bert([token_ids])
        # Non-padding positions (token id > 0) form the attention mask for pooling
        sent_emb = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
        return self.dense(self.dropout(sent_emb))
model = Model().to(device)
optimizer = optim.Adam(model.parameters(), lr=2e-5)
# EMA of the weights plus linear warmup over the first epoch (10 epochs total)
ema_schedule = extend_with_exponential_moving_average(model, decay=0.99)
warmup_scheduler = get_linear_schedule_with_warmup(optimizer, len(train_dataloader), num_training_steps=len(train_dataloader)*10, last_epoch=-1)

# Configure loss and optimizer (both customizable)
model.compile(
    loss=nn.CrossEntropyLoss(),
    optimizer=optimizer,
    scheduler=[ema_schedule, warmup_scheduler],
    metrics=['accuracy']
)
class Evaluator(Callback):
    """Per-epoch evaluation callback.

    Evaluation runs with the EMA (exponential moving average) weights applied;
    the raw training weights are restored afterwards.
    """

    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')

    def evaluate(self, data):
        """Compute accuracy on `data` using the EMA weights."""
        ema_schedule.apply_ema_weights()  # switch to the smoothed weights
        try:
            total, right = 0., 0.
            for x_true, y_true in data:
                y_pred = model.predict(x_true).argmax(axis=1)
                total += len(y_true)
                right += (y_true == y_pred).sum().item()
        finally:
            # Robustness fix: always restore the raw weights, even if
            # prediction raises, so training never continues on EMA weights.
            ema_schedule.restore_raw_weights()
        return right / total
if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, load the best checkpoint instead of training
    model.load_weights('best_model.pt')
#! -*- coding:utf-8 -*-
# 情感分类任务, 加载bert权重
# Mixup策略,包含embedding,hidden, encoder的mixup
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.layers import MixUp
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 256         # maximum sequence length after tokenization
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
choice = 'train'  # 'train' to train, 'infer' to only run inference
seed_everything(42)  # fix all random seeds for reproducibility
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Sentiment dataset: one `text<TAB>label` pair per line, with each text
    split into segments no longer than maxlen."""

    @staticmethod
    def load_data(filenames):
        """Read the files and split every text into pieces of at most
        maxlen-2 tokens (leaving room for [CLS]/[SEP])."""
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        samples = []
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    samples.extend((seg, int(label)) for seg in text_segmentate(text, maxlen - 2, seps, strips))
        return samples
def collate_fn(batch):
    """Convert a list of (text, label) pairs into padded id/label tensors on `device`."""
    token_id_seqs, labels = [], []
    for text, label in batch:
        token_id_seqs.append(tokenizer.encode(text, maxlen=maxlen)[0])
        labels.append([label])
    id_tensor = torch.tensor(sequence_padding(token_id_seqs), dtype=torch.long, device=device)
    label_tensor = torch.tensor(labels, dtype=torch.long, device=device)
    return id_tensor, label_tensor.flatten()
# Build the dataloaders
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT classifier whose training forward pass goes through MixUp;
    inference bypasses MixUp entirely."""

    def __init__(self, mixup_method='encoder', pool_method='cls'):
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)
        self.mixup = MixUp(method=mixup_method)

    def forward(self, token_ids):
        # Training path: encode through the MixUp wrapper
        hidden_states, pooling = self.mixup.encode(self.bert, [token_ids])
        sent_emb = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
        return self.dense(self.dropout(sent_emb))

    def predict(self, token_ids):
        # Inference path: plain BERT encoding without MixUp
        self.eval()
        with torch.no_grad():
            hidden_states, pooling = self.bert([token_ids])
            sent_emb = get_pool_emb(hidden_states, pooling, token_ids.gt(0).long(), self.pool_method)
            logits = self.dense(self.dropout(sent_emb))
            return logits
model = Model().to(device)

class Loss(nn.Module):
    """Cross-entropy routed through the model's MixUp helper."""
    def forward(self, y_pred, y_true):
        return model.mixup(nn.CrossEntropyLoss(), y_pred, y_true)

# Configure loss and optimizer (both customizable)
model.compile(
    loss=Loss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),
)
class Evaluator(Callback):
    """Per-epoch evaluation callback that tracks the best validation accuracy."""

    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')

    def evaluate(self, data):
        """Plain accuracy over every batch in `data`."""
        correct, seen = 0., 0.
        for x_true, y_true in data:
            preds = model.predict(x_true).argmax(axis=1)
            seen += len(y_true)
            correct += (y_true == preds).sum().item()
        return correct / seen
if __name__ == '__main__':
    if choice == 'train':
        evaluator = Evaluator()
        model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
    else:
        # choice == 'infer': load the best checkpoint instead of training
        model.load_weights('best_model.pt')
#! -*- coding:utf-8 -*-
# 以文本分类为例的半监督学习,虚拟对抗训练策略
# 监督数据部分只计算监督Loss, 有监督+无监督数据计算对抗训练的Loss
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import random
maxlen = 256         # maximum sequence length after tokenization
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed_everything(42)  # fix all random seeds for reproducibility

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Sentiment dataset: one `text<TAB>label` pair per line, with each text
    split into segments no longer than maxlen."""

    @staticmethod
    def load_data(filenames):
        """Read the files and split every text into pieces of at most
        maxlen-2 tokens (leaving room for [CLS]/[SEP])."""
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        samples = []
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    samples.extend((seg, int(label)) for seg in text_segmentate(text, maxlen - 2, seps, strips))
        return samples
train_dataset = MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data'])
valid_dataset = MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data'])
test_dataset = MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data'])

# Ideally domain-specific unlabeled data would be collected here; as a demo,
# all labeled texts are reused as the unsupervised corpus.
unsup_dataset = [sen for sen, _ in (train_dataset.data + valid_dataset.data + test_dataset.data)]
def collate_fn(batch):
    """Build a semi-supervised batch: the first half is supervised samples,
    the second half randomly drawn unsupervised texts; labels cover the
    supervised half only."""
    sup_ids, unsup_ids, labels = [], [], []
    for text, label in batch:
        sup_ids.append(tokenizer.encode(text, maxlen=maxlen)[0])
        labels.append([label])
        # Unsupervised part: pick a random text from the unlabeled pool
        unsup_text = random.choice(unsup_dataset)
        unsup_ids.append(tokenizer.encode(unsup_text, maxlen=maxlen)[0])
    all_ids = sup_ids + unsup_ids
    all_ids = torch.tensor(sequence_padding(all_ids), dtype=torch.long, device=device)
    labels = torch.tensor(labels, dtype=torch.long, device=device)
    return all_ids, labels.flatten()
# Build the dataloaders
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT encoder + dropout + linear head for binary classification."""
    def __init__(self, pool_method='cls'):
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True, segment_vocab_size=0)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)
    def forward(self, token_ids):
        hidden_states, pooling = self.bert([token_ids])
        # NOTE(review): the five sibling examples in this file build the mask
        # as token_ids.gt(0); here token_ids[0].gt(0) takes only the first
        # element/row -- confirm whether this is intentional or a typo.
        pooled_output = get_pool_emb(hidden_states, pooling, token_ids[0].gt(0).long(), self.pool_method)
        output = self.dropout(pooled_output)
        output = self.dense(output)
        return output
model = Model().to(device)

class MyLoss(nn.Module):
    """Supervised cross-entropy only; the unsupervised half of the batch is
    handled by the virtual-adversarial regularizer configured below."""
    def forward(self, y_pred, y_true_sup):
        y_pred_sup = y_pred[:y_true_sup.shape[0]]  # only the supervised slice contributes to the loss
        return F.cross_entropy(y_pred_sup, y_true_sup)

# Configure loss and optimizer (both customizable)
model.compile(
    loss=MyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),
    adversarial_train = {'name': 'vat', 'adv_alpha': 1}  # virtual adversarial training
)
class Evaluator(Callback):
    """Per-epoch evaluation callback that tracks the best validation accuracy."""
    def __init__(self):
        self.best_val_acc = 0.
    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
    # Evaluation helper
    def evaluate(self, data):
        total, right = 0., 0.
        for inputs, y_true in data:
            # Keep only the supervised part of the batch.
            # NOTE(review): collate_fn returns a single tensor, so inputs[0]
            # is the first row rather than the supervised block; the sibling
            # UDA example slices token_ids[:y_true.size(0)] -- verify.
            inputs = [inputs[0][:y_true.size(0)]]
            y_pred = model.predict(inputs).argmax(axis=1)
            total += len(y_true)
            right += (y_true == y_pred).sum().item()
        return right / total
if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=10, steps_per_epoch=None, callbacks=[evaluator])
else:
    # When imported as a module, load the best checkpoint instead of training
    model.load_weights('best_model.pt')
# bert4torch使用教程
## 1. 建模流程示例
```python
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import Callback, Logger, Tensorboard, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
# 建立分词器
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集,可以自己继承Dataset来定义
class MyDataset(ListDataset):
@staticmethod
def load_data(filenames):
"""读取文本文件,整理成需要的格式
"""
D = []
return D
def collate_fn(batch):
'''处理上述load_data得到的batch数据,整理成对应device上的Tensor
注意:返回值分为feature和label, feature可整理成list或tuple
'''
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
# 加载数据集
train_dataloader = DataLoader(MyDataset('file_path'), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
# 定义bert上的模型结构,以文本二分类为例
class Model(BaseModel):
def __init__(self) -> None:
super().__init__()
self.bert = build_transformer_model(config_path, checkpoint_path, with_pool=True)
self.dropout = nn.Dropout(0.1)
self.dense = nn.Linear(768, 2)
def forward(self, token_ids, segment_ids):
# build_transformer_model得到的模型仅接受list/tuple传参,因此入参只有一个时候包装成[token_ids]
hidden_states, pooled_output = self.bert([token_ids, segment_ids])
output = self.dropout(pooled_output)
output = self.dense(output)
return output
model = Model().to(device)
# 定义使用的loss和optimizer,这里支持自定义
model.compile(
loss=nn.CrossEntropyLoss(), # 可以自定义Loss
optimizer=optim.Adam(model.parameters(), lr=2e-5), # 可以自定义优化器
scheduler=None, # 可以自定义scheduler
metrics=['accuracy'] # 可以自定义回调函数
)
# 定义评价函数
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存,这里定义仅在epoch结束后调用
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, global_step, epoch, logs=None):
val_acc = evaluate(valid_dataloader)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
model.save_weights('best_model.pt')
print(f'val_acc: {val_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
if __name__ == '__main__':
evaluator = Evaluator()
# 指定训练的epochs,每轮的steps_per_epoch(不设置或者设置为None表示自动计算),梯度累积grad_accumulation_steps
# 使用默认Logger和Tensorboard
model.fit(train_dataloader, epochs=20, steps_per_epoch=100, grad_accumulation_steps=2,
callbacks=[evaluator, Logger('./test/test.log'), Tensorboard('./test/')])
```
## 2. 主要模块讲解
### 1) 数据处理部分
#### a. 精简词表,并建立分词器
```python
token_dict, keep_tokens = load_vocab(
dict_path=dict_path, # 词典文件路径
simplified=True, # 过滤冗余部分token,如[unused1]
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'], # 指定起始的token,如[UNK]从bert默认的103位置调整到1
)
tokenizer = Tokenizer(token_dict, do_lower_case=True) # 若无需精简,仅使用当前行定义tokenizer即可
```
#### b. 好用的小函数
- `text_segmentate()`: 将长文本按指定的分隔符切分为不超过maxlen的片段(本文件各示例中用于把句子切分成不超过maxlen-2的子句)
- `tokenizer.encode()`: 把text转成token_ids,默认句首添加[CLS],句尾添加[SEP],返回token_ids和segment_ids,相当于同时调用`tokenizer.tokenize()`和`tokenizer.tokens_to_ids()`
- `tokenizer.decode()`: 把token_ids转成text,默认会删除[CLS], [SEP], [UNK]等特殊字符,相当于调用`tokenizer.ids_to_tokens()`并做了一些后处理
- `sequence_padding`: 将序列padding到同一长度, 传入一个元素为list, ndarray, tensor的list,返回ndarray或tensor
- `parallel_apply()`: 多进程或多线程地将func应用到iterable的每个元素中
- `get_pool_emb()`: 根据参数设置,多种方式获取句向量
- `seed_everything()`: 固定全局seed
### 2) 模型定义部分
- 模型创建
```python
'''
调用模型后,若设置with_pool, with_nsp, with_mlm,则返回值依次为[hidden_states, pool_emb/nsp_emb, mlm_scores],否则只返回hidden_states
'''
build_transformer_model(
config_path=config_path, # 模型的config文件地址
checkpoint_path=checkpoint_path, # 模型文件地址,默认值None表示不加载预训练模型
model='bert', # 加载的模型结构,这里Model也可以基于nn.Module自定义后传入
application='encoder', # 模型应用,支持encoder,lm和unilm格式
segment_vocab_size=2, # type_token_ids数量,默认为2,如不传入segment_ids则需设置为0
with_pool=False, # 是否包含Pool部分
with_nsp=False, # 是否包含NSP部分
with_mlm=False, # 是否包含MLM部分
return_model_config=False, # 是否返回模型配置参数
output_all_encoded_layers=False, # 是否返回所有hidden_state层
layer_add_embs=nn.Embedding(2, 768), # 自定义额外的embedding输入
)
```
- 定义loss,optimizer,scheduler, metrics等
```python
'''
定义使用的loss、optimizer和metrics,这里支持自定义
'''
def eval(y_pred, y_true):
# 仅做示意
return {'rouge-1': random.random(), 'rouge-2': random.random(), 'rouge-l': random.random(), 'bleu': random.random()}
def f1(y_pred, y_true):
# 仅做示意
return random.random()
model.compile(
loss=nn.CrossEntropyLoss(), # 可以自定义Loss
optimizer=optim.Adam(model.parameters(), lr=2e-5), # 可以自定义优化器
scheduler=None, # 可以自定义scheduler
adversarial_train={'name': 'fgm'}, # 训练trick方案设置,支持fgm, pgd, gradient_penalty, vat
metrics=['accuracy', eval, {'f1': f1}] # loss等默认打印的字段无需设置,可多种方式自定义回调函数
)
```
- 自定义模型
```python
'''
基于bert上层的各类魔改,如last2layer_average, token_first_last_average
'''
class Model(BaseModel):
# 需要继承BaseModel
def __init__(self):
super().__init__()
self.bert = build_transformer_model(config_path, checkpoint_path)
def forward(self):
pass
```
- [自定义训练过程](https://github.com/Tongjilibo/bert4torch/blob/master/examples/tutorials/tutorials_custom_fit_progress.py)
```python
'''
自定义fit过程,适用于自带fit()不满足需求时
'''
class Model(BaseModel):
def fit(self, train_dataloader, steps_per_epoch, epochs):
train_dataloader = cycle(train_dataloader)
self.train()
for epoch in range(epochs):
for bti in range(steps_per_epoch):
train_X, train_y = next(train_dataloader)
output = self.forward(*train_X)
loss = self.criterion(output, train_y)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
```
- 模型保存和加载
```python
'''
prefix: 是否以原始的key来保存,如word_embedding原始key为bert.embeddings.word_embeddings.weight
默认为None表示不启用, 若基于BaseModel自定义模型,需指定为bert模型对应的成员变量名,直接使用设置为''
主要是为了别的训练框架容易加载
'''
# ====仅进行保存和加载====
model.save_weights(save_path, prefix=None) # 保存模型权重
model.load_weights(save_path) # 加载模型权重
# =======断点续训========
# 在Callback中的on_epoch_end()或on_batch_end()保存需要的参数
model.save_weights(save_path, prefix=None) # 保存模型权重
model.save_steps_params(save_path) # 保存训练进度参数,当前的epoch和step,断点续训使用
torch.save(optimizer.state_dict(), save_path) # 保存优化器,断点续训使用
# 加载前序训练保存的参数
model.load_weights(save_path) # 加载模型权重
model.load_steps_params(save_path) # 加载训练进度参数,断点续训使用
state_dict = torch.load(save_path, map_location='cpu') # 加载优化器,断点续训使用
optimizer.load_state_dict(state_dict)
```
- [加载transformers模型进行训练](https://github.com/Tongjilibo/bert4torch/blob/master/examples/tutorials/tutorials_load_transformers_model.py)
```python
from transformers import AutoModelForSequenceClassification
class Model(BaseModel):
def __init__(self):
super().__init__()
self.bert = AutoModelForSequenceClassification.from_pretrained("file_path", num_labels=2)
def forward(self, token_ids, attention_mask, segment_ids):
output = self.bert(input_ids=token_ids, attention_mask=attention_mask, token_type_ids=segment_ids)
return output.logits
```
### 3) 模型评估部分
```python
'''支持在多个位置执行
'''
class Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.best_val_acc = 0.
    def on_dataloader_end(self):
# 可用于重新生成dataloader
# 比如多个数据文件时,动态读取一个文件并重新生成dataloader的情况,如预训练
pass
def on_train_begin(self, logs=None): # 训练开始时候
pass
def on_train_end(self, logs=None): # 训练结束时候
pass
def on_batch_begin(self, global_step, local_step, logs=None): # batch开始时候
pass
def on_batch_end(self, global_step, local_step, logs=None): # batch结束时候
# 可以设置每隔多少个step,后台记录log,写tensorboard等
# 尽量不要在batch_begin和batch_end中print,防止打断进度条功能
pass
def on_epoch_begin(self, global_step, epoch, logs=None): # epoch开始时候
pass
def on_epoch_end(self, global_step, epoch, logs=None): # epoch结束时候
val_acc = evaluate(valid_dataloader)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
model.save_weights('best_model.pt')
print(f'val_acc: {val_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
```
## 3. 其他特性讲解
### 1) 单机多卡训练
#### a. 使用DataParallel
```python
'''DP有两种方式,第一种是forward只计算logit,第二种是forward直接计算loss
建议使用第二种,可以部分缓解负载不均衡的问题
'''
from bert4torch.models import BaseModelDP
# ===========处理数据和定义model===========
model = BaseModelDP(model) # 指定DP模式使用多gpu
model.compile(
loss=lambda x, _: x.mean(), # 多个gpu计算的loss的均值
optimizer=optim.Adam(model.parameters(), lr=2e-5),
)
```
#### b. 使用DistributedDataParallel
```python
'''DDP使用torch.distributed.launch,从命令行启动
'''
# 需要定义命令行参数
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=-1)
args = parser.parse_args()
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
# ===========处理数据和定义model===========
# 指定DDP模型使用多gpu, master_rank为指定用于打印训练过程的local_rank
model = BaseModelDDP(model, master_rank=0, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False)
# 定义使用的loss和optimizer,这里支持自定义
model.compile(
loss=lambda x, _: x, # 直接把forward计算的loss传出来
optimizer=optim.Adam(model.parameters(), lr=2e-5),
)
```
### 2) 日志记录
```python
# 自行用Tensorboard记录
from tensorboardX import SummaryWriter
class Evaluator(Callback):
"""每隔多少个step评估并记录tensorboard
"""
def on_batch_end(self, global_step, local_step, logs=None):
if global_step % 100 == 0:
writer.add_scalar(f"train/loss", logs['loss'], global_step)
val_acc = evaluate(valid_dataloader)
writer.add_scalar(f"valid/acc", val_acc, global_step)
# 使用默认的文件Logger和Tensorboard
model.fit(train_dataloader, epochs=20, steps_per_epoch=100, grad_accumulation_steps=2,
callbacks=[evaluator, Logger('./test/test.log'), Tensorboard('./test/')])
```
### 3) 打印训练参数
```python
from torchinfo import summary
summary(model, input_data=next(iter(train_dataloader))[0])
```
\ No newline at end of file
#! -*- coding:utf-8 -*-
# 自定义fit()训练过程
from itertools import cycle
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, text_segmentate, ListDataset, ProgbarLogger
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
# Hyper-parameters and local paths to the pretrained Chinese BERT-base checkpoint.
maxlen = 128
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer from the BERT vocab file
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Tab-separated sentiment corpus loader.

    Each input line has the form ``text<TAB>label``; long texts are split
    into pieces of at most maxlen - 2 characters so they still fit once
    [CLS]/[SEP] are added during tokenization.
    """
    @staticmethod
    def load_data(filenames):
        samples = []
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    label = int(label)
                    for piece in text_segmentate(text, maxlen - 2, seps, strips):
                        samples.append((piece, label))
        return samples
def collate_fn(batch):
    """Tokenize a batch, pad to a common length, move everything to `device`.

    Returns ([token_ids, segment_ids], labels) where labels is a 1-D tensor.
    """
    token_id_rows, segment_id_rows, label_rows = [], [], []
    for text, label in batch:
        ids, segs = tokenizer.encode(text, maxlen=maxlen)
        token_id_rows.append(ids)
        segment_id_rows.append(segs)
        label_rows.append([label])
    tokens = torch.tensor(sequence_padding(token_id_rows), dtype=torch.long, device=device)
    segments = torch.tensor(sequence_padding(segment_id_rows), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [tokens, segments], labels.flatten()
# Build the train/valid/test dataloaders (only the training split is shuffled).
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT sentiment classifier with a hand-written fit() loop.

    Demonstrates replacing the built-in fit() with a custom training loop
    (progress bar, per-epoch evaluation on the validation set).
    """
    def __init__(self) -> None:
        super().__init__()
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)

    def forward(self, token_ids, segment_ids):
        _, pooled = self.bert([token_ids, segment_ids])
        return self.dense(self.dropout(pooled))

    def fit(self, train_dataloader, steps_per_epoch, epochs=1):
        '''Custom training loop, for when the built-in fit() is not flexible enough.'''
        # Progress-bar display; drop it if not wanted.
        bar = ProgbarLogger(epochs, steps_per_epoch, ['loss'])
        global_step, best_val_acc = 0, 0
        batches = cycle(train_dataloader)  # endless iterator over the training data
        self.train()
        for epoch_idx in range(epochs):
            bar.on_epoch_begin(epoch=epoch_idx)
            for _ in range(steps_per_epoch):
                bar.on_batch_begin()
                inputs, targets = next(batches)
                logits = self.forward(*inputs)
                loss = self.criterion(logits, targets)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
                # Log keys must match the list passed to ProgbarLogger above.
                bar.on_batch_end(logs={'loss': loss.item()})
                global_step += 1
            bar.on_epoch_end()
            # Evaluate on the validation split at the end of every epoch.
            val_acc = evaluate(valid_dataloader)
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                # model.save_weights('best_model.pt')
            print(f'val_acc: {val_acc:.5f}, best_val_acc: {best_val_acc:.5f}\n')
model = Model().to(device)
# Define loss and optimizer (both customizable).
model.compile(
    loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),
)
# 定义评价函数
def evaluate(data):
    """Return classification accuracy of the global `model` over a dataloader."""
    seen, correct = 0., 0.
    for inputs, labels in data:
        preds = model.predict(inputs).argmax(axis=1)
        seen += len(labels)
        correct += (labels == preds).sum().item()
    return correct / seen
if __name__ == '__main__':
    # Train with the custom fit(): 20 epochs of 100 steps each.
    model.fit(train_dataloader, epochs=20, steps_per_epoch=100)
#! -*- coding:utf-8 -*-
# 调用transformers库中的模型来调用
# 本脚本演示功能为主,实际训练建议两者取其一
# 少量可能使用到的场景:
# 1)bert4torch的fit过程可以轻松使用对抗训练,梯度惩罚,虚拟对抗训练等功能
# 2)就是临时直接用transformers库里面的模型文件
# 3)写代码时候用于校验两者结果
from transformers import AutoModelForSequenceClassification
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
# Hyper-parameters and path to the BERT vocab used by the bert4torch tokenizer.
maxlen = 128
batch_size = 16
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Tab-separated sentiment corpus loader.

    Each input line has the form ``text<TAB>label``; long texts are split
    into pieces of at most maxlen - 2 characters so they still fit once
    [CLS]/[SEP] are added during tokenization.
    """
    @staticmethod
    def load_data(filenames):
        samples = []
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    label = int(label)
                    for piece in text_segmentate(text, maxlen - 2, seps, strips):
                        samples.append((piece, label))
        return samples
def collate_fn(batch):
    """Tokenize a batch, pad to a common length, move everything to `device`.

    Returns ([token_ids, segment_ids], labels) where labels is a 1-D tensor.
    """
    token_id_rows, segment_id_rows, label_rows = [], [], []
    for text, label in batch:
        ids, segs = tokenizer.encode(text, maxlen=maxlen)
        token_id_rows.append(ids)
        segment_id_rows.append(segs)
        label_rows.append([label])
    tokens = torch.tensor(sequence_padding(token_id_rows), dtype=torch.long, device=device)
    segments = torch.tensor(sequence_padding(segment_id_rows), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [tokens, segments], labels.flatten()
# Build the train/valid/test dataloaders (only the training split is shuffled).
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
class Model(BaseModel):
    """Expose a HuggingFace sequence-classification model through the BaseModel interface."""
    def __init__(self):
        super().__init__()
        self.bert = AutoModelForSequenceClassification.from_pretrained("F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12", num_labels=2)

    def forward(self, token_ids, segment_ids):
        outputs = self.bert(input_ids=token_ids, token_type_ids=segment_ids)
        return outputs.logits
model = Model().to(device)
# Define loss, optimizer and metrics (all customizable).
model.compile(
    loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5),
    metrics=['accuracy']
)
# 定义评价函数
def evaluate(data):
    """Return classification accuracy of the global `model` over a dataloader."""
    seen, correct = 0., 0.
    for inputs, labels in data:
        preds = model.predict(inputs).argmax(axis=1)
        seen += len(labels)
        correct += (labels == preds).sum().item()
    return correct / seen
class Evaluator(Callback):
    """Evaluate at the end of every epoch and track the best validation accuracy."""
    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        acc = evaluate(valid_dataloader)
        if acc > self.best_val_acc:
            self.best_val_acc = acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
if __name__ == '__main__':
    evaluator = Evaluator()
    model.fit(train_dataloader, epochs=20, steps_per_epoch=100, grad_accumulation_steps=2, callbacks=[evaluator])
else:
    # NOTE(review): the save in Evaluator.on_epoch_end is commented out, so
    # 'best_model.pt' may not exist on import — confirm before relying on this.
    model.load_weights('best_model.pt')
#! -*- coding:utf-8 -*-
# 以文本分类为例,展示部分tips的使用方法
# torchinfo打印参数,自定义metrics, 断点续训,默认Logger和Tensorboard
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, Logger, Tensorboard, text_segmentate, ListDataset, seed_everything, get_pool_emb
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torchinfo import summary
import os
# Hyper-parameters and local paths to the pretrained Chinese BERT-base checkpoint.
maxlen = 256
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
choice = 'train'  # 'train' runs training, 'infer' runs inference
# Fix all random seeds for reproducibility
seed_everything(42)
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# 加载数据集
class MyDataset(ListDataset):
    """Tab-separated sentiment corpus loader.

    Each input line has the form ``text<TAB>label``; long texts are split
    into pieces of at most maxlen - 2 characters so they still fit once
    [CLS]/[SEP] are added during tokenization.
    """
    @staticmethod
    def load_data(filenames):
        samples = []
        seps, strips = u'\n。!?!?;;,, ', u';;,, '
        for fname in filenames:
            with open(fname, encoding='utf-8') as fin:
                for line in fin:
                    text, label = line.strip().split('\t')
                    label = int(label)
                    for piece in text_segmentate(text, maxlen - 2, seps, strips):
                        samples.append((piece, label))
        return samples
def collate_fn(batch):
    """Tokenize a batch, pad to a common length, move everything to `device`.

    Returns ([token_ids, segment_ids], labels) where labels is a 1-D tensor.
    """
    token_id_rows, segment_id_rows, label_rows = [], [], []
    for text, label in batch:
        ids, segs = tokenizer.encode(text, maxlen=maxlen)
        token_id_rows.append(ids)
        segment_id_rows.append(segs)
        label_rows.append([label])
    tokens = torch.tensor(sequence_padding(token_id_rows), dtype=torch.long, device=device)
    segments = torch.tensor(sequence_padding(segment_id_rows), dtype=torch.long, device=device)
    labels = torch.tensor(label_rows, dtype=torch.long, device=device)
    return [tokens, segments], labels.flatten()
# Build the train/valid/test dataloaders (only the training split is shuffled).
train_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['F:/Projects/data/corpus/sentence_classification/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# 定义bert上的模型结构
class Model(BaseModel):
    """BERT classifier whose sentence embedding is chosen by `pool_method`."""
    def __init__(self, pool_method='cls') -> None:
        super().__init__()
        self.pool_method = pool_method
        self.bert = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=True)
        self.dropout = nn.Dropout(0.1)
        self.dense = nn.Linear(self.bert.configs['hidden_size'], 2)

    def forward(self, token_ids, segment_ids):
        hidden, pooled = self.bert([token_ids, segment_ids])
        # Attention mask from the token ids: positions with id > 0 are real tokens.
        sent_emb = get_pool_emb(hidden, pooled, token_ids.gt(0).long(), self.pool_method)
        return self.dense(self.dropout(sent_emb))
model = Model().to(device)
# Print a parameter summary using one real batch as the example input.
summary(model, input_data=next(iter(train_dataloader))[0])
def acc(y_pred, y_true):
    """Fraction of samples whose argmax prediction matches the label."""
    predicted = y_pred.argmax(dim=-1)
    return predicted.eq(y_true).sum().item() / y_true.numel()
# Define the optimizer; resume from checkpoints when they exist.
optimizer = optim.Adam(model.parameters(), lr=2e-5)
if os.path.exists('last_model.pt'):
    model.load_weights('last_model.pt')  # restore model weights
if os.path.exists('last_steps.pt'):
    model.load_steps_params('last_steps.pt')  # restore epoch/step for resumed training
if os.path.exists('last_optimizer.pt'):
    state_dict = torch.load('last_optimizer.pt', map_location='cpu')  # restore optimizer state
    optimizer.load_state_dict(state_dict)
model.compile(
    loss=nn.CrossEntropyLoss(),
    optimizer=optimizer,
    metrics={'acc': acc}
)
class Evaluator(Callback):
    """Per-epoch evaluation, metric logging and checkpointing for resumable training."""
    def __init__(self):
        self.best_val_acc = 0.

    def on_epoch_end(self, global_step, epoch, logs=None):
        val_acc = self.evaluate(valid_dataloader)
        test_acc = self.evaluate(test_dataloader)
        # Expose the metrics to downstream callbacks (Logger / Tensorboard).
        logs['val/acc'] = val_acc
        logs['test/acc'] = test_acc
        if val_acc > self.best_val_acc:
            self.best_val_acc = val_acc
            # model.save_weights('best_model.pt')
        print(f'val_acc: {val_acc:.5f}, test_acc: {test_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
        # Checkpoint everything needed to resume training later.
        model.save_weights('last_model.pt', prefix=None)  # model weights
        model.save_steps_params('last_steps.pt')  # current epoch and step
        torch.save(optimizer.state_dict(), 'last_optimizer.pt')  # optimizer state

    def evaluate(self, data):
        """Return classification accuracy of the global `model` over a dataloader."""
        seen, correct = 0., 0.
        for inputs, labels in data:
            preds = model.predict(inputs).argmax(axis=1)
            seen += len(labels)
            correct += (labels == preds).sum().item()
        return correct / seen
def inference(texts):
    """Classify each text one sample at a time and print the predicted label."""
    for text in texts:
        token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
        # Add a leading batch dimension of size 1.
        tokens = torch.tensor(token_ids, dtype=torch.long, device=device)[None, :]
        segments = torch.tensor(segment_ids, dtype=torch.long, device=device)[None, :]
        logit = model.predict([tokens, segments])
        pred = torch.argmax(torch.softmax(logit, dim=-1)).cpu().numpy()
        print(text, ' ----> ', pred)
if __name__ == '__main__':
    if choice == 'train':
        evaluator = Evaluator()
        model.fit(train_dataloader, epochs=10, steps_per_epoch=100, callbacks=[evaluator, Logger('test.log'), Tensorboard('./')])
    else:
        # NOTE(review): 'best_model.pt' is only written if the save line in
        # Evaluator is uncommented — confirm the file exists before inference.
        model.load_weights('best_model.pt')
        inference(['我今天特别开心', '我今天特别生气'])
#! -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for the bert4torch library.
setup(
    name='bert4torch',
    version='0.2.2',
    description='an elegant bert4torch',
    long_description='bert4torch: https://github.com/Tongjilibo/bert4torch',
    license='MIT Licence',
    url='https://github.com/Tongjilibo/bert4torch',
    author='Tongjilibo',
    install_requires=['torch>1.6'],  # minimal runtime dependency
    packages=find_packages()
)
\ No newline at end of file
<!-- Generated by scripts/utils/show_asr_result.sh -->
# RESULTS
## Environments
- date: `Mon Oct 19 13:56:23 JST 2020`
- python version: `3.7.3 (default, Mar 27 2019, 22:11:17) [GCC 7.3.0]`
- espnet version: `espnet 0.9.0`
- pytorch version: `pytorch 1.6.0`
- Git hash: `20b0c89369d9dd3e05780b65fdd00a9b4f4891e5`
- Commit date: `Mon Oct 12 09:28:20 2020 -0400`
## asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp
### WER
|dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err|
|---|---|---|---|---|---|---|---|---|
|decode_asr_rnn_lm_lm_train_lm_char_valid.loss.ave_asr_model_valid.acc.ave/dev|14326|14326|64.8|35.2|0.0|0.0|35.2|35.2|
|decode_asr_rnn_lm_lm_train_lm_char_valid.loss.ave_asr_model_valid.acc.ave/test|7176|7176|63.5|36.5|0.0|0.0|36.5|36.5|
|decode_asr_rnn_lm_lm_train_lm_transformer_char_batch_bins2000000_valid.loss.ave_asr_model_valid.acc.ave/dev|14326|14326|66.3|33.7|0.0|0.0|33.7|33.7|
|decode_asr_rnn_lm_lm_train_lm_transformer_char_batch_bins2000000_valid.loss.ave_asr_model_valid.acc.ave/test|7176|7176|65.0|35.0|0.0|0.0|35.0|35.0|
### CER
|dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err|
|---|---|---|---|---|---|---|---|---|
|decode_asr_rnn_lm_lm_train_lm_char_valid.loss.ave_asr_model_valid.acc.ave/dev|14326|205341|95.5|4.4|0.1|0.1|4.6|35.2|
|decode_asr_rnn_lm_lm_train_lm_char_valid.loss.ave_asr_model_valid.acc.ave/test|7176|104765|95.2|4.7|0.1|0.1|4.9|36.5|
|decode_asr_rnn_lm_lm_train_lm_transformer_char_batch_bins2000000_valid.loss.ave_asr_model_valid.acc.ave/dev|14326|205341|95.7|4.2|0.1|0.1|4.4|33.7|
|decode_asr_rnn_lm_lm_train_lm_transformer_char_batch_bins2000000_valid.loss.ave_asr_model_valid.acc.ave/test|7176|104765|95.4|4.5|0.1|0.1|4.7|35.0|
accum_grad: 4
allow_variable_data_keys: false
batch_bins: 4000000
batch_size: 20
batch_type: numel
best_model_criterion:
- - valid
- acc
- max
bpemodel: null
chunk_length: 500
chunk_shift_ratio: 0.5
cleaner: null
collect_stats: false
config: ./conf/train_asr_conformer3.yaml
ctc_conf:
ctc_type: builtin
dropout_rate: 0.0
reduce: true
cudnn_benchmark: false
cudnn_deterministic: true
cudnn_enabled: true
decoder: transformer
decoder_conf:
attention_heads: 4
dropout_rate: 0.1
linear_units: 2048
num_blocks: 6
positional_dropout_rate: 0.1
self_attention_dropout_rate: 0.0
src_attention_dropout_rate: 0.0
dist_backend: nccl
dist_init_method: env://
dist_launcher: null
dist_master_addr: null
dist_master_port: null
dist_rank: null
dist_world_size: null
distributed: false
dry_run: false
early_stopping_criterion:
- valid
- loss
- min
encoder: conformer
encoder_conf:
activation_type: swish
attention_dropout_rate: 0.0
attention_heads: 4
cnn_module_kernel: 15
dropout_rate: 0.1
input_layer: conv2d
linear_units: 2048
macaron_style: true
normalize_before: true
num_blocks: 12
output_size: 256
pos_enc_layer_type: rel_pos
positional_dropout_rate: 0.1
selfattention_layer_type: rel_selfattn
use_cnn_module: true
fold_length:
- 51200
- 150
frontend: default
frontend_conf:
fs: 16k
g2p: null
grad_clip: 5
grad_clip_type: 2.0
grad_noise: false
init: null
input_size: null
iterator_type: sequence
keep_nbest_models: 10
local_rank: 0
log_interval: null
log_level: INFO
max_cache_fd: 32
max_cache_size: 0.0
max_epoch: 50
model_conf:
ctc_weight: 0.3
length_normalized_loss: false
lsm_weight: 0.1
multiple_iterator: false
multiprocessing_distributed: false
ngpu: 1
no_forward_run: false
non_linguistic_symbols: null
normalize: global_mvn
normalize_conf:
stats_file: /home/sunzhq/workspace/yidong-infer/conformer/34e9cabc2c29fd0e3a2917ffa525d98b/exp/asr_stats_raw_sp/train/feats_stats.npz
num_att_plot: 3
num_cache_chunks: 1024
num_iters_per_epoch: null
num_workers: 4
optim: adam
optim_conf:
lr: 0.0005
output_dir: exp/asr_train_asr_conformer3_raw_char_batch_bins4000000_accum_grad4_sp
patience: null
pretrain_key: []
pretrain_path: []
print_config: false
required:
- output_dir
- token_list
resume: true
scheduler: warmuplr
scheduler_conf:
warmup_steps: 30000
seed: 0
sort_batch: descending
sort_in_batch: descending
specaug: specaug
specaug_conf:
apply_freq_mask: true
apply_time_mask: true
apply_time_warp: true
freq_mask_width_range:
- 0
- 30
num_freq_mask: 2
num_time_mask: 2
time_mask_width_range:
- 0
- 40
time_warp_mode: bicubic
time_warp_window: 5
token_list:
- <blank>
- <unk>
- "\u7684"
- "\u4E00"
- "\u5728"
- "\u5341"
- "\u4E2D"
- "\u662F"
- "\u4EBA"
- "\u6709"
- "\u4E8C"
- "\u4E0A"
- "\u4E86"
- "\u4E0D"
- "\u56FD"
- "\u5E02"
- "\u5927"
- "\u4E1A"
- "\u4E3A"
- "\u5E74"
- "\u4E09"
- "\u53D1"
- "\u4E2A"
- "\u5206"
- "\u51FA"
- "\u4F1A"
- "\u516C"
- "\u884C"
- "\u5730"
- "\u6210"
- "\u8FD9"
- "\u548C"
- "\u5230"
- "\u4E94"
- "\u4EA7"
- "\u65F6"
- "\u5BF9"
- "\u623F"
- "\u767E"
- "\u80FD"
- "\u573A"
- "\u6765"
- "\u4EE5"
- "\u65B0"
- "\u4E4B"
- "\u65E5"
- "\u8005"
- "\u5C06"
- "\u73B0"
- "\u56DB"
- "\u8981"
- "\u5BB6"
- "\u8D44"
- "\u591A"
- "\u6708"
- "\u4E5F"
- "\u65B9"
- "\u540E"
- "\u673A"
- "\u4E0B"
- "\u524D"
- "\u96F6"
- "\u6BD4"
- "\u4E8E"
- "\u751F"
- "\u70B9"
- "\u5F00"
- "\u52A8"
- "\u9AD8"
- "\u7ECF"
- "\u8FDB"
- "\u62A5"
- "\u4F53"
- "\u8D5B"
- "\u5B50"
- "\u4E07"
- "\u8F66"
- "\u7528"
- "\u91D1"
- "\u53F8"
- "\u53EF"
- "\u88AB"
- "\u8FC7"
- "\u624B"
- "\u672C"
- "\u4F5C"
- "\u81EA"
- "\u5168"
- "\u516B"
- "\u516D"
- "\u6700"
- "\u4EF7"
- "\u76EE"
- "\u7535"
- "\u90E8"
- "\u4EA4"
- "\u4E5D"
- "\u4E03"
- "\u9762"
- "\u6211"
- "\u4F01"
- "\u52A0"
- "\u5C0F"
- "\u5EA6"
- "\u5B9E"
- "\u540C"
- "\u57CE"
- "\u5DE5"
- "\u5176"
- "\u529B"
- "\u5B9A"
- "\u800C"
- "\u5143"
- "\u5408"
- "\u5DF2"
- "\u5185"
- "\u4E0E"
- "\u6CD5"
- "\u8FD8"
- "\u5173"
- "\u7F51"
- "\u5F97"
- "\u4ED6"
- "\u5C31"
- "\u5165"
- "\u540D"
- "\u54C1"
- "\u5973"
- "\u8BB0"
- "\u7406"
- "\u4E8B"
- "\u957F"
- "\u4E24"
- "\u5546"
- "\u90FD"
- "\u4EEC"
- "\u4EAC"
- "\u5E76"
- "\u4F46"
- "\u5E73"
- "\u5236"
- "\u4FDD"
- "\u636E"
- "\u671F"
- "\u5316"
- "\u4E3B"
- "\u91CD"
- "\u8868"
- "\u6B21"
- "\u76F8"
- "\u91CF"
- "\u901A"
- "\u9053"
- "\u653F"
- "\u6240"
- "\u5929"
- "\u7B2C"
- "\u5229"
- "\u95F4"
- "\u6D77"
- "\u6570"
- "\u52A1"
- "\u63D0"
- "\u5317"
- "\u5C55"
- "\u5458"
- "\u7BA1"
- "\u6295"
- "\u56E0"
- "\u5EFA"
- "\u597D"
- "\u5916"
- "\u533A"
- "\u66F4"
- "\u793A"
- "\u589E"
- "\u4ECE"
- "\u8BA1"
- "\u4FE1"
- "\u6027"
- "\u7B49"
- "\u8FD0"
- "\u9879"
- "\u5E94"
- "\u5F53"
- "\u6536"
- "\u4F4D"
- "\u7740"
- "\u8D77"
- "\u5B66"
- "\u53F0"
- "\u6C11"
- "\u6301"
- "\u89C4"
- "\u8BBE"
- "\u660E"
- "\u80A1"
- "\u6B63"
- "\u6CA1"
- "\u5FC3"
- "\u7136"
- "\u5F88"
- "\u4ECA"
- "\u8C03"
- "\u53BB"
- "\u5B89"
- "\u6B64"
- "\u4E1C"
- "\u961F"
- "\u5982"
- "\u7EBF"
- "\u79D1"
- "\u4E16"
- "\u65E0"
- "\u8FBE"
- "\u8EAB"
- "\u679C"
- "\u8BC1"
- "\u57FA"
- "\u53D7"
- "\u7537"
- "\u9700"
- "\u6807"
- "\u5E03"
- "\u60C5"
- "\u683C"
- "\u8FD1"
- "\u6B65"
- "\u672A"
- "\u8D39"
- "\u6C42"
- "\u5F0F"
- "\u6D88"
- "\u5343"
- "\u7F8E"
- "\u4E9B"
- "\u91CC"
- "\u7C73"
- "\u5411"
- "\u770B"
- "\u7EED"
- "\u606F"
- "\u610F"
- "\u63A5"
- "\u95E8"
- "\u56DE"
- "\u53CA"
- "\u9500"
- "\u8001"
- "\u83B7"
- "\u603B"
- "\u76D1"
- "\u6253"
- "\u8054"
- "\u81F3"
- "\u4EBF"
- "\u8BF4"
- "\u8BAF"
- "\u4F4F"
- "\u73AF"
- "\u4EF6"
- "\u6574"
- "\u6C34"
- "\u6280"
- "\u8DEF"
- "\u9662"
- "\u5C40"
- "\u7279"
- "\u8BE5"
- "\u7EDF"
- "\u7531"
- "\u552E"
- "\u8D2D"
- "\u5F3A"
- "\u6539"
- "\u95EE"
- "\u4E50"
- "\u697C"
- "\u6DA8"
- "\u5904"
- "\u51B3"
- "\u8BA9"
- "\u7CFB"
- "\u6237"
- "\u9898"
- "\u63A8"
- "\u5C11"
- "\u5E7F"
- "\u663E"
- "\u964D"
- "\u8DD1"
- "\u5F71"
- "\u53EA"
- "\u9009"
- "\u79F0"
- "\u521B"
- "\u6613"
- "\u6218"
- "\u9996"
- "\u5B8C"
- "\u6848"
- "\u7B56"
- "\u5E38"
- "\u67E5"
- "\u53C2"
- "\u79CD"
- "\u724C"
- "\u7A0B"
- "\u94F6"
- "\u5907"
- "\u8BA4"
- "\u8425"
- "\u7ACB"
- "\u52BF"
- "\u7ED3"
- "\u9020"
- "\u8D85"
- "\u5DF1"
- "\u51C6"
- "\u5B58"
- "\u9669"
- "\u7403"
- "\u5404"
- "\u4EE3"
- "\u4F4E"
- "\u518D"
- "\u505A"
- "\u7EA7"
- "\u6B3E"
- "\u653E"
- "\u7269"
- "\u544A"
- "\u539F"
- "\u53CB"
- "\u8F6C"
- "\u8B66"
- "\u5468"
- "\u754C"
- "\u5F20"
- "\u6837"
- "\u4F20"
- "\u8F83"
- "\u98CE"
- "\u5355"
- "\u7ED9"
- "\u5979"
- "\u5DDE"
- "\u89E3"
- "\u5219"
- "\u89C6"
- "\u6307"
- "\u9884"
- "\u5347"
- "\u534E"
- "\u4F9B"
- "\u8D70"
- "\u6BCF"
- "\u53D6"
- "\u5BFC"
- "\u641C"
- "\u96C6"
- "\u6587"
- "\u53D8"
- "\u5BA2"
- "\u6392"
- "\u7247"
- "\u5934"
- "\u4EFB"
- "\u79EF"
- "\u672F"
- "\u7387"
- "\u578B"
- "\u519B"
- "\u65AF"
- "\u7814"
- "\u522B"
- "\u975E"
- "\u76F4"
- "\u667A"
- "\u901F"
- "\u7EC4"
- "\u661F"
- "\u9886"
- "\u53E3"
- "\u4EFD"
- "\u5C81"
- "\u9A6C"
- "\u738B"
- "\u5FEB"
- "\u4E13"
- "\u793E"
- "\u4F7F"
- "\u56E2"
- "\u6A21"
- "\u5668"
- "\u96BE"
- "\u6D3B"
- "\u62C9"
- "\u6216"
- "\u7EA6"
- "\u65BD"
- "\u6E90"
- "\u6784"
- "\u652F"
- "\u533B"
- "\u513F"
- "\u5E26"
- "\u670D"
- "\u5148"
- "\u60F3"
- "\u5F15"
- "\u4E48"
- "\u529E"
- "\u7167"
- "\u72D0"
- "\u6743"
- "\u5FAE"
- "\u5357"
- "\u59CB"
- "\u878D"
- "\u6DF1"
- "\u58EB"
- "\u6E38"
- "\u7EE9"
- "\u4EC5"
- "\u51B5"
- "\u5A92"
- "\u968F"
- "\u534A"
- "\u8D8A"
- "\u5E45"
- "\u786E"
- "\u6CE8"
- "\u7C7B"
- "\u4E89"
- "\u7A0E"
- "\u9650"
- "\u6D41"
- "\u5747"
- "\u63A7"
- "\u5145"
- "\u989D"
- "\u671B"
- "\u8FDE"
- "\u5212"
- "\u5965"
- "\u4E9A"
- "\u5305"
- "\u5A31"
- "\u897F"
- "\u8D22"
- "\u503C"
- "\u4F24"
- "\u67D0"
- "\u81F4"
- "\u7EC8"
- "\u7A7A"
- "\u6D4E"
- "\u4F17"
- "\u9645"
- "\u571F"
- "\u4E70"
- "\u4ECD"
- "\u80B2"
- "\u5E08"
- "\u6C7D"
- "\u77E5"
- "\u8D28"
- "\u6001"
- "\u5177"
- "\u674E"
- "\u8D23"
- "\u7A76"
- "\u9732"
- "\u6761"
- "\u51E0"
- "\u5C45"
- "\u5171"
- "\u54CD"
- "\u53CD"
- "\u7AD9"
- "\u51A0"
- "\u8282"
- "\u5B63"
- "\u4F18"
- "\u59D4"
- "\u5B85"
- "\u89C2"
- "\u4E92"
- "\u89C1"
- "\u8303"
- "\u5883"
- "\u611F"
- "\u8D1F"
- "\u6BB5"
- "\u5931"
- "\u91C7"
- "\u5957"
- "\u57DF"
- "\u5C14"
- "\u4E3E"
- "\u4F55"
- "\u5149"
- "\u6C14"
- "\u843D"
- "\u535A"
- "\u6559"
- "\u9526"
- "\u6797"
- "\u5C71"
- "\u4F9D"
- "\u7EE7"
- "\u6781"
- "\u5F62"
- "\u56FE"
- "\u5BA1"
- "\u7ADE"
- "\u76CA"
- "\u65AD"
- "\u8D37"
- "\u6548"
- "\u5E9C"
- "\u590D"
- "\u8BB8"
- "\u5BB9"
- "\u5065"
- "\u51FB"
- "\u8DB3"
- "\u53C8"
- "\u8BC9"
- "\u52A9"
- "\u5B69"
- "\u8272"
- "\u505C"
- "\u7968"
- "\u53CC"
- "\u62FF"
- "\u677F"
- "\u677E"
- "\u70ED"
- "\u90A3"
- "\u628A"
- "\u5374"
- "\u6E05"
- "\u5218"
- "\u8BAE"
- "\u8003"
- "\u51CF"
- "\u66FE"
- "\u7591"
- "\u4F8B"
- "\u9664"
- "\u529F"
- "\u5360"
- "\u4F60"
- "\u8BD5"
- "\u6839"
- "\u6E2F"
- "\u592A"
- "\u79BB"
- "\u624D"
- "\u8D27"
- "\u7A81"
- "\u6D89"
- "\u4E14"
- "\u5238"
- "\u914D"
- "\u76D8"
- "\u5373"
- "\u5E93"
- "\u4ED8"
- "\u7834"
- "\u804C"
- "\u6F14"
- "\u519C"
- "\u7F6E"
- "\u7EAA"
- "\u8BBA"
- "\u771F"
- "\u9F99"
- "\u665A"
- "\u88C5"
- "\u7231"
- "\u53F7"
- "\u7EC3"
- "\u6B7B"
- "\u538B"
- "\u4EB2"
- "\u4E25"
- "\u8BC4"
- "\u7530"
- "\u8BDD"
- "\u6258"
- "\u62A4"
- "\u706B"
- "\u534F"
- "\u7EA2"
- "\u6C5F"
- "\u514B"
- "\u5356"
- "\u8A00"
- "\u79DF"
- "\u5584"
- "\u9891"
- "\u666E"
- "\u98DE"
- "\u9A8C"
- "\u8865"
- "\u8FB9"
- "\u6EE1"
- "\u8C61"
- "\u8F6F"
- "\u7B97"
- "\u906D"
- "\u9980"
- "\u95FB"
- "\u7A33"
- "\u5382"
- "\u8FDC"
- "\u82F9"
- "\u94B1"
- "\u62C5"
- "\u5224"
- "\u5B98"
- "\u867D"
- "\u6E7E"
- "\u6309"
- "\u6628"
- "\u6821"
- "\u5FC5"
- "\u56ED"
- "\u7565"
- "\u6551"
- "\u5E0C"
- "\u5E95"
- "\u6267"
- "\u591F"
- "\u5F81"
- "\u62CD"
- "\u5386"
- "\u50CF"
- "\u6DA6"
- "\u5C42"
- "\u503A"
- "\u4FBF"
- "\u969C"
- "\u56F4"
- "\u5EB7"
- "\u5E97"
- "\u5F80"
- "\u5217"
- "\u65E9"
- "\u6D4B"
- "\u5F55"
- "\u5426"
- "\u9999"
- "\u5B9D"
- "\u9633"
- "\u7D22"
- "\u6838"
- "\u5174"
- "\u68C0"
- "\u72B6"
- "\u82F1"
- "\u6751"
- "\u6599"
- "\u4E91"
- "\u7559"
- "\u592B"
- "\u79FB"
- "\u5956"
- "\u75C5"
- "\u4E34"
- "\u8F7B"
- "\u7701"
- "\u79D2"
- "\u6FC0"
- "\u8BF7"
- "\u9769"
- "\u5C5E"
- "\u9047"
- "\u8DCC"
- "\u7EF4"
- "\u6279"
- "\u5FB7"
- "\u627F"
- "\u7AEF"
- "\u4ECB"
- "\u7CBE"
- "\u593A"
- "\u7FA4"
- "\u521D"
- "\u80DC"
- "\u5361"
- "\u5C3D"
- "\u82B1"
- "\u8F86"
- "\u5B83"
- "\u6545"
- "\u795E"
- "\u5C4A"
- "\u6CBB"
- "\u900F"
- "\u666F"
- "\u767D"
- "\u526F"
- "\u4EC0"
- "\u5BA3"
- "\u94C1"
- "\u6768"
- "\u8DF3"
- "\u5047"
- "\u767B"
- "\u798F"
- "\u9752"
- "\u836F"
- "\u5A5A"
- "\u517B"
- "\u5E55"
- "\u8FDD"
- "\u77ED"
- "\u8BBF"
- "\u4FEE"
- "\u7EB7"
- "\u5F8B"
- "\u5DE6"
- "\u89D2"
- "\u9152"
- "\u62EC"
- "\u7206"
- "\u5ACC"
- "\u5F84"
- "\u5B81"
- "\u8463"
- "\u9002"
- "\u9010"
- "\u521A"
- "\u9632"
- "\u9648"
- "\u5348"
- "\u5DEE"
- "\u5EAD"
- "\u72EC"
- "\u6CE2"
- "\u98DF"
- "\u8BC6"
- "\u4F3C"
- "\u5019"
- "\u9EC4"
- "\u4EA1"
- "\u8BAD"
- "\u4E66"
- "\u9000"
- "\u5F85"
- "\u822A"
- "\u5757"
- "\u51B2"
- "\u6269"
- "\u5434"
- "\u751A"
- "\u7533"
- "\u4F1F"
- "\u773C"
- "\u5DF4"
- "\u89C9"
- "\u627E"
- "\u6362"
- "\u4E49"
- "\u8F6E"
- "\u6ED1"
- "\u5E2D"
- "\u592E"
- "\u9001"
- "\u53F3"
- "\u536B"
- "\u4E58"
- "\u77F3"
- "\u5B57"
- "\u7F6A"
- "\u7F57"
- "\u6CF3"
- "\u5B59"
- "\u6790"
- "\u5FD7"
- "\u53E6"
- "\u6BCD"
- "\u7EFF"
- "\u62A2"
- "\u6B62"
- "\u4EE4"
- "\u7AE5"
- "\u5988"
- "\u53F2"
- "\u5211"
- "\u6D32"
- "\u8FF0"
- "\u7A7F"
- "\u5FF5"
- "\u7EB3"
- "\u635F"
- "\u5BCC"
- "\u514D"
- "\u6BD2"
- "\u7EDC"
- "\u7D27"
- "\u59BB"
- "\u4E4E"
- "\u8C6A"
- "\u7D20"
- "\u5BB3"
- "\u5012"
- "\u5438"
- "\u8857"
- "\u4FC3"
- "\u62E9"
- "\u6740"
- "\u8FFD"
- "\u5DE8"
- "\u72AF"
- "\u58F0"
- "\u613F"
- "\u6668"
- "\u601D"
- "\u8C08"
- "\u6CB3"
- "\u9547"
- "\u5C3C"
- "\u8DDF"
- "\u5E86"
- "\u94FE"
- "\u63AA"
- "\u501F"
- "\u8D54"
- "\u5BC6"
- "\u5733"
- "\u8D34"
- "\u82CF"
- "\u6E29"
- "\u9A97"
- "\u4E60"
- "\u6444"
- "\u7248"
- "\u5E2E"
- "\u5E01"
- "\u9636"
- "\u963F"
- "\u8FCE"
- "\u9A7E"
- "\u9ED1"
- "\u8D8B"
- "\u53BF"
- "\u79C1"
- "\u5403"
- "\u7597"
- "\u7EC6"
- "\u8651"
- "\u8111"
- "\u97E9"
- "\u4EAE"
- "\u65C5"
- "\u6293"
- "\u7F5A"
- "\u826F"
- "\u80CC"
- "\u8138"
- "\u7EDD"
- "\u73ED"
- "\u5371"
- "\u7840"
- "\u620F"
- "\u6234"
- "\u62DB"
- "\u547D"
- "\u5C1A"
- "\u7F3A"
- "\u4F19"
- "\u987B"
- "\u7236"
- "\u591C"
- "\u5207"
- "\u64CD"
- "\u6325"
- "\u6D3E"
- "\u5EF6"
- "\u649E"
- "\u62AB"
- "\u8863"
- "\u5267"
- "\u9646"
- "\u7ADF"
- "\u7B7E"
- "\u6B27"
- "\u4EAB"
- "\u6625"
- "\u5FBD"
- "\u88C1"
- "\u507F"
- "\u542F"
- "\u827A"
- "\u5B97"
- "\u5473"
- "\u5BDF"
- "\u4F30"
- "\u51C0"
- "\u52DF"
- "\u62E5"
- "\u91CA"
- "\u559C"
- "\u987A"
- "\u52B1"
- "\u9760"
- "\u6E10"
- "\u5170"
- "\u6CB9"
- "\u4F73"
- "\u56F0"
- "\u9488"
- "\u8FF7"
- "\u5199"
- "\u6750"
- "\u786C"
- "\u6865"
- "\u575A"
- "\u8BA2"
- "\u62F3"
- "\u7D2F"
- "\u76D6"
- "\u5BA4"
- "\u675F"
- "\u622A"
- "\u8DDD"
- "\u9A76"
- "\u65EC"
- "\u6B4C"
- "\u6089"
- "\u70C8"
- "\u5E8F"
- "\u60A3"
- "\u5E72"
- "\u6C61"
- "\u5708"
- "\u6770"
- "\u9876"
- "\u8D25"
- "\u4F34"
- "\u5F52"
- "\u63A2"
- "\u66DD"
- "\u6000"
- "\u6025"
- "\u6C60"
- "\u7EC7"
- "\u79C0"
- "\u59D0"
- "\u5CF0"
- "\u987E"
- "\u8BEF"
- "\u952E"
- "\u4E30"
- "\u73A9"
- "\u6C49"
- "\u53E4"
- "\u5F69"
- "\u8BA8"
- "\u670B"
- "\u6297"
- "\u523A"
- "\u6311"
- "\u8840"
- "\u51CC"
- "\u65E7"
- "\u62DF"
- "\u6652"
- "\u9644"
- "\u60CA"
- "\u6B22"
- "\u52B3"
- "\u4E08"
- "\u64AD"
- "\u5F90"
- "\u5417"
- "\u6E56"
- "\u7B11"
- "\u9986"
- "\u97F3"
- "\u9635"
- "\u5750"
- "\u8C37"
- "\u5F02"
- "\u600E"
- "\u590F"
- "\u9F84"
- "\u719F"
- "\u82E5"
- "\u60E0"
- "\u4F11"
- "\u6C38"
- "\u54EA"
- "\u6682"
- "\u8F93"
- "\u7ECD"
- "\u5370"
- "\u51B0"
- "\u7F13"
- "\u6696"
- "\u542C"
- "\u907F"
- "\u5609"
- "\u5BFB"
- "\u57F9"
- "\u7B79"
- "\u4F26"
- "\u96EA"
- "\u8D26"
- "\u66B4"
- "\u7B80"
- "\u4E88"
- "\u4E3D"
- "\u6CFD"
- "\u523B"
- "\u91CE"
- "\u5A01"
- "\u5BBD"
- "\u7B14"
- "\u8BED"
- "\u6B66"
- "\u7092"
- "\u865A"
- "\u67B6"
- "\u5947"
- "\u54E5"
- "\u5C24"
- "\u5EA7"
- "\u8FC5"
- "\u7C89"
- "\u500D"
- "\u6731"
- "\u5C4B"
- "\u822C"
- "\u9519"
- "\u6D25"
- "\u5F1F"
- "\u6C47"
- "\u6982"
- "\u9F13"
- "\u6389"
- "\u90D1"
- "\u949F"
- "\u53EC"
- "\u793C"
- "\u7981"
- "\u6298"
- "\u7F29"
- "\u9501"
- "\u6D9B"
- "\u4E61"
- "\u80A5"
- "\u5E78"
- "\u96E8"
- "\u68A6"
- "\u8089"
- "\u653B"
- "\u51AC"
- "\u547C"
- "\u84DD"
- "\u7EFC"
- "\u7801"
- "\u676F"
- "\u6620"
- "\u5200"
- "\u8C22"
- "\u7F16"
- "\u811A"
- "\u6653"
- "\u904D"
- "\u671D"
- "\u5409"
- "\u6D17"
- "\u76D7"
- "\u4E39"
- "\u5C4F"
- "\u76DB"
- "\u79D8"
- "\u62D8"
- "\u67D3"
- "\u6E20"
- "\u6263"
- "\u6D0B"
- "\u68AF"
- "\u67AA"
- "\u4E45"
- "\u8BC8"
- "\u5DDD"
- "\u6469"
- "\u4FC4"
- "\u8FEA"
- "\u6BDB"
- "\u8D5E"
- "\u7B26"
- "\u753B"
- "\u7FFB"
- "\u59B9"
- "\u7B51"
- "\u805A"
- "\u54C8"
- "\u5175"
- "\u80AF"
- "\u80CE"
- "\u6F6E"
- "\u82E6"
- "\u9003"
- "\u8BB2"
- "\u6388"
- "\u6162"
- "\u987F"
- "\u9057"
- "\u4E1D"
- "\u5448"
- "\u63ED"
- "\u6302"
- "\u5C01"
- "\u6167"
- "\u8DE8"
- "\u8BE2"
- "\u62C6"
- "\u68EE"
- "\u5B55"
- "\u8131"
- "\u8BFB"
- "\u679A"
- "\u6350"
- "\u6869"
- "\u8DC3"
- "\u5237"
- "\u82AF"
- "\u6597"
- "\u6606"
- "\u50A8"
- "\u5B88"
- "\u89E6"
- "\u6728"
- "\u76AE"
- "\u996D"
- "\u6DFB"
- "\u839E"
- "\u9707"
- "\u8F7D"
- "\u8D35"
- "\u4FB5"
- "\u6491"
- "\u7238"
- "\u518C"
- "\u821E"
- "\u4E01"
- "\u8D38"
- "\u5976"
- "\u9690"
- "\u5987"
- "\u699C"
- "\u7761"
- "\u9677"
- "\u8349"
- "\u626C"
- "\u88AD"
- "\u5077"
- "\u7763"
- "\u4E8F"
- "\u5415"
- "\u73E0"
- "\u8D76"
- "\u6276"
- "\u76C8"
- "\u6863"
- "\u8BFA"
- "\u8FD4"
- "\u65E2"
- "\u672B"
- "\u6C99"
- "\u8C01"
- "\u5B8F"
- "\u6458"
- "\u5178"
- "\u5E8A"
- "\u95ED"
- "\u5F03"
- "\u96F7"
- "\u6BD5"
- "\u90ED"
- "\u73B2"
- "\u90CE"
- "\u829D"
- "\u80E1"
- "\u745E"
- "\u76DF"
- "\u5385"
- "\u62B1"
- "\u71C3"
- "\u94DC"
- "\u65D7"
- "\u8363"
- "\u9910"
- "\u7259"
- "\u7237"
- "\u8FF9"
- "\u5B87"
- "\u9014"
- "\u6F5C"
- "\u62B5"
- "\u9AA8"
- "\u63F4"
- "\u6D6A"
- "\u7389"
- "\u7956"
- "\u632F"
- "\u8679"
- "\u6563"
- "\u7126"
- "\u52C7"
- "\u52AA"
- "\u5A46"
- "\u62D2"
- "\u5F39"
- "\u6881"
- "\u575B"
- "\u542B"
- "\u574F"
- "\u7EAF"
- "\u70DF"
- "\u51B7"
- "\u955C"
- "\u53EB"
- "\u8D75"
- "\u9759"
- "\u4EEA"
- "\u85CF"
- "\u6742"
- "\u75DB"
- "\u614E"
- "\u6811"
- "\u7AE0"
- "\u585E"
- "\u94A2"
- "\u72C2"
- "\u5462"
- "\u96C5"
- "\u5BFF"
- "\u6069"
- "\u56FA"
- "\u72D7"
- "\u83DC"
- "\u6C9F"
- "\u732E"
- "\u53F6"
- "\u6CF0"
- "\u8D62"
- "\u5269"
- "\u7A83"
- "\u504F"
- "\u638C"
- "\u5B9C"
- "\u8BFE"
- "\u8DA3"
- "\u559D"
- "\u7EA0"
- "\u7C4D"
- "\u66FF"
- "\u70B8"
- "\u9694"
- "\u7838"
- "\u642D"
- "\u8BDA"
- "\u65CF"
- "\u6D59"
- "\u9F50"
- "\u6746"
- "\u664B"
- "\u6076"
- "\u594B"
- "\u79CB"
- "\u9C9C"
- "\u9C81"
- "\u5192"
- "\u8D5A"
- "\u5F31"
- "\u817F"
- "\u795D"
- "\u6DF7"
- "\u7F34"
- "\u75BE"
- "\u63E1"
- "\u6C6A"
- "\u8F89"
- "\u5954"
- "\u9192"
- "\u6355"
- "\u9A91"
- "\u9E1F"
- "\u6446"
- "\u7075"
- "\u654F"
- "\u725B"
- "\u5C9B"
- "\u604B"
- "\u8017"
- "\u74E6"
- "\u62FC"
- "\u6050"
- "\u68D2"
- "\u5766"
- "\u539A"
- "\u4FA7"
- "\u5C1D"
- "\u85AA"
- "\u5802"
- "\u66F2"
- "\u7B54"
- "\u96C4"
- "\u5F92"
- "\u788D"
- "\u62D3"
- "\u7FD4"
- "\u4F5B"
- "\u4F50"
- "\u6EF4"
- "\u676D"
- "\u6B8B"
- "\u6BEB"
- "\u5C04"
- "\u62D6"
- "\u963B"
- "\u8F91"
- "\u8E2A"
- "\u75C7"
- "\u59D3"
- "\u6B32"
- "\u9C7C"
- "\u8239"
- "\u6062"
- "\u8861"
- "\u6DE1"
- "\u552F"
- "\u4E4F"
- "\u8FDF"
- "\u742A"
- "\u70E7"
- "\u5510"
- "\u5377"
- "\u966A"
- "\u4F0F"
- "\u52B5"
- "\u7E41"
- "\u9006"
- "\u8FC1"
- "\u8BCA"
- "\u4E71"
- "\u4EA6"
- "\u8C13"
- "\u77FF"
- "\u8FEB"
- "\u5FE7"
- "\u626E"
- "\u5DE2"
- "\u624E"
- "\u5353"
- "\u6052"
- "\u5E84"
- "\u9012"
- "\u707E"
- "\u83B1"
- "\u8D74"
- "\u7164"
- "\u640F"
- "\u5242"
- "\u6885"
- "\u5427"
- "\u64A4"
- "\u54F2"
- "\u70B3"
- "\u5C3E"
- "\u8A89"
- "\u6D1B"
- "\u8F68"
- "\u7F72"
- "\u515A"
- "\u60EF"
- "\u5E7C"
- "\u7F18"
- "\u58A8"
- "\u83AB"
- "\u8F9E"
- "\u594F"
- "\u6562"
- "\u5784"
- "\u65C1"
- "\u8499"
- "\u7BB1"
- "\u5428"
- "\u6CDB"
- "\u6015"
- "\u95F9"
- "\u6B20"
- "\u52AB"
- "\u7EB8"
- "\u5CB8"
- "\u6DD8"
- "\u8D4C"
- "\u7A97"
- "\u6D01"
- "\u5C97"
- "\u5A18"
- "\u6676"
- "\u52B2"
- "\u51ED"
- "\u65A4"
- "\u6D2A"
- "\u6DB2"
- "\u69DB"
- "\u517C"
- "\u6454"
- "\u695A"
- "\u660C"
- "\u83F2"
- "\u840C"
- "\u4F0D"
- "\u6CBF"
- "\u54A8"
- "\u996E"
- "\u5899"
- "\u6C88"
- "\u5761"
- "\u5BF8"
- "\u6EA2"
- "\u4ED3"
- "\u9274"
- "\u6148"
- "\u67EF"
- "\u65E6"
- "\u6B8A"
- "\u5760"
- "\u8BF8"
- "\u641E"
- "\u4F0A"
- "\u9738"
- "\u7ED1"
- "\u6C27"
- "\u5885"
- "\u8F7F"
- "\u86CB"
- "\u5FD9"
- "\u6EE8"
- "\u4E95"
- "\u903C"
- "\u4F2F"
- "\u764C"
- "\u71D5"
- "\u8D56"
- "\u6D66"
- "\u6F0F"
- "\u643A"
- "\u582A"
- "\u9605"
- "\u8BD7"
- "\u8D29"
- "\u8150"
- "\u503E"
- "\u94FA"
- "\u65FA"
- "\u6A2A"
- "\u900A"
- "\u5141"
- "\u7A84"
- "\u9E21"
- "\u5531"
- "\u8D3F"
- "\u62E8"
- "\u780D"
- "\u731B"
- "\u78B3"
- "\u5835"
- "\u9080"
- "\u5195"
- "\u680F"
- "\u59C6"
- "\u8033"
- "\u7ED5"
- "\u89C8"
- "\u8058"
- "\u7433"
- "\u971E"
- "\u6316"
- "\u5E9E"
- "\u5F7B"
- "\u9881"
- "\u633A"
- "\u6C89"
- "\u6284"
- "\u5BAB"
- "\u6BB4"
- "\u5783"
- "\u573E"
- "\u5C38"
- "\u6DB5"
- "\u5A03"
- "\u5A77"
- "\u7275"
- "\u817E"
- "\u5367"
- "\u5076"
- "\u6270"
- "\u6FB3"
- "\u8FC8"
- "\u864E"
- "\u8D21"
- "\u8BCD"
- "\u58C1"
- "\u5BBE"
- "\u6377"
- "\u5FCD"
- "\u4F69"
- "\u558A"
- "\u62BD"
- "\u690D"
- "\u70BC"
- "\u5978"
- "\u5410"
- "\u629B"
- "\u7965"
- "\u8389"
- "\u6CC4"
- "\u68B0"
- "\u4E52"
- "\u8F9B"
- "\u75AF"
- "\u51EF"
- "\u626B"
- "\u706F"
- "\u6DC0"
- "\u6BC1"
- "\u9B3C"
- "\u5A74"
- "\u6DEB"
- "\u51BB"
- "\u7BEE"
- "\u804A"
- "\u5E05"
- "\u4E54"
- "\u6CAA"
- "\u7FBD"
- "\u820D"
- "\u88C2"
- "\u5FFD"
- "\u5706"
- "\u62D4"
- "\u6717"
- "\u5BBF"
- "\u9EBB"
- "\u7720"
- "\u73AE"
- "\u5854"
- "\u78B0"
- "\u602A"
- "\u62BC"
- "\u6500"
- "\u9A70"
- "\u6B23"
- "\u8E0F"
- "\u5DE9"
- "\u5E9F"
- "\u8270"
- "\u4E73"
- "\u53E5"
- "\u4FA6"
- "\u5144"
- "\u8350"
- "\u5BD3"
- "\u53A6"
- "\u8D1D"
- "\u7EB5"
- "\u8096"
- "\u675C"
- "\u5FD8"
- "\u4E22"
- "\u642C"
- "\u66FC"
- "\u74F6"
- "\u9E4F"
- "\u9ED8"
- "\u60E8"
- "\u6CE1"
- "\u6108"
- "\u6566"
- "\u6D1E"
- "\u529D"
- "\u9896"
- "\u9177"
- "\u989C"
- "\u5DE1"
- "\u810F"
- "\u4EFF"
- "\u7F8A"
- "\u6324"
- "\u5EC9"
- "\u9EA6"
- "\u584C"
- "\u541B"
- "\u654C"
- "\u4E4C"
- "\u4FE9"
- "\u6A0A"
- "\u90AE"
- "\u70EF"
- "\u8BE6"
- "\u8212"
- "\u5951"
- "\u6F2B"
- "\u80DE"
- "\u9B54"
- "\u5B8B"
- "\u4F10"
- "\u8C28"
- "\u59FF"
- "\u59D1"
- "\u9686"
- "\u7EB9"
- "\u5085"
- "\u8336"
- "\u8457"
- "\u8C0B"
- "\u656C"
- "\u90C1"
- "\u9A71"
- "\u83CC"
- "\u60AC"
- "\u5FAA"
- "\u644A"
- "\u95EA"
- "\u4F2A"
- "\u9E3F"
- "\u5A1C"
- "\u6F8E"
- "\u6E43"
- "\u7089"
- "\u6697"
- "\u95EF"
- "\u7EEA"
- "\u6C70"
- "\u7A3F"
- "\u54AC"
- "\u5362"
- "\u6CC9"
- "\u6D8C"
- "\u857E"
- "\u59FB"
- "\u718A"
- "\u7A00"
- "\u6447"
- "\u540A"
- "\u684C"
- "\u4FCA"
- "\u54ED"
- "\u8D60"
- "\u9038"
- "\u5413"
- "\u8D6B"
- "\u51E1"
- "\u4FF1"
- "\u51AF"
- "\u5DE7"
- "\u6DAF"
- "\u5566"
- "\u8BBC"
- "\u6070"
- "\u629A"
- "\u8087"
- "\u950B"
- "\u51F6"
- "\u8D2F"
- "\u6084"
- "\u706D"
- "\u5180"
- "\u7CD5"
- "\u4F38"
- "\u80D6"
- "\u8179"
- "\u90CA"
- "\u658C"
- "\u946B"
- "\u5389"
- "\u80A9"
- "\u5723"
- "\u6D6E"
- "\u5999"
- "\u9970"
- "\u5C16"
- "\u5C0A"
- "\u90B1"
- "\u8BDE"
- "\u5C61"
- "\u6478"
- "\u916C"
- "\u95F2"
- "\u6670"
- "\u5339"
- "\u953B"
- "\u7532"
- "\u6572"
- "\u9065"
- "\u52D2"
- "\u5151"
- "\u7199"
- "\u7A3D"
- "\u8521"
- "\u60DC"
- "\u732B"
- "\u6012"
- "\u9A7B"
- "\u9887"
- "\u6D53"
- "\u5BB4"
- "\u4EC1"
- "\u8D4F"
- "\u78E8"
- "\u60B2"
- "\u9A82"
- "\u8F74"
- "\u59DC"
- "\u732A"
- "\u5272"
- "\u6B49"
- "\u73BB"
- "\u6D69"
- "\u756A"
- "\u6E21"
- "\u808C"
- "\u8DF5"
- "\u76FE"
- "\u751C"
- "\u6EBA"
- "\u5C3A"
- "\u5FC6"
- "\u76D0"
- "\u6CE5"
- "\u8584"
- "\u77DB"
- "\u7545"
- "\u6291"
- "\u9897"
- "\u848B"
- "\u7A0D"
- "\u788E"
- "\u5E1D"
- "\u7483"
- "\u6380"
- "\u62D0"
- "\u7262"
- "\u5E7B"
- "\u4ED4"
- "\u7CAE"
- "\u827E"
- "\u626D"
- "\u5C3F"
- "\u520A"
- "\u4ED1"
- "\u9ECE"
- "\u57C3"
- "\u81C2"
- "\u90BB"
- "\u82D7"
- "\u8854"
- "\u6842"
- "\u6F6D"
- "\u5C65"
- "\u8D3E"
- "\u997C"
- "\u60E9"
- "\u8BF1"
- "\u65CB"
- "\u7BC7"
- "\u8FBD"
- "\u65ED"
- "\u903E"
- "\u8C46"
- "\u6F58"
- "\u5806"
- "\u7518"
- "\u90A6"
- "\u6C0F"
- "\u62E6"
- "\u7855"
- "\u68CB"
- "\u88E4"
- "\u4E53"
- "\u59DA"
- "\u5398"
- "\u9093"
- "\u9676"
- "\u8428"
- "\u5F17"
- "\u8F85"
- "\u5EF7"
- "\u5401"
- "\u6760"
- "\u7EEE"
- "\u7444"
- "\u5939"
- "\u69FD"
- "\u7978"
- "\u8881"
- "\u52FE"
- "\u8D41"
- "\u5E16"
- "\u8170"
- "\u6F02"
- "\u88D5"
- "\u5634"
- "\u58EE"
- "\u5F2F"
- "\u554A"
- "\u6C64"
- "\u57AB"
- "\u9B4F"
- "\u5021"
- "\u680B"
- "\u7891"
- "\u9888"
- "\u6691"
- "\u9B45"
- "\u88F8"
- "\u758F"
- "\u96C7"
- "\u6BC5"
- "\u5FE0"
- "\u7586"
- "\u845B"
- "\u51E4"
- "\u5C48"
- "\u60A6"
- "\u9988"
- "\u6321"
- "\u95EB"
- "\u6C2E"
- "\u5146"
- "\u8C8C"
- "\u5395"
- "\u8C23"
- "\u98A0"
- "\u731C"
- "\u75B2"
- "\u6846"
- "\u63FD"
- "\u80C1"
- "\u61BE"
- "\u79E9"
- "\u8273"
- "\u5E3D"
- "\u6C1B"
- "\u8377"
- "\u6CEA"
- "\u5251"
- "\u61C2"
- "\u94BB"
- "\u9075"
- "\u8D2A"
- "\u8D3C"
- "\u72F1"
- "\u59E3"
- "\u5BFA"
- "\u80F6"
- "\u5435"
- "\u50AC"
- "\u524A"
- "\u4E11"
- "\u6B3A"
- "\u8083"
- "\u59A5"
- "\u70E6"
- "\u7070"
- "\u64C5"
- "\u4F63"
- "\u8427"
- "\u867E"
- "\u978B"
- "\u6367"
- "\u901D"
- "\u7325"
- "\u74DC"
- "\u9178"
- "\u5948"
- "\u53A8"
- "\u7D2B"
- "\u4FA0"
- "\u5851"
- "\u5A07"
- "\u8F96"
- "\u8206"
- "\u64E6"
- "\u67CF"
- "\u6F84"
- "\u78CA"
- "\u8650"
- "\u8F70"
- "\u66F9"
- "\u5220"
- "\u9F3B"
- "\u67F3"
- "\u5C6F"
- "\u7B3C"
- "\u7687"
- "\u7CD6"
- "\u73CD"
- "\u75BC"
- "\u67DC"
- "\u6361"
- "\u5740"
- "\u80A0"
- "\u635E"
- "\u62DC"
- "\u5CFB"
- "\u5439"
- "\u4E43"
- "\u7626"
- "\u809A"
- "\u8D24"
- "\u5E15"
- "\u5CB3"
- "\u52E4"
- "\u745C"
- "\u9505"
- "\u6CAB"
- "\u4FD7"
- "\u6615"
- "\u5E06"
- "\u8302"
- "\u9189"
- "\u586B"
- "\u9971"
- "\u722C"
- "\u8F69"
- "\u6EDE"
- "\u871C"
- "\u6C57"
- "\u98D9"
- "\u8010"
- "\u4EA8"
- "\u5AB3"
- "\u5F6D"
- "\u84C4"
- "\u8776"
- "\u70AE"
- "\u9F20"
- "\u5496"
- "\u7434"
- "\u5BA0"
- "\u68CD"
- "\u6398"
- "\u8328"
- "\u5751"
- "\u6E58"
- "\u5B5F"
- "\u52A3"
- "\u707F"
- "\u866B"
- "\u5F66"
- "\u55B7"
- "\u63CF"
- "\u8FA9"
- "\u5C34"
- "\u5C2C"
- "\u5F25"
- "\u5B64"
- "\u5CE1"
- "\u51F8"
- "\u903B"
- "\u8FB0"
- "\u5B54"
- "\u62AC"
- "\u99A8"
- "\u851A"
- "\u6021"
- "\u96EF"
- "\u7816"
- "\u5D07"
- "\u80A2"
- "\u67F1"
- "\u9614"
- "\u5F7C"
- "\u8352"
- "\u6EDA"
- "\u8461"
- "\u8404"
- "\u6602"
- "\u76C6"
- "\u6028"
- "\u77AC"
- "\u659C"
- "\u65A9"
- "\u775B"
- "\u526A"
- "\u63D2"
- "\u68DA"
- "\u4E32"
- "\u6C83"
- "\u67D4"
- "\u80A4"
- "\u58F3"
- "\u80F8"
- "\u9655"
- "\u51C9"
- "\u5D1B"
- "\u9E23"
- "\u7F55"
- "\u8877"
- "\u9634"
- "\u76F2"
- "\u4F1E"
- "\u6212"
- "\u8E22"
- "\u72FC"
- "\u57CB"
- "\u917F"
- "\u65E8"
- "\u6208"
- "\u6349"
- "\u8DEA"
- "\u8D3A"
- "\u8C2D"
- "\u6D82"
- "\u840E"
- "\u6ECB"
- "\u660F"
- "\u6247"
- "\u9F0E"
- "\u6960"
- "\u9A73"
- "\u6EAA"
- "\u6851"
- "\u94A7"
- "\u8361"
- "\u75D5"
- "\u739B"
- "\u8EB2"
- "\u8C10"
- "\u60A8"
- "\u53F9"
- "\u6876"
- "\u6655"
- "\u4E19"
- "\u7487"
- "\u549A"
- "\u70C2"
- "\u6749"
- "\u6323"
- "\u7A9D"
- "\u4EB5"
- "\u82B8"
- "\u6E1D"
- "\u82B3"
- "\u5986"
- "\u819C"
- "\u714C"
- "\u5C18"
- "\u4FAF"
- "\u8D4B"
- "\u6E23"
- "\u8D2B"
- "\u6843"
- "\u9875"
- "\u541E"
- "\u80C0"
- "\u7AF9"
- "\u809D"
- "\u96FE"
- "\u5AC1"
- "\u8F88"
- "\u6124"
- "\u7410"
- "\u6B96"
- "\u5A9B"
- "\u5BC4"
- "\u50F5"
- "\u902E"
- "\u806A"
- "\u7C97"
- "\u5BD2"
- "\u5F04"
- "\u5893"
- "\u8C0C"
- "\u6254"
- "\u5F79"
- "\u5446"
- "\u9756"
- "\u8482"
- "\u82AC"
- "\u7FFC"
- "\u5582"
- "\u5B75"
- "\u8C0E"
- "\u7845"
- "\u74A8"
- "\u5580"
- "\u76FC"
- "\u76D2"
- "\u614C"
- "\u70EB"
- "\u79E6"
- "\u68B3"
- "\u97E6"
- "\u888B"
- "\u9493"
- "\u5915"
- "\u7897"
- "\u5BE8"
- "\u5858"
- "\u884D"
- "\u5792"
- "\u537F"
- "\u6EE9"
- "\u6251"
- "\u7ED8"
- "\u8FB1"
- "\u708E"
- "\u94C5"
- "\u80BF"
- "\u8870"
- "\u53A2"
- "\u8EBA"
- "\u7EBD"
- "\u786B"
- "\u7750"
- "\u7FC1"
- "\u6170"
- "\u800D"
- "\u7F20"
- "\u72E0"
- "\u8109"
- "\u65A5"
- "\u8102"
- "\u8DB4"
- "\u94A9"
- "\u6B67"
- "\u6905"
- "\u8E29"
- "\u63B7"
- "\u633D"
- "\u9510"
- "\u52D8"
- "\u9022"
- "\u90DD"
- "\u5BAA"
- "\u80C3"
- "\u7C92"
- "\u77A9"
- "\u8F9F"
- "\u7686"
- "\u4EF0"
- "\u8155"
- "\u532A"
- "\u9675"
- "\u94A5"
- "\u7F1D"
- "\u95F8"
- "\u72AC"
- "\u9521"
- "\u5F0A"
- "\u51DD"
- "\u81ED"
- "\u8D81"
- "\u62FE"
- "\u5938"
- "\u63A9"
- "\u8000"
- "\u70AD"
- "\u94EC"
- "\u53E0"
- "\u574A"
- "\u632A"
- "\u87F9"
- "\u88F9"
- "\u72EE"
- "\u8F90"
- "\u964C"
- "\u6345"
- "\u75AB"
- "\u5179"
- "\u970D"
- "\u9508"
- "\u5A1F"
- "\u8681"
- "\u5962"
- "\u543B"
- "\u4F83"
- "\u6656"
- "\u6273"
- "\u51A4"
- "\u5F70"
- "\u8E48"
- "\u7574"
- "\u86C7"
- "\u6FE0"
- "\u5561"
- "\u5821"
- "\u4FA3"
- "\u6492"
- "\u94ED"
- "\u638F"
- "\u594E"
- "\u8702"
- "\u54B8"
- "\u7A77"
- "\u7784"
- "\u9042"
- "\u78BE"
- "\u533F"
- "\u74F7"
- "\u8231"
- "\u5239"
- "\u67C4"
- "\u502A"
- "\u7779"
- "\u8BD1"
- "\u6DC7"
- "\u731D"
- "\u6D45"
- "\u80BA"
- "\u6E7F"
- "\u987D"
- "\u7F69"
- "\u80C6"
- "\u5319"
- "\u6E34"
- "\u59AE"
- "\u7F9E"
- "\u8106"
- "\u9B44"
- "\u9502"
- "\u7EA4"
- "\u70AB"
- "\u88D9"
- "\u80BE"
- "\u50B2"
- "\u819D"
- "\u53D4"
- "\u5565"
- "\u6495"
- "\u7272"
- "\u7334"
- "\u8FA8"
- "\u915D"
- "\u522E"
- "\u60D1"
- "\u6E17"
- "\u55BB"
- "\u6674"
- "\u6DD1"
- "\u7FA1"
- "\u6155"
- "\u64C2"
- "\u9A9A"
- "\u7EBA"
- "\u5495"
- "\u50E7"
- "\u6094"
- "\u5782"
- "\u762B"
- "\u5265"
- "\u8230"
- "\u6D4F"
- "\u9C8D"
- "\u8DFB"
- "\u4EAD"
- "\u64B0"
- "\u5378"
- "\u83B2"
- "\u7EB1"
- "\u7CCA"
- "\u6735"
- "\u5CA9"
- "\u7709"
- "\u51FD"
- "\u7CDF"
- "\u4ED7"
- "\u60F9"
- "\u7426"
- "\u8D1E"
- "\u6C22"
- "\u6977"
- "\u8393"
- "\u7792"
- "\u5960"
- "\u52C3"
- "\u9524"
- "\u59A8"
- "\u5E37"
- "\u6D3D"
- "\u4E5E"
- "\u727A"
- "\u4EA9"
- "\u7C3F"
- "\u6591"
- "\u7FD8"
- "\u7948"
- "\u5507"
- "\u8015"
- "\u626F"
- "\u598D"
- "\u574E"
- "\u8C31"
- "\u76EF"
- "\u6CFC"
- "\u608D"
- "\u838E"
- "\u6C41"
- "\u56CA"
- "\u7529"
- "\u8FA3"
- "\u6D78"
- "\u607C"
- "\u76D4"
- "\u70E4"
- "\u575D"
- "\u5DC5"
- "\u6CB8"
- "\u62B9"
- "\u90B9"
- "\u973E"
- "\u6016"
- "\u72B9"
- "\u64CE"
- "\u8FC4"
- "\u6068"
- "\u4E27"
- "\u575E"
- "\u8896"
- "\u8D64"
- "\u840D"
- "\u723D"
- "\u7A46"
- "\u5A36"
- "\u95F7"
- "\u634D"
- "\u8180"
- "\u4F88"
- "\u7B4B"
- "\u901B"
- "\u5029"
- "\u7EB2"
- "\u906E"
- "\u5FA1"
- "\u59E8"
- "\u6DEE"
- "\u5BB0"
- "\u53C9"
- "\u7EF5"
- "\u60E7"
- "\u94A6"
- "\u5ECA"
- "\u9CC4"
- "\u7802"
- "\u6D46"
- "\u79BD"
- "\u548F"
- "\u763E"
- "\u997F"
- "\u75F4"
- "\u7EF3"
- "\u789F"
- "\u97F5"
- "\u7693"
- "\u5ED6"
- "\u5CAD"
- "\u86D9"
- "\u5154"
- "\u82BD"
- "\u5256"
- "\u5AD6"
- "\u6614"
- "\u54C0"
- "\u8513"
- "\u8C26"
- "\u6EE5"
- "\u8D42"
- "\u6E0A"
- "\u6363"
- "\u4F51"
- "\u5F08"
- "\u4ED9"
- "\u6FA1"
- "\u9AA4"
- "\u4FA8"
- "\u5949"
- "\u78C5"
- "\u6168"
- "\u7B5B"
- "\u5632"
- "\u7AE3"
- "\u7BAD"
- "\u8367"
- "\u8116"
- "\u5F64"
- "\u8C6B"
- "\u8E81"
- "\u79C9"
- "\u9E64"
- "\u5E7A"
- "\u6E14"
- "\u7F62"
- "\u8D2C"
- "\u94F2"
- "\u5375"
- "\u9017"
- "\u7267"
- "\u852C"
- "\u82D1"
- "\u6CA6"
- "\u904F"
- "\u67F4"
- "\u5E99"
- "\u517D"
- "\u8036"
- "\u9B42"
- "\u6E9C"
- "\u7F09"
- "\u4FCF"
- "\u8574"
- "\u82DB"
- "\u51D1"
- "\u5A7F"
- "\u94F8"
- "\u515C"
- "\u8E6D"
- "\u9E2D"
- "\u6734"
- "\u808B"
- "\u566A"
- "\u711A"
- "\u574D"
- "\u5564"
- "\u9489"
- "\u621A"
- "\u8C0D"
- "\u632B"
- "\u8247"
- "\u4F59"
- "\u5DF7"
- "\u5C60"
- "\u548B"
- "\u8A79"
- "\u886B"
- "\u6D74"
- "\u7239"
- "\u5B5D"
- "\u7624"
- "\u9716"
- "\u5D29"
- "\u7538"
- "\u60BC"
- "\u64D2"
- "\u6D47"
- "\u96D5"
- "\u7AD6"
- "\u5E10"
- "\u8424"
- "\u9761"
- "\u6F20"
- "\u50BB"
- "\u64BC"
- "\u5D14"
- "\u7B52"
- "\u810A"
- "\u561B"
- "\u81E3"
- "\u79BE"
- "\u9F9F"
- "\u5524"
- "\u5440"
- "\u58E4"
- "\u704C"
- "\u90B5"
- "\u7A3B"
- "\u5DFE"
- "\u8469"
- "\u9965"
- "\u7F14"
- "\u820C"
- "\u7A9C"
- "\u79FD"
- "\u8305"
- "\u9753"
- "\u9631"
- "\u949E"
- "\u6F7C"
- "\u785D"
- "\u58A9"
- "\u8759"
- "\u8760"
- "\u5AC2"
- "\u8258"
- "\u56A3"
- "\u94C3"
- "\u6252"
- "\u4F6C"
- "\u7AED"
- "\u8D4E"
- "\u508D"
- "\u71AC"
- "\u60A0"
- "\u6328"
- "\u6CCA"
- "\u6512"
- "\u576A"
- "\u7130"
- "\u87BA"
- "\u8587"
- "\u86DB"
- "\u725F"
- "\u5FCC"
- "\u6127"
- "\u9175"
- "\u8FED"
- "\u9976"
- "\u60DF"
- "\u94AE"
- "\u95F5"
- "\u78A7"
- "\u5F98"
- "\u5F8A"
- "\u6EAF"
- "\u68C9"
- "\u6B6A"
- "\u6342"
- "\u868A"
- "\u9530"
- "\u5C41"
- "\u7578"
- "\u80AA"
- "\u8E72"
- "\u5254"
- "\u6986"
- "\u6487"
- "\u745F"
- "\u8BB6"
- "\u98D8"
- "\u84B8"
- "\u8BE0"
- "\u5BC2"
- "\u7F44"
- "\u83B9"
- "\u9E45"
- "\u6CE3"
- "\u5D16"
- "\u73CA"
- "\u8BB3"
- "\u7FF0"
- "\u8718"
- "\u4EF2"
- "\u71E5"
- "\u83F1"
- "\u6EE2"
- "\u714E"
- "\u86EE"
- "\u77BB"
- "\u8611"
- "\u83C7"
- "\u9699"
- "\u6346"
- "\u8549"
- "\u9063"
- "\u5B9B"
- "\u8086"
- "\u4E38"
- "\u78C1"
- "\u73A5"
- "\u5D4C"
- "\u97F6"
- "\u679D"
- "\u54AA"
- "\u6109"
- "\u5455"
- "\u6DE4"
- "\u8A93"
- "\u8F84"
- "\u4FEF"
- "\u6850"
- "\u8205"
- "\u84C9"
- "\u6E2D"
- "\u6C2F"
- "\u6E85"
- "\u96C1"
- "\u9F9A"
- "\u607A"
- "\u5996"
- "\u997D"
- "\u8346"
- "\u67AF"
- "\u4EC7"
- "\u575F"
- "\u6F9C"
- "\u9E9F"
- "\u85E4"
- "\u730E"
- "\u6D12"
- "\u8339"
- "\u788C"
- "\u754F"
- "\u6DA4"
- "\u4FDE"
- "\u52FF"
- "\u853D"
- "\u7F50"
- "\u5C39"
- "\u5830"
- "\u5112"
- "\u82AE"
- "\u5B5A"
- "\u54D7"
- "\u6390"
- "\u77F6"
- "\u690E"
- "\u9610"
- "\u9A74"
- "\u8749"
- "\u7115"
- "\u9102"
- "\u803B"
- "\u70AF"
- "\u886C"
- "\u5A49"
- "\u6101"
- "\u68A8"
- "\u4E1B"
- "\u8C05"
- "\u81A8"
- "\u66D9"
- "\u9E7F"
- "\u9A84"
- "\u7F05"
- "\u5306"
- "\u8D43"
- "\u84B2"
- "\u7741"
- "\u7131"
- "\u707C"
- "\u5203"
- "\u8783"
- "\u7455"
- "\u8BB9"
- "\u7985"
- "\u81C0"
- "\u59D7"
- "\u5A9A"
- "\u545B"
- "\u51F0"
- "\u701A"
- "\u57D4"
- "\u5F13"
- "\u961A"
- "\u6E5B"
- "\u5955"
- "\u625B"
- "\u9F7F"
- "\u631F"
- "\u9AD3"
- "\u72ED"
- "\u6808"
- "\u9A8F"
- "\u5D2D"
- "\u6151"
- "\u6BBF"
- "\u796D"
- "\u50FB"
- "\u8E6C"
- "\u5BE1"
- "\u5466"
- "\u97A0"
- "\u9171"
- "\u7470"
- "\u9992"
- "\u5764"
- "\u8D9F"
- "\u81FB"
- "\u5492"
- "\u8C79"
- "\u755C"
- "\u5189"
- "\u7ECE"
- "\u5C8C"
- "\u7504"
- "\u7EDE"
- "\u5BB5"
- "\u5EB8"
- "\u6B47"
- "\u6320"
- "\u6C28"
- "\u4E59"
- "\u8335"
- "\u5C94"
- "\u6DC4"
- "\u7898"
- "\u6DCB"
- "\u84EC"
- "\u9885"
- "\u7FB9"
- "\u6D51"
- "\u6627"
- "\u7FE0"
- "\u5CE5"
- "\u60D5"
- "\u777F"
- "\u82A6"
- "\u8680"
- "\u9893"
- "\u971C"
- "\u94B0"
- "\u6A58"
- "\u5824"
- "\u51F3"
- "\u6EB6"
- "\u952F"
- "\u5E42"
- "\u69B4"
- "\u5A3C"
- "\u6C79"
- "\u832B"
- "\u538C"
- "\u7EF0"
- "\u5D0E"
- "\u6E83"
- "\u64AC"
- "\u6CBE"
- "\u62C7"
- "\u75B5"
- "\u54E6"
- "\u5F27"
- "\u5F18"
- "\u54BD"
- "\u846C"
- "\u9601"
- "\u7AFF"
- "\u7BE1"
- "\u96B6"
- "\u8BDF"
- "\u716E"
- "\u4E18"
- "\u803F"
- "\u5F6C"
- "\u655E"
- "\u6CFB"
- "\u5937"
- "\u9685"
- "\u6E0E"
- "\u6DF9"
- "\u9A86"
- "\u918B"
- "\u9706"
- "\u6DA9"
- "\u9640"
- "\u53D9"
- "\u6897"
- "\u51B6"
- "\u655B"
- "\u75EA"
- "\u8BBD"
- "\u75A4"
- "\u8782"
- "\u8292"
- "\u5E62"
- "\u709C"
- "\u6BEF"
- "\u6A59"
- "\u62E2"
- "\u4FE8"
- "\u4ED5"
- "\u6C30"
- "\u94BE"
- "\u5450"
- "\u682A"
- "\u813E"
- "\u70E8"
- "\u78D5"
- "\u859B"
- "\u7A96"
- "\u82B7"
- "\u8715"
- "\u8845"
- "\u6B79"
- "\u54D2"
- "\u8BE1"
- "\u6467"
- "\u6F06"
- "\u87D1"
- "\u5288"
- "\u5475"
- "\u7D6E"
- "\u6296"
- "\u5A05"
- "\u94DD"
- "\u9709"
- "\u82AD"
- "\u8F9C"
- "\u660A"
- "\u5618"
- "\u54D1"
- "\u67A2"
- "\u8110"
- "\u5E90"
- "\u94A0"
- "\u9CCC"
- "\u77E9"
- "\u9506"
- "\u5A67"
- "\u6C9B"
- "\u9972"
- "\u7184"
- "\u7FE1"
- "\u5C79"
- "\u818F"
- "\u9619"
- "\u6402"
- "\u9523"
- "\u5E4C"
- "\u6A44"
- "\u6984"
- "\u6756"
- "\u65F7"
- "\u77EB"
- "\u5188"
- "\u821F"
- "\u814A"
- "\u8042"
- "\u62E3"
- "\u905B"
- "\u52CB"
- "\u7A98"
- "\u97E7"
- "\u54B1"
- "\u62CE"
- "\u6912"
- "\u63E3"
- "\u6BB7"
- "\u63EA"
- "\u4F3D"
- "\u8D31"
- "\u743C"
- "\u83E1"
- "\u95FA"
- "\u662D"
- "\u96CF"
- "\u8E4A"
- "\u9EDB"
- "\u79B9"
- "\u978D"
- "\u4E56"
- "\u6C5D"
- "\u752B"
- "\u5F5D"
- "\u6CF8"
- "\u8BEC"
- "\u62FD"
- "\u6BFD"
- "\u6405"
- "\u8475"
- "\u65F1"
- "\u52C9"
- "\u8DF7"
- "\u7554"
- "\u8098"
- "\u5742"
- "\u6F29"
- "\u6DA1"
- "\u5018"
- "\u919B"
- "\u66E6"
- "\u94C0"
- "\u674F"
- "\u68D5"
- "\u5E7D"
- "\u88F4"
- "\u962E"
- "\u6577"
- "\u8304"
- "\u6CA7"
- "\u527D"
- "\u6073"
- "\u6DF3"
- "\u8431"
- "\u88B1"
- "\u4EA5"
- "\u75F1"
- "\u8154"
- "\u5AC9"
- "\u7CB9"
- "\u710A"
- "\u8BC0"
- "\u7CAA"
- "\u6714"
- "\u9EEF"
- "\u8C1C"
- "\u7728"
- "\u7941"
- "\u66A7"
- "\u9B41"
- "\u8F97"
- "\u7A57"
- "\u5026"
- "\u527F"
- "\u888D"
- "\u606D"
- "\u7099"
- "\u5A34"
- "\u73AB"
- "\u950F"
- "\u718F"
- "\u7AA5"
- "\u5815"
- "\u609F"
- "\u6643"
- "\u7F2A"
- "\u9A7F"
- "\u6CF7"
- "\u96C0"
- "\u60EB"
- "\u73BA"
- "\u5243"
- "\u6590"
- "\u8882"
- "\u68AD"
- "\u54C4"
- "\u90AA"
- "\u5C82"
- "\u817B"
- "\u5AE9"
- "\u6995"
- "\u8C34"
- "\u6F47"
- "\u7EAC"
- "\u4FAE"
- "\u7FC5"
- "\u9576"
- "\u5777"
- "\u5F6A"
- "\u7977"
- "\u531D"
- "\u803D"
- "\u841D"
- "\u7A91"
- "\u747E"
- "\u6EE4"
- "\u62F1"
- "\u54E8"
- "\u8822"
- "\u90A2"
- "\u6D9E"
- "\u6064"
- "\u6CFE"
- "\u8C24"
- "\u7011"
- "\u8236"
- "\u61C8"
- "\u5FF1"
- "\u70F9"
- "\u665F"
- "\u8E1E"
- "\u5241"
- "\u73C9"
- "\u5E9A"
- "\u6664"
- "\u58F6"
- "\u783E"
- "\u55C5"
- "\u5992"
- "\u5308"
- "\u80F0"
- "\u7EEF"
- "\u837C"
- "\u722A"
- "\u831C"
- "\u6866"
- "\u8707"
- "\u829C"
- "\u7384"
- "\u846B"
- "\u8682"
- "\u7ECA"
- "\u6401"
- "\u970F"
- "\u7C98"
- "\u4F5F"
- "\u96CD"
- "\u57AE"
- "\u7F81"
- "\u5A25"
- "\u78B1"
- "\u78F7"
- "\u948A"
- "\u6BD9"
- "\u8BFF"
- "\u7EF8"
- "\u634F"
- "\u9074"
- "\u754A"
- "\u53AE"
- "\u5DEB"
- "\u7316"
- "\u7357"
- "\u63B4"
- "\u8F8D"
- "\u8721"
- "\u8D63"
- "\u7B75"
- "\u8299"
- "\u849C"
- "\u7F06"
- "\u4FEA"
- "\u9E70"
- "\u7B0B"
- "\u6BCB"
- "\u5586"
- "\u9E6D"
- "\u8774"
- "\u6C40"
- "\u8BFD"
- "\u6854"
- "\u7BF7"
- "\u83BD"
- "\u6816"
- "\u996A"
- "\u4F3A"
- "\u6233"
- "\u8C0A"
- "\u9704"
- "\u4F84"
- "\u6ED4"
- "\u778E"
- "\u76B1"
- "\u86DF"
- "\u88D4"
- "\u70FD"
- "\u733F"
- "\u53EE"
- "\u7EF7"
- "\u817A"
- "\u66A8"
- "\u6CA5"
- "\u55A7"
- "\u56E4"
- "\u63A0"
- "\u9661"
- "\u81BA"
- "\u75D2"
- "\u9975"
- "\u620E"
- "\u891A"
- "\u4E10"
- "\u6E24"
- "\u5E1C"
- "\u5A04"
- "\u6D3C"
- "\u7984"
- "\u5A75"
- "\u7422"
- "\u8EAF"
- "\u79BA"
- "\u5CD9"
- "\u8E39"
- "\u601C"
- "\u7096"
- "\u5250"
- "\u7F1A"
- "\u8944"
- "\u67AB"
- "\u7EFD"
- "\u5EBE"
- "\u65A7"
- "\u7A74"
- "\u5BC7"
- "\u8747"
- "\u97AD"
- "\u960E"
- "\u77E2"
- "\u7CD9"
- "\u5DCD"
- "\u84BF"
- "\u6B92"
- "\u86F0"
- "\u56E7"
- "\u535C"
- "\u5B99"
- "\u73EE"
- "\u9E26"
- "\u749E"
- "\u7FDF"
- "\u9157"
- "\u8912"
- "\u8C41"
- "\u9551"
- "\u8037"
- "\u68E0"
- "\u57A6"
- "\u97EC"
- "\u836B"
- "\u7AA8"
- "\u9E3D"
- "\u7FB2"
- "\u61D2"
- "\u8EAC"
- "\u5315"
- "\u7280"
- "\u543C"
- "\u73C0"
- "\u6619"
- "\u6A31"
- "\u8E7F"
- "\u6289"
- "\u82CD"
- "\u6C5B"
- "\u94C9"
- "\u9549"
- "\u5594"
- "\u90AF"
- "\u90F8"
- "\u5671"
- "\u74EF"
- "\u6CBC"
- "\u637B"
- "\u82EF"
- "\u8E7C"
- "\u9E8B"
- "\u9600"
- "\u715E"
- "\u8E1D"
- "\u7F2D"
- "\u83CA"
- "\u7AFA"
- "\u5CED"
- "\u6525"
- "\u7656"
- "\u809B"
- "\u6CD4"
- "\u62EF"
- "\u7A9F"
- "\u9773"
- "\u8235"
- "\u5631"
- "\u6631"
- "\u52FA"
- "\u543E"
- "\u4E2B"
- "\u89C5"
- "\u9187"
- "\u78CB"
- "\u5F99"
- "\u9668"
- "\u60FA"
- "\u6E0D"
- "\u70AC"
- "\u683D"
- "\u664F"
- "\u9882"
- "\u5974"
- "\u6994"
- "\u9A6D"
- "\u56BC"
- "\u8D61"
- "\u8C5A"
- "\u8537"
- "\u6893"
- "\u68A7"
- "\u54FD"
- "\u6657"
- "\u6C5E"
- "\u5AE3"
- "\u854A"
- "\u797A"
- "\u75B9"
- "\u58F9"
- "\u566C"
- "\u7682"
- "\u77D7"
- "\u609A"
- "\u61A7"
- "\u61AC"
- "\u62F7"
- "\u6241"
- "\u5ED3"
- "\u8E74"
- "\u5C9A"
- "\u745B"
- "\u5D34"
- "\u6817"
- "\u56DA"
- "\u6DBF"
- "\u7901"
- "\u6654"
- "\u6BA1"
- "\u7480"
- "\u6DDE"
- "\u968B"
- "\u8E35"
- "\u94B5"
- "\u714A"
- "\u8D58"
- "\u77A7"
- "\u5BDE"
- "\u964B"
- "\u9AB7"
- "\u9AC5"
- "\u79F8"
- "\u79C6"
- "\u592F"
- "\u8354"
- "\u8941"
- "\u8913"
- "\u7B28"
- "\u6CAE"
- "\u7785"
- "\u6002"
- "\u8317"
- "\u7525"
- "\u4E9F"
- "\u6773"
- "\u7166"
- "\u631A"
- "\u68F5"
- "\u7960"
- "\u55EF"
- "\u6795"
- "\u7C9F"
- "\u6CCC"
- "\u8700"
- "\u5BE5"
- "\u9050"
- "\u6D9D"
- "\u8FAB"
- "\u7C41"
- "\u7A8D"
- "\u804B"
- "\u900D"
- "\u8DE4"
- "\u51F9"
- "\u91DC"
- "\u5600"
- "\u55D2"
- "\u6DDD"
- "\u85DC"
- "\u7FF1"
- "\u785A"
- "\u53FC"
- "\u75F9"
- "\u817C"
- "\u8146"
- "\u4F0E"
- "\u9A8B"
- "\u6115"
- "\u8165"
- "\u62EE"
- "\u8F67"
- "\u766B"
- "\u6A61"
- "\u818A"
- "\u89D1"
- "\u5BC5"
- "\u7812"
- "\u8DBE"
- "\u9890"
- "\u6F33"
- "\u5CE8"
- "\u545C"
- "\u6DC6"
- "\u51FF"
- "\u58D5"
- "\u94E8"
- "\u8386"
- "\u7B77"
- "\u74A7"
- "\u8B6C"
- "\u5C96"
- "\u62A0"
- "\u7B1B"
- "\u53A5"
- "\u783A"
- "\u5589"
- "\u914C"
- "\u7C27"
- "\u9CB8"
- "\u8E0A"
- "\u7261"
- "\u5B1B"
- "\u7F1C"
- "\u5942"
- "\u71B9"
- "\u95FD"
- "\u998A"
- "\u80EF"
- "\u5587"
- "\u4F36"
- "\u589F"
- "\u715C"
- "\u8018"
- "\u69B7"
- "\u9A81"
- "\u7329"
- "\u8F99"
- "\u72F8"
- "\u6ED5"
- "\u8BF5"
- "\u7A92"
- "\u604D"
- "\u9AE6"
- "\u8BEB"
- "\u69A8"
- "\u71A0"
- "\u853A"
- "\u85AF"
- "\u6B46"
- "\u7CA4"
- "\u592D"
- "\u62CC"
- "\u550F"
- "\u5384"
- "\u541D"
- "\u7737"
- "\u5CEA"
- "\u62D9"
- "\u548E"
- "\u7CA5"
- "\u75F0"
- "\u7405"
- "\u7F9A"
- "\u8398"
- "\u61A8"
- "\u77B0"
- "\u7085"
- "\u5B5C"
- "\u4EA2"
- "\u7F2E"
- "\u712F"
- "\u5484"
- "\u6687"
- "\u77EE"
- "\u6C72"
- "\u7076"
- "\u95F0"
- "\u595A"
- "\u6C76"
- "\u73F2"
- "\u9E93"
- "\u618B"
- "\u5D02"
- "\u9573"
- "\u6B83"
- "\u5349"
- "\u8BE7"
- "\u77E3"
- "\u5C4E"
- "\u8046"
- "\u828B"
- "\u5C51"
- "\u7F42"
- "\u7C7D"
- "\u7EDA"
- "\u535E"
- "\u6789"
- "\u6C55"
- "\u61CB"
- "\u5AB2"
- "\u5567"
- "\u63A3"
- "\u5B09"
- "\u4EE8"
- "\u59EC"
- "\u61FF"
- "\u9985"
- "\u80FA"
- "\u6482"
- "\u776B"
- "\u86D0"
- "\u8403"
- "\u7708"
- "\u98DA"
- "\u6BD3"
- "\u6D85"
- "\u663C"
- "\u6A71"
- "\u9A7C"
- "\u6DA0"
- "\u8C29"
- "\u5A76"
- "\u819B"
- "\u62C4"
- "\u7EE3"
- "\u6805"
- "\u90AC"
- "\u6020"
- "\u9119"
- "\u54C9"
- "\u8DFA"
- "\u5E18"
- "\u6C93"
- "\u6400"
- "\u814C"
- "\u7FBF"
- "\u6CF5"
- "\u911E"
- "\u90E1"
- "\u70C3"
- "\u611A"
- "\u8559"
- "\u57A4"
- "\u950C"
- "\u67E0"
- "\u6AAC"
- "\u8471"
- "\u57A2"
- "\u532E"
- "\u5366"
- "\u61CA"
- "\u63BA"
- "\u53F1"
- "\u576F"
- "\u7CEF"
- "\u8986"
- "\u94C6"
- "\u742C"
- "\u62A1"
- "\u6F62"
- "\u68FA"
- "\u587E"
- "\u98D3"
- "\u8BC5"
- "\u7FE9"
- "\u63CD"
- "\u6A80"
- "\u9CDD"
- "\u8BAA"
- "\u7194"
- "\u675E"
- "\u5543"
- "\u6600"
- "\u7D0A"
- "\u6556"
- "\u7490"
- "\u8517"
- "\u69CC"
- "\u94D0"
- "\u6421"
- "\u78D0"
- "\u5B95"
- "\u6813"
- "\u53ED"
- "\u621F"
- "\u9877"
- "\u6FD2"
- "\u7AA6"
- "\u6441"
- "\u4FD0"
- "\u77B3"
- "\u8695"
- "\u9E4A"
- "\u8FC2"
- "\u757F"
- "\u74E3"
- "\u5A9E"
- "\u5BDD"
- "\u8E66"
- "\u55D1"
- "\u8892"
- "\u6B89"
- "\u7A1A"
- "\u4FD8"
- "\u642A"
- "\u6CBD"
- "\u5983"
- "\u55D3"
- "\u80EB"
- "\u753A"
- "\u83B4"
- "\u82E3"
- "\u75D8"
- "\u8511"
- "\u7696"
- "\u679E"
- "\u5FD0"
- "\u5FD1"
- "\u9774"
- "\u83C1"
- "\u59E5"
- "\u8BD9"
- "\u56B7"
- "\u7109"
- "\u6CA3"
- "\u9739"
- "\u96F3"
- "\u50DA"
- "\u5C27"
- "\u560E"
- "\u8BE9"
- "\u54AB"
- "\u67EC"
- "\u60EE"
- "\u72C4"
- "\u5300"
- "\u88C6"
- "\u9ECF"
- "\u91C9"
- "\u81B3"
- "\u6E3A"
- "\u82DF"
- "\u7476"
- "\u553E"
- "\u7620"
- "\u8BA7"
- "\u7766"
- "\u5F26"
- "\u5E87"
- "\u8884"
- "\u5669"
- "\u627C"
- "\u621B"
- "\u7980"
- "\u607F"
- "\u6EC1"
- "\u9EBE"
- "\u7B71"
- "\u7600"
- "\u892A"
- "\u69DF"
- "\u7F28"
- "\u7ED2"
- "\u72B7"
- "\u8338"
- "\u60CB"
- "\u55E4"
- "\u5BEE"
- "\u8902"
- "\u54B3"
- "\u7F00"
- "\u8C19"
- "\u6DA7"
- "\u70BD"
- "\u7F04"
- "\u9E5C"
- "\u780C"
- "\u8D2E"
- "\u5EB5"
- "\u96A7"
- "\u5364"
- "\u8DC6"
- "\u768B"
- "\u8757"
- "\u6D31"
- "\u572A"
- "\u9091"
- "\u9504"
- "\u835F"
- "\u6E1A"
- "\u82C7"
- "\u5B70"
- "\u9E43"
- "\u54FC"
- "\u5443"
- "\u741B"
- "\u75E3"
- "\u6479"
- "\u75FC"
- "\u956F"
- "\u5201"
- "\u79E7"
- "\u8169"
- "\u9CDE"
- "\u4E4D"
- "\u989A"
- "\u6177"
- "\u6C13"
- "\u60E6"
- "\u5351"
- "\u631D"
- "\u71A8"
- "\u6FEE"
- "\u80F3"
- "\u74E2"
- "\u7830"
- "\u6EA7"
- "\u9537"
- "\u9E20"
- "\u7292"
- "\u59DD"
- "\u8E44"
- "\u5BB8"
- "\u4FA5"
- "\u952D"
- "\u4F76"
- "\u6D4A"
- "\u5A6A"
- "\u78FA"
- "\u54A4"
- "\u8FE2"
- "\u6A90"
- "\u90BA"
- "\u6382"
- "\u6E32"
- "\u568E"
- "\u795B"
- "\u4F22"
- "\u53DB"
- "\u64AE"
- "\u752C"
- "\u6DCC"
- "\u701B"
- "\u673D"
- "\u9642"
- "\u5E3C"
- "\u94FF"
- "\u9535"
- "\u6F13"
- "\u9A6F"
- "\u9CA8"
- "\u6292"
- "\u8301"
- "\u67FF"
- "\u8C94"
- "\u8C85"
- "\u949D"
- "\u9CC5"
- "\u568F"
- "\u66AE"
- "\u745A"
- "\u8364"
- "\u8713"
- "\u57A3"
- "\u98A4"
- "\u6EA5"
- "\u81C3"
- "\u622E"
- "\u67A3"
- "\u4F7C"
- "\u62D7"
- "\u54C6"
- "\u55E6"
- "\u60DA"
- "\u9E25"
- "\u501A"
- "\u55E8"
- "\u8238"
- "\u8D50"
- "\u59CA"
- "\u6194"
- "\u60B4"
- "\u94F0"
- "\u9EDD"
- "\u5C7F"
- "\u79C3"
- "\u563B"
- "\u695E"
- "\u68F1"
- "\u8888"
- "\u88DF"
- "\u6C74"
- "\u63C9"
- "\u9ACB"
- "\u60B8"
- "\u69BB"
- "\u901E"
- "\u667E"
- "\u5C4C"
- "\u95F3"
- "\u75CA"
- "\u889C"
- "\u6249"
- "\u7436"
- "\u6452"
- "\u637A"
- "\u5320"
- "\u7A88"
- "\u7A95"
- "\u98D2"
- "\u732C"
- "\u871A"
- "\u840B"
- "\u86AF"
- "\u8693"
- "\u9C9F"
- "\u6F88"
- "\u6A1F"
- "\u6096"
- "\u7396"
- "\u4FFE"
- "\u62BF"
- "\u5F77"
- "\u5F7F"
- "\u8671"
- "\u72D9"
- "\u9CB6"
- "\u69FF"
- "\u70D8"
- "\u630E"
- "\u72F0"
- "\u72DE"
- "\u9083"
- "\u77AA"
- "\u4FDA"
- "\u6D95"
- "\u8C2C"
- "\u776C"
- "\u8737"
- "\u5162"
- "\u954D"
- "\u7837"
- "\u83E0"
- "\u6026"
- "\u51C4"
- "\u536F"
- "\u7352"
- "\u6E00"
- "\u8F98"
- "\u6EC7"
- "\u71CE"
- "\u564E"
- "\u874E"
- "\u7DA6"
- "\u9122"
- "\u634E"
- "\u77BF"
- "\u873F"
- "\u8712"
- "\u79A7"
- "\u6988"
- "\u9539"
- "\u6BAD"
- "\u7235"
- "\u76F9"
- "\u6DD6"
- "\u557C"
- "\u74EE"
- "\u9CD6"
- "\u9556"
- "\u73D1"
- "\u7F79"
- "\u6B86"
- "\u6396"
- "\u67DE"
- "\u7F38"
- "\u7EC5"
- "\u68D8"
- "\u7949"
- "\u80F1"
- "\u6B93"
- "\u55E1"
- "\u55F7"
- "\u7B8D"
- "\u5729"
- "\u8012"
- "\u5A55"
- "\u8151"
- "\u8426"
- "\u9E5E"
- "\u73DC"
- "\u5575"
- "\u7459"
- "\u8446"
- "\u9021"
- "\u55FD"
- "\u9955"
- "\u992E"
- "\u96BC"
- "\u599E"
- "\u997A"
- "\u53E8"
- "\u914B"
- "\u6059"
- "\u6CD7"
- "\u5F29"
- "\u9A9C"
- "\u94CE"
- "\u9176"
- "\u869D"
- "\u70C1"
- "\u533E"
- "\u4FAC"
- "\u85FB"
- "\u99A5"
- "\u9AA5"
- "\u69D0"
- "\u7F15"
- "\u693F"
- "\u8886"
- "\u740A"
- "\u7A23"
- "\u85E9"
- "\u8FF8"
- "\u8E42"
- "\u8E8F"
- "\u96BD"
- "\u4FF8"
- "\u90EB"
- "\u7C38"
- "\u7825"
- "\u9AB8"
- "\u63AE"
- "\u659B"
- "\u5578"
- "\u748B"
- "\u579B"
- "\u672D"
- "\u908B"
- "\u9062"
- "\u8572"
- "\u54C7"
- "\u78B4"
- "\u909B"
- "\u5D03"
- "\u89D0"
- "\u7B19"
- "\u88F3"
- "\u6CDE"
- "\u868C"
- "\u918D"
- "\u9190"
- "\u62F4"
- "\u821C"
- "\u6C85"
- "\u61F5"
- "\u8C15"
- "\u5E1A"
- "\u87B3"
- "\u567C"
- "\u556A"
- "\u6F31"
- "\u90DC"
- "\u7889"
- "\u572D"
- "\u8C00"
- "\u8F76"
- "\u8200"
- "\u5472"
- "\u5576"
- "\u6C1F"
- "\u740F"
- "\u5785"
- "\u5A29"
- "\u4E7E"
- "\u93D6"
- "\u727E"
- "\u80AE"
- "\u5555"
- "\u540F"
- "\u6D93"
- "\u6C26"
- "\u9525"
- "\u684E"
- "\u543F"
- "\u70CA"
- "\u659F"
- "\u6C7E"
- "\u5C90"
- "\u8004"
- "\u800B"
- "\u55F2"
- "\u80DB"
- "\u759A"
- "\u9A87"
- "\u7663"
- "\u78E1"
- "\u4F91"
- "\u6F3E"
- "\u789A"
- "\u7409"
- "\u60EC"
- "\u9041"
- "\u8038"
- "\u5CB1"
- "\u7CD7"
- "\u7F19"
- "\u80B4"
- "\u68B5"
- "\u50EE"
- "\u9E35"
- "\u60AF"
- "\u5B6A"
- "\u8385"
- "\u622C"
- "\u9701"
- "\u7C07"
- "\u9035"
- "\u501C"
- "\u50A5"
- "\u998B"
- "\u84C1"
- "\u8859"
- "\u86C0"
- "\u852B"
- "\u5D27"
- "\u541F"
- "\u7430"
- "\u552C"
- "\u6E25"
- "\u5CB7"
- "\u4EE1"
- "\u6D8E"
- "\u9E33"
- "\u9E2F"
- "\u954A"
- "\u59A7"
- "\u5B37"
- "\u5AE6"
- "\u5AD4"
- "\u6C90"
- "\u4F09"
- "\u5D9D"
- "\u9522"
- "\u7B50"
- "\u8725"
- "\u8734"
- "\u6CF1"
- "\u9A85"
- "\u5406"
- "\u64A9"
- "\u602F"
- "\u53E9"
- "\u54DF"
- "\u556C"
- "\u5CAC"
- "\u7B03"
- "\u73B3"
- "\u7441"
- "\u909D"
- "\u54A3"
- "\u77DC"
- "\u562D"
- "\u9997"
- "\u5A40"
- "\u9ED4"
- "\u951F"
- "\u5570"
- "\u7FCC"
- "\u94E0"
- "\u8C89"
- "\u737E"
- "\u9163"
- "\u6963"
- "\u4F43"
- "\u7435"
- "\u8306"
- "\u7699"
- "\u51CB"
- "\u655D"
- "\u5323"
- "\u5D58"
- "\u5B93"
- "\u830E"
- "\u6942"
- "\u7AF2"
- "\u762A"
- "\u4F97"
- "\u94E3"
- "\u85B0"
- "\u7832"
- "\u7FA3"
- "\u6DFC"
- "\u895F"
- "\u598A"
- "\u5A20"
- "\u7F61"
- "\u7601"
- "\u6930"
- "\u70D9"
- "\u5457"
- "\u8343"
- "\u768E"
- "\u6B9A"
- "\u814B"
- "\u9ABC"
- "\u8153"
- "\u69AD"
- "\u9698"
- "\u5509"
- "\u94EE"
- "\u72E9"
- "\u62A8"
- "\u5CC1"
- "\u7CB1"
- "\u9602"
- "\u53A9"
- "\u83A0"
- "\u5429"
- "\u5490"
- "\u778C"
- "\u870A"
- "\u606C"
- "\u8191"
- "\u8E09"
- "\u8DC4"
- "\u988D"
- "\u6710"
- "\u759D"
- "\u6BC2"
- "\u79E3"
- "\u821B"
- "\u708A"
- "\u6F2F"
- "\u6CE0"
- "\u5598"
- "\u64B5"
- "\u72E1"
- "\u733E"
- "\u94C2"
- "\u949B"
- "\u835E"
- "\u62ED"
- "\u4E1E"
- "\u6F2D"
- "\u7ECC"
- "\u57DC"
- "\u63B0"
- "\u72C8"
- "\u951C"
- "\u83E9"
- "\u5F1B"
- "\u5BF0"
- "\u79E4"
- "\u705E"
- "\u9ECD"
- "\u84DF"
- "\u5D5B"
- "\u6989"
- "\u5E44"
- "\u988A"
- "\u7F24"
- "\u6726"
- "\u80E7"
- "\u51A5"
- "\u781D"
- "\u9540"
- "\u5919"
- "\u71CA"
- "\u835A"
- "\u6D48"
- "\u82E1"
- "\u773A"
- "\u966C"
- "\u5BD0"
- "\u4F58"
- "\u6FD1"
- "\u4EC4"
- "\u6954"
- "\u80DA"
- "\u5D69"
- "\u6D19"
- "\u8BD3"
- "\u961C"
- "\u6D5A"
- "\u89CA"
- "\u89CE"
- "\u66F0"
- "\u6035"
- "\u5156"
- "\u7A20"
- "\u5D4B"
- "\u824B"
- "\u7BEA"
- "\u7425"
- "\u739F"
- "\u8934"
- "\u891B"
- "\u55B1"
- "\u865E"
- "\u9B47"
- "\u51C7"
- "\u5F89"
- "\u561F"
- "\u81C6"
- "\u728A"
- "\u54CE"
- "\u9751"
- "\u4FFA"
- "\u586C"
- "\u59AF"
- "\u5A0C"
- "\u8708"
- "\u86A3"
- "\u6063"
- "\u6C8F"
- "\u78F4"
- "\u970E"
- "\u8DB8"
- "\u9E92"
- "\u6C2A"
- "\u7F07"
- "\u6C81"
- "\u7583"
- "\u6078"
- "\u7629"
- "\u6684"
- "\u61A9"
- "\u796F"
- "\u60F0"
- "\u6E89"
- "\u6CB1"
- "\u8BF2"
- "\u7B08"
- "\u64D8"
- "\u4EB3"
- "\u5B7A"
- "\u5FEA"
- "\u779F"
- "\u64DE"
- "\u7638"
- "\u63AC"
- "\u5501"
- "\u8E5A"
- "\u5321"
- "\u7C95"
- "\u9CB7"
- "\u6CD3"
- "\u53F5"
- "\u55E3"
- "\u772F"
- "\u70B7"
- "\u73FA"
- "\u6F15"
- "\u8C11"
- "\u54AF"
- "\u55EC"
- "\u7F30"
- "\u5372"
- "\u58D1"
- "\u9776"
- "\u968D"
- "\u5520"
- "\u6FE1"
- "\u76CE"
- "\u9A8A"
- "\u8171"
- "\u9798"
- "\u62E7"
- "\u75EB"
- "\u5BA6"
- "\u8BF6"
- "\u690B"
- "\u9F3E"
- "\u6E4D"
- "\u6BD7"
- "\u916A"
- "\u8D66"
- "\u7095"
- "\u7118"
- "\u5958"
- "\u9082"
- "\u9005"
- "\u5984"
- "\u9A90"
- "\u5352"
- "\u55B5"
- "\u89E5"
- "\u772C"
- "\u7EA3"
- "\u61B7"
- "\u8983"
- "\u5B40"
- "\u828A"
- "\u5B62"
- "\u60F6"
- "\u8FE5"
- "\u7EB0"
- "\u5480"
- "\u9E3E"
- "\u7BAB"
- "\u6666"
- "\u6CEF"
- "\u781A"
- "\u542D"
- "\u7962"
- "\u63E9"
- "\u5228"
- "\u73CF"
- "\u64B8"
- "\u5140"
- "\u75C9"
- "\u631B"
- "\u80E4"
- "\u5DFF"
- "\u7EB6"
- "\u9541"
- "\u54FA"
- "\u5494"
- "\u5693"
- "\u7A3C"
- "\u7116"
- "\u59A4"
- "\u59A9"
- "\u6F5E"
- "\u96CC"
- "\u683E"
- "\u4F8D"
- "\u7172"
- "\u5ADA"
- "\u7AFD"
- "\u606A"
- "\u9708"
- "\u8D5D"
- "\u83BA"
- "\u7736"
- "\u6853"
- "\u69CE"
- "\u9991"
- "\u6DAE"
- "\u67AD"
- "\u5F87"
- "\u6D35"
- "\u578C"
- "\u6635"
- "\u8936"
- "\u55BD"
- "\u812F"
- "\u5B71"
- "\u9068"
- "\u8C1A"
- "\u70F7"
- "\u643D"
- "\u916F"
- "\u67B7"
- "\u6849"
- "\u54A7"
- "\u7ABF"
- "\u62C8"
- "\u6593"
- "\u8DDB"
- "\u8E76"
- "\u761F"
- "\u4FED"
- "\u975B"
- "\u810D"
- <sos/eos>
token_type: char
train_data_path_and_name_and_type:
- - dump/raw/train_sp/wav.scp
- speech
- kaldi_ark
- - dump/raw/train_sp/text
- text
- text
train_dtype: float32
train_shape_file:
- exp/asr_stats_raw_sp/train/speech_shape
- exp/asr_stats_raw_sp/train/text_shape.char
use_amp: false
use_preprocessor: true
val_scheduler_criterion:
- valid
- acc
valid_batch_bins: null
valid_batch_size: null
valid_batch_type: null
valid_data_path_and_name_and_type:
- - dump/raw/dev/wav.scp
- speech
- kaldi_ark
- - dump/raw/dev/text
- text
- text
valid_max_cache_size: null
valid_shape_file:
- exp/asr_stats_raw_sp/valid/speech_shape
- exp/asr_stats_raw_sp/valid/text_shape.char
write_collected_feats: false
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment