Commit 5988d2cc authored by yuguo960516

bert-large

parent 478602ba

# Swin-Base (patch 4, window 7, 224x224), derived from the Swin-Tiny config below.
from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.embed_dim = 128
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [4, 8, 16, 32]
cfg.drop_path_rate = 0.5
model = LazyCall(SwinTransformer)(cfg=cfg)

# Swin-Large (patch 4, window 12, 384x384).
from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.img_size = 384
cfg.embed_dim = 192
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [6, 12, 24, 48]
cfg.window_size = 12
cfg.drop_path_rate = 0.1
model = LazyCall(SwinTransformer)(cfg=cfg)

# Swin-Large (patch 4, window 7, 224x224).
from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.embed_dim = 192
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [6, 12, 24, 48]
cfg.drop_path_rate = 0.1
model = LazyCall(SwinTransformer)(cfg=cfg)

# Swin-Small (patch 4, window 7, 224x224).
from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.depths = [2, 2, 18, 2]
cfg.drop_path_rate = 0.3
model = LazyCall(SwinTransformer)(cfg=cfg)

# Swin variant at 256x256 with window 8; keeps the Tiny embed_dim and depths but overrides num_heads.
from libai.config import LazyCall
from libai.models import SwinTransformer
from .swin_tiny_patch4_window7_224 import cfg
cfg.img_size = 256
cfg.num_heads = [4, 8, 16, 32]
cfg.window_size = 8
model = LazyCall(SwinTransformer)(cfg=cfg)

# Base config: Swin-Tiny (patch 4, window 7, 224x224). The Swin configs above import this cfg (swin_tiny_patch4_window7_224) and override fields.
from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import SwinTransformer
cfg = dict(
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
drop_path_rate=0.2,
ape=False,
patch_norm=True,
loss_func=None,
)
cfg = DictConfig(cfg)
model = LazyCall(SwinTransformer)(cfg=cfg)
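
# Usage sketch (not part of the original commit): `model` above is a deferred
# LazyCall object that only records the target class and its arguments; nothing
# is constructed until the config is instantiated. A minimal sketch, assuming
# libai.config exposes `instantiate` (detectron2-style lazy configs) and that
# LiBai's distributed utilities are already set up (normally done by its trainer):
from libai.config import instantiate

swin_tiny = instantiate(model)  # actually calls SwinTransformer(cfg=cfg)
num_params = sum(p.numel() for p in swin_tiny.parameters())
print(f"Swin-Tiny parameters: {num_params / 1e6:.1f}M")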

# SwinV2 variant (256x256, window 16) with deeper stages and more heads than Tiny.
from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.window_size = 16
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [4, 8, 16, 32]
cfg.drop_path_rate = 0.5
model = LazyCall(SwinTransformerV2)(cfg=cfg)

# SwinV2 variant (256x256, window 8) with deeper stages and more heads than Tiny.
from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.depths = [2, 2, 18, 2]
cfg.num_heads = [4, 8, 16, 32]
cfg.drop_path_rate = 0.5
model = LazyCall(SwinTransformerV2)(cfg=cfg)

# SwinV2-Small (256x256, window 16).
from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.window_size = 16
cfg.depths = [2, 2, 18, 2]
cfg.drop_path_rate = 0.3
model = LazyCall(SwinTransformerV2)(cfg=cfg)

# SwinV2-Small (256x256, window 8).
from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.depths = [2, 2, 18, 2]
cfg.drop_path_rate = 0.3
model = LazyCall(SwinTransformerV2)(cfg=cfg)

# SwinV2-Tiny (256x256) with window 16.
from libai.config import LazyCall
from libai.models import SwinTransformerV2
from .swinv2_tiny_patch4_window8_256 import cfg
cfg.window_size = 16
model = LazyCall(SwinTransformerV2)(cfg=cfg)

# Base config: SwinV2-Tiny (patch 4, window 8, 256x256). The SwinV2 configs above import this cfg (swinv2_tiny_patch4_window8_256) and override fields.
from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import SwinTransformerV2
cfg = dict(
img_size=256,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=8,
mlp_ratio=4.0,
qkv_bias=True,
drop_rate=0.0,
drop_path_rate=0.2,
ape=False,
patch_norm=True,
pretrained_window_sizes=[0, 0, 0, 0],
loss_func=None,
)
cfg = DictConfig(cfg)
model = LazyCall(SwinTransformerV2)(cfg=cfg)
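
# Usage sketch (not part of the original commit): because construction is
# deferred, downstream configs can still override fields on the `model` object
# defined above before building it. The `num_classes = 100` override is purely
# illustrative; `instantiate` is assumed to come from libai.config.
from libai.config import instantiate

model.cfg.num_classes = 100       # e.g. adapt the classification head
swinv2_tiny = instantiate(model)  # builds SwinTransformerV2 with the override applied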

# T5 model config: defines both a bare T5Model and a T5ForPreTraining target from the same cfg.
from omegaconf import DictConfig
from libai.config import LazyCall
from libai.models import T5Model, T5ForPreTraining
cfg = dict(
vocab_size=30522,
hidden_size=768,
hidden_layers=6,
num_attention_heads=16,
intermediate_size=1536,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
embedding_dropout_prob=0.1,
initializer_range=0.02,
layernorm_eps=1e-5,
bias_gelu_fusion=True,
bias_dropout_fusion=True,
scale_mask_softmax_fusion=True,
apply_query_key_layer_scaling=True,
apply_residual_post_layernorm=False,
amp_enabled=False,
)
cfg = DictConfig(cfg)
t5_model = LazyCall(T5Model)(cfg=cfg)
pretrain_model = LazyCall(T5ForPreTraining)(cfg=cfg)
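
# Usage sketch (not part of the original commit): two deferred targets share one
# cfg here, so this single config yields both the bare model and its
# pre-training variant. `instantiate` is assumed to come from libai.config.
from libai.config import instantiate

t5 = instantiate(t5_model)                 # builds T5Model(cfg=cfg)
t5_pretrain = instantiate(pretrain_model)  # builds T5ForPreTraining(cfg=cfg)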

# ViT-Base/16 (embed_dim 768, 12 heads), derived from the ViT-Tiny config.
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 16
cfg.embed_dim = 768
cfg.num_heads = 12
model = LazyCall(VisionTransformer)(cfg=cfg)

# ViT-Base/32.
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 32
cfg.embed_dim = 768
cfg.num_heads = 12
model = LazyCall(VisionTransformer)(cfg=cfg)

# ViT-g/14 ("giant": embed_dim 1408, depth 40).
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 14
cfg.embed_dim = 1408
cfg.mlp_ratio = 48 / 11
cfg.depth = 40
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)

# ViT-G/14 ("gigantic": embed_dim 1664, depth 48).
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 14
cfg.embed_dim = 1664
cfg.mlp_ratio = 64 / 13
cfg.depth = 48
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)

# ViT-Huge/16 (embed_dim 1280, depth 32).
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 16
cfg.embed_dim = 1280
cfg.depth = 32
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)

# ViT-Large/16 (embed_dim 1024, depth 24).
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 16
cfg.embed_dim = 1024
cfg.depth = 24
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)

# ViT-Large/32.
from libai.config import LazyCall
from libai.models import VisionTransformer
from .vit_tiny_patch16_224 import cfg
cfg.patch_size = 32
cfg.embed_dim = 1024
cfg.depth = 24
cfg.num_heads = 16
model = LazyCall(VisionTransformer)(cfg=cfg)
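
# Quick arithmetic check (illustration only, not part of the original commit):
# the fractional mlp_ratio values in the ViT-g/14 and ViT-G/14 configs above
# correspond to round MLP hidden widths, and embed_dim stays divisible by
# num_heads in every variant listed here.
assert 1408 * 48 // 11 == 6144            # ViT-g/14 MLP hidden width
assert 1664 * 64 // 13 == 8192            # ViT-G/14 MLP hidden width
assert 1280 % 16 == 0 and 1024 % 16 == 0  # ViT-Huge/ViT-Large head dims are integral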