Commit 1baf0566 authored by limm's avatar limm
Browse files

add tests part

parent 495d9ed9
Pipeline #2800 canceled with stages
# Copyright (c) OpenMMLab. All rights reserved.
import platform
import pytest
import torch
from mmpretrain.models import SimCLR
from mmpretrain.structures import DataSample
# Shared sub-model configs for the SimCLR test below.
# Backbone: ResNet-18 feature extractor with standard BatchNorm.
backbone = dict(type='ResNet', depth=18, norm_cfg=dict(type='BN'))
# Projection neck; tiny hidden/output dims (2) keep the test lightweight.
neck = dict(
    type='NonLinearNeck',  # SimCLR non-linear neck
    in_channels=512,
    hid_channels=2,
    out_channels=2,
    num_layers=2,
    with_avg_pool=True,
    norm_cfg=dict(type='BN1d'))
# Contrastive head using cross-entropy loss with temperature 0.1.
head = dict(
    type='ContrastiveHead',
    loss=dict(type='CrossEntropyLoss'),
    temperature=0.1)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_simclr():
    """Smoke-test SimCLR: loss forward pass and feature extraction."""
    preprocess_cfg = dict(
        mean=(123.675, 116.28, 103.53),
        std=(58.395, 57.12, 57.375),
        to_rgb=True)
    model = SimCLR(
        backbone=backbone,
        neck=neck,
        head=head,
        data_preprocessor=preprocess_cfg)

    # Two augmented views of a 2-image batch.
    batch = {
        'inputs': [torch.randn((2, 3, 224, 224)) for _ in range(2)],
        'data_samples': [DataSample() for _ in range(2)],
    }
    processed = model.data_preprocessor(batch)

    # Loss mode should yield a scalar loss value.
    losses = model(**processed, mode='loss')
    assert isinstance(losses['loss'].item(), float)

    # Tensor mode extracts the backbone feature map of one view.
    feats = model(processed['inputs'][0], mode='tensor')
    assert feats[0].size() == torch.Size([2, 512, 7, 7])
# Copyright (c) OpenMMLab. All rights reserved.
import platform
import pytest
import torch
from mmpretrain.models import SimMIM, SimMIMSwinTransformer
from mmpretrain.structures import DataSample
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_simmim_swin():
    """Smoke-test the SimMIM Swin backbone with and without a mask."""
    cfg = dict(
        arch='B',
        img_size=192,
        stage_cfgs=dict(block_cfgs=dict(window_size=6)))
    encoder = SimMIMSwinTransformer(**cfg)
    encoder.init_weights()

    images = torch.randn((2, 3, 192, 192))
    mask = torch.rand((2, 48, 48))

    # Masked forward returns the final-stage feature map.
    masked_out = encoder(images, mask)[0]
    assert masked_out.shape == torch.Size([2, 1024, 6, 6])

    # Without a mask the backbone still produces a single feature map
    # of the same shape.
    plain_out = encoder(images, None)
    assert len(plain_out) == 1
    assert plain_out[0].shape == torch.Size([2, 1024, 6, 6])
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_simmim():
    """Smoke-test the full SimMIM model in loss mode."""
    preprocess_cfg = dict(
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5],
        to_rgb=True)

    # Model config: Swin-B encoder, linear decoder, L1 pixel loss.
    backbone = dict(
        type='SimMIMSwinTransformer',
        arch='B',
        img_size=192,
        stage_cfgs=dict(block_cfgs=dict(window_size=6)))
    neck = dict(
        type='SimMIMLinearDecoder', in_channels=128 * 2**3, encoder_stride=32)
    head = dict(
        type='SimMIMHead',
        patch_size=4,
        loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3))
    model = SimMIM(
        backbone=backbone,
        neck=neck,
        head=head,
        data_preprocessor=preprocess_cfg)

    # Build a batch where every sample carries the same random mask.
    sample = DataSample()
    sample.set_mask(torch.rand((48, 48)))
    batch = {
        'inputs': torch.randn((2, 3, 192, 192)),
        'data_samples': [sample for _ in range(2)],
    }
    processed = model.data_preprocessor(batch)

    outputs = model(**processed, mode='loss')
    assert isinstance(outputs['loss'].item(), float)
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import pytest
import torch
from mmpretrain.models import SimSiam
from mmpretrain.structures import DataSample
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_simsiam():
    """Smoke-test SimSiam: loss forward pass and feature extraction."""
    preprocess_cfg = dict(
        mean=(123.675, 116.28, 103.53),
        std=(58.395, 57.12, 57.375),
        to_rgb=True)
    backbone = dict(
        type='ResNet',
        depth=18,
        norm_cfg=dict(type='BN'),
        zero_init_residual=True)
    neck = dict(
        type='NonLinearNeck',
        in_channels=512,
        hid_channels=2,
        out_channels=2,
        num_layers=3,
        with_last_bn_affine=False,
        with_avg_pool=True,
        norm_cfg=dict(type='BN1d'))
    head = dict(
        type='LatentPredictHead',
        loss=dict(type='CosineSimilarityLoss'),
        predictor=dict(
            type='NonLinearNeck',
            in_channels=2,
            hid_channels=2,
            out_channels=2,
            with_avg_pool=False,
            with_last_bn=False,
            with_last_bias=True,
            norm_cfg=dict(type='BN1d')))

    model = SimSiam(
        backbone=backbone,
        neck=neck,
        head=head,
        data_preprocessor=copy.deepcopy(preprocess_cfg))

    # Two augmented views of a 2-image batch.
    batch = {
        'inputs': [torch.randn((2, 3, 224, 224)) for _ in range(2)],
        'data_samples': [DataSample() for _ in range(2)],
    }
    processed = model.data_preprocessor(batch)

    # The cosine-similarity-based loss is bounded below by -1.
    losses = model(**processed, mode='loss')
    assert losses['loss'] > -1

    # Tensor mode extracts the backbone feature map of one view.
    feats = model(processed['inputs'][0], mode='tensor')
    assert feats[0].size() == torch.Size([2, 512, 7, 7])
# Copyright (c) OpenMMLab. All rights reserved.
import platform
import pytest
import torch
from mmpretrain.models import SparK
from mmpretrain.structures import DataSample
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_spark():
    """Smoke-test the SparK pretraining model in loss mode."""
    data_preprocessor = {
        'mean': (123.675, 116.28, 103.53),
        'std': (58.395, 57.12, 57.375),
        'to_rgb': True
    }
    # Sparse ResNet-50 encoder emitting all four stage outputs.
    backbone = dict(
        type='SparseResNet',
        depth=50,
        out_indices=(0, 1, 2, 3),
        drop_path_rate=0.05,
        norm_cfg=dict(type='BN'))
    # Lightweight decoder; upsample_ratio matches the encoder's total
    # downsample ratio.
    neck = dict(
        type='SparKLightDecoder',
        feature_dim=512,
        upsample_ratio=32,  # equal to downsample_ratio
        mid_channels=0,
        norm_cfg=dict(type='BN'),
        last_act=False)
    head = dict(
        type='SparKPretrainHead',
        loss=dict(type='PixelReconstructionLoss', criterion='L2'))
    alg = SparK(
        backbone=backbone,
        neck=neck,
        head=head,
        data_preprocessor=data_preprocessor,
        enc_dec_norm_cfg=dict(type='BN'),
    )
    fake_data = {
        'inputs': torch.randn((2, 3, 224, 224)),
        # Fixed key typo: 'data_samples' (plural) is the batch-dict key used
        # by the data-preprocessor contract and every other test here.
        'data_samples': [DataSample() for _ in range(2)]
    }
    fake_inputs = alg.data_preprocessor(fake_data)
    fake_loss = alg(**fake_inputs, mode='loss')
    assert isinstance(fake_loss['loss'].item(), float)
# Copyright (c) OpenMMLab. All rights reserved.
import platform
import pytest
import torch
from mmpretrain.models import SwAV
from mmpretrain.structures import DataSample
@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_swav():
    """Smoke-test SwAV with multi-crop inputs (2 large + 6 small crops)."""
    preprocess_cfg = dict(
        mean=(123.675, 116.28, 103.53),
        std=(58.395, 57.12, 57.375),
        to_rgb=True)
    backbone = dict(
        type='ResNet',
        depth=18,
        norm_cfg=dict(type='BN'),
        zero_init_residual=True)
    neck = dict(
        type='SwAVNeck',
        in_channels=512,
        hid_channels=2,
        out_channels=2,
        norm_cfg=dict(type='BN1d'),
        with_avg_pool=True)
    head = dict(
        type='SwAVHead',
        loss=dict(
            type='SwAVLoss',
            feat_dim=2,  # equal to neck['out_channels']
            epsilon=0.05,
            temperature=0.1,
            num_crops=[2, 6]))
    model = SwAV(
        backbone=backbone,
        neck=neck,
        head=head,
        data_preprocessor=preprocess_cfg)

    # Two global 224x224 crops followed by six local 96x96 crops,
    # matching num_crops=[2, 6] above.
    crops = [torch.randn((2, 3, 224, 224)) for _ in range(2)]
    crops += [torch.randn((2, 3, 96, 96)) for _ in range(6)]
    batch = {
        'inputs': crops,
        'data_samples': [DataSample() for _ in range(2)],
    }
    processed = model.data_preprocessor(batch)

    outputs = model(**processed, mode='loss')
    assert isinstance(outputs['loss'].item(), float)
# Copyright (c) OpenMMLab. All rights reserved.
import platform
from unittest import TestCase
import pytest
import torch
from mmpretrain.models import VQKD, DALLEEncoder, HOGGenerator
class TestDALLE(TestCase):
    """Tests for the DALLEEncoder."""

    @pytest.mark.skipif(
        platform.system() == 'Windows', reason='Windows mem limit')
    def test_dalle(self):
        # A 112x112 input maps to 8192 channels on a 14x14 spatial grid.
        encoder = DALLEEncoder()
        images = torch.rand((2, 3, 112, 112))
        outputs = encoder(images)
        self.assertEqual(list(outputs.shape), [2, 8192, 14, 14])
class TestHOGGenerator(TestCase):
    """Tests for the HOGGenerator feature target generator."""

    def test_hog_generator(self):
        generator = HOGGenerator()
        images = torch.randn((2, 3, 224, 224))

        # HOG features for a 224x224 batch have shape (N, 196, 108).
        features = generator(images)
        self.assertEqual(list(features.shape), [2, 196, 108])

        # Visualization needs a batch dimension on the stored HOG output.
        batched_out = generator.out[0].unsqueeze(0)
        hog_image = generator.generate_hog_image(batched_out)
        self.assertEqual(hog_image.shape, (224, 224))

        # An un-batched (3-D) input must be rejected.
        with pytest.raises(AssertionError):
            generator.generate_hog_image(generator.out[0])
class TestVQKD(TestCase):
    """Tests for the VQKD model built from a ViT-base encoder config."""

    # Encoder (ViT) configuration; kept fully explicit so the test pins
    # every option it relies on.
    ENCODER_CFG = dict(
        arch='base',
        img_size=224,
        patch_size=16,
        in_channels=3,
        out_indices=-1,
        drop_rate=0.,
        drop_path_rate=0.,
        norm_cfg=dict(type='LN', eps=1e-6),
        final_norm=True,
        out_type='featmap',
        with_cls_token=True,
        frozen_stages=-1,
        use_abs_pos_emb=True,
        use_rel_pos_bias=False,
        use_shared_rel_pos_bias=False,
        layer_scale_init_value=0.,
        interpolate_mode='bicubic',
        patch_cfg=dict(),
        layer_cfgs=dict(),
        init_cfg=None)

    @pytest.mark.skipif(
        platform.system() == 'Windows', reason='Windows mem limit')
    def test_vqkd(self):
        # A 224x224 batch yields 196 outputs per image — matches the
        # 14x14 patch grid for patch_size=16 (presumably one token index
        # per patch; confirm against the VQKD forward implementation).
        model = VQKD(encoder_config=self.ENCODER_CFG)
        fake_inputs = torch.rand((2, 3, 224, 224))
        fake_outputs = model(fake_inputs)
        assert list(fake_outputs.shape) == [2, 196]
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from unittest import TestCase
import torch
from mmengine import ConfigDict
from mmengine.registry import init_default_scope
from mmpretrain.models import AverageClsScoreTTA, ImageClassifier
from mmpretrain.registry import MODELS
from mmpretrain.structures import DataSample
# Set the default registry scope so MODELS.build calls below resolve
# type names without an explicit scope prefix.
init_default_scope('mmpretrain')
class TestAverageClsScoreTTA(TestCase):
    """Tests for the test-time-augmentation wrapper AverageClsScoreTTA."""

    # A minimal ResNet-18 classifier wrapped by the TTA model.
    DEFAULT_ARGS = dict(
        type='AverageClsScoreTTA',
        module=dict(
            type='ImageClassifier',
            backbone=dict(type='ResNet', depth=18),
            neck=dict(type='GlobalAveragePooling'),
            head=dict(
                type='LinearClsHead',
                num_classes=10,
                in_channels=512,
                loss=dict(type='CrossEntropyLoss'))))

    def test_initialize(self):
        """Building the config should wrap an ImageClassifier instance."""
        model: AverageClsScoreTTA = MODELS.build(self.DEFAULT_ARGS)
        self.assertIsInstance(model.module, ImageClassifier)

    def test_forward(self):
        """Direct forward on the TTA wrapper is intentionally unsupported."""
        inputs = torch.rand(1, 3, 224, 224)
        model: AverageClsScoreTTA = MODELS.build(self.DEFAULT_ARGS)

        # The forward of TTA model should not be called.
        with self.assertRaisesRegex(NotImplementedError, 'will not be called'):
            model(inputs)

    def test_test_step(self):
        """TTA prediction equals the mean of the per-view prediction scores."""
        cfg = ConfigDict(deepcopy(self.DEFAULT_ARGS))
        cfg.module.data_preprocessor = dict(
            mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5])
        model: AverageClsScoreTTA = MODELS.build(cfg)

        img1 = torch.randint(0, 256, (1, 3, 224, 224))
        img2 = torch.randint(0, 256, (1, 3, 224, 224))
        data1 = {
            'inputs': img1,
            'data_samples': [DataSample().set_gt_label(1)]
        }
        data2 = {
            'inputs': img2,
            'data_samples': [DataSample().set_gt_label(1)]
        }
        # The TTA input carries both views of the same sample at once.
        data_tta = {
            'inputs': [img1, img2],
            'data_samples': [[DataSample().set_gt_label(1)],
                             [DataSample().set_gt_label(1)]]
        }

        # Per-view scores from the wrapped module vs. the averaged TTA score.
        score1 = model.module.test_step(data1)[0].pred_score
        score2 = model.module.test_step(data2)[0].pred_score
        score_tta = model.test_step(data_tta)[0].pred_score
        torch.testing.assert_allclose(score_tta, (score1 + score2) / 2)
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import ANY, MagicMock
import pytest
import torch
from mmpretrain.models.utils.attention import (ShiftWindowMSA, WindowMSA,
torch_meshgrid)
def get_relative_position_index(window_size):
    """Reference relative-position index, ported from the original
    Swin-Transformer code, for checking WindowMSA's buffer."""
    ys = torch.arange(window_size[0])
    xs = torch.arange(window_size[1])
    grid = torch.stack(torch_meshgrid([ys, xs]))  # 2, Wh, Ww
    flat = torch.flatten(grid, 1)  # 2, Wh*Ww
    # Pairwise coordinate differences: 2, Wh*Ww, Wh*Ww
    rel = flat[:, :, None] - flat[:, None, :]
    rel = rel.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
    # Shift both axes so indices start from 0, then linearize the pair
    # (dy, dx) into a single table index.
    rel[:, :, 0] += window_size[0] - 1
    rel[:, :, 1] += window_size[1] - 1
    rel[:, :, 0] *= 2 * window_size[1] - 1
    return rel.sum(-1)  # Wh*Ww, Wh*Ww
class TestWindowMSA(TestCase):
    """Tests for the window-based multi-head self-attention module."""

    def test_forward(self):
        attn = WindowMSA(embed_dims=96, window_size=(7, 7), num_heads=4)
        inputs = torch.rand((16, 7 * 7, 96))
        output = attn(inputs)
        self.assertEqual(output.shape, inputs.shape)

        # test non-square window_size
        attn = WindowMSA(embed_dims=96, window_size=(6, 7), num_heads=4)
        inputs = torch.rand((16, 6 * 7, 96))
        output = attn(inputs)
        self.assertEqual(output.shape, inputs.shape)

    def test_relative_pos_embed(self):
        attn = WindowMSA(embed_dims=96, window_size=(7, 8), num_heads=4)
        # Bias table has one row per relative offset, one column per head.
        self.assertEqual(attn.relative_position_bias_table.shape,
                         ((2 * 7 - 1) * (2 * 8 - 1), 4))
        # test relative_position_index against the reference implementation
        expected_rel_pos_index = get_relative_position_index((7, 8))
        self.assertTrue(
            torch.allclose(attn.relative_position_index,
                           expected_rel_pos_index))

        # test default init: the bias table starts at zero and is only
        # populated by init_weights().
        self.assertTrue(
            torch.allclose(attn.relative_position_bias_table,
                           torch.tensor(0.)))
        attn.init_weights()
        self.assertFalse(
            torch.allclose(attn.relative_position_bias_table,
                           torch.tensor(0.)))

    def test_qkv_bias(self):
        # test qkv_bias=True
        attn = WindowMSA(
            embed_dims=96, window_size=(7, 7), num_heads=4, qkv_bias=True)
        self.assertEqual(attn.qkv.bias.shape, (96 * 3, ))

        # test qkv_bias=False
        attn = WindowMSA(
            embed_dims=96, window_size=(7, 7), num_heads=4, qkv_bias=False)
        self.assertIsNone(attn.qkv.bias)

    def test_qk_scale(self):
        # Fixed method name typo ('tets_' -> 'test_') so pytest actually
        # collects and runs this test.
        # test default qk_scale: head_dim ** -0.5
        attn = WindowMSA(
            embed_dims=96, window_size=(7, 7), num_heads=4, qk_scale=None)
        head_dims = 96 // 4
        self.assertAlmostEqual(attn.scale, head_dims**-0.5)

        # test specified qk_scale
        attn = WindowMSA(
            embed_dims=96, window_size=(7, 7), num_heads=4, qk_scale=0.3)
        self.assertEqual(attn.scale, 0.3)

    def test_attn_drop(self):
        inputs = torch.rand(16, 7 * 7, 96)
        attn = WindowMSA(
            embed_dims=96, window_size=(7, 7), num_heads=4, attn_drop=1.0)
        # drop all attn output, output should be equal to proj.bias
        self.assertTrue(torch.allclose(attn(inputs), attn.proj.bias))

    def test_prob_drop(self):
        inputs = torch.rand(16, 7 * 7, 96)
        attn = WindowMSA(
            embed_dims=96, window_size=(7, 7), num_heads=4, proj_drop=1.0)
        # proj_drop=1.0 zeroes the entire output.
        self.assertTrue(torch.allclose(attn(inputs), torch.tensor(0.)))

    def test_mask(self):
        inputs = torch.rand(16, 7 * 7, 96)
        attn = WindowMSA(embed_dims=96, window_size=(7, 7), num_heads=4)
        mask = torch.zeros((4, 49, 49))
        # Mask the first column
        mask[:, 0, :] = -100
        mask[:, :, 0] = -100
        outs = attn(inputs, mask=mask)
        # Perturbing the masked token must not change the unmasked outputs.
        inputs[:, 0, :].normal_()
        outs_with_mask = attn(inputs, mask=mask)
        torch.testing.assert_allclose(outs[:, 1:, :], outs_with_mask[:, 1:, :])
class TestShiftWindowMSA(TestCase):
    """Tests for window MSA with cyclic shift and small-map padding."""

    def test_forward(self):
        inputs = torch.rand((1, 14 * 14, 96))
        attn = ShiftWindowMSA(embed_dims=96, window_size=7, num_heads=4)
        output = attn(inputs, (14, 14))
        self.assertEqual(output.shape, inputs.shape)
        self.assertEqual(attn.w_msa.relative_position_bias_table.shape,
                         ((2 * 7 - 1)**2, 4))

        # test forward with shift_size
        attn = ShiftWindowMSA(
            embed_dims=96, window_size=7, num_heads=4, shift_size=3)
        output = attn(inputs, (14, 14))
        assert output.shape == (inputs.shape)

        # test irregular input shape (not a multiple of the window size)
        input_resolution = (19, 18)
        attn = ShiftWindowMSA(embed_dims=96, num_heads=4, window_size=7)
        inputs = torch.rand((1, 19 * 18, 96))
        output = attn(inputs, input_resolution)
        assert output.shape == (inputs.shape)

        # test wrong input_resolution: sequence length must equal H * W
        input_resolution = (14, 14)
        attn = ShiftWindowMSA(embed_dims=96, num_heads=4, window_size=7)
        inputs = torch.rand((1, 14 * 14, 96))
        with pytest.raises(AssertionError):
            attn(inputs, (14, 15))

    def test_pad_small_map(self):
        # test pad_small_map=True: a 6x7 map is padded up to the 7x7
        # window and the attention mask is built for the padded size.
        inputs = torch.rand((1, 6 * 7, 96))
        attn = ShiftWindowMSA(
            embed_dims=96,
            window_size=7,
            num_heads=4,
            shift_size=3,
            pad_small_map=True)
        attn.get_attn_mask = MagicMock(wraps=attn.get_attn_mask)
        output = attn(inputs, (6, 7))
        self.assertEqual(output.shape, inputs.shape)
        attn.get_attn_mask.assert_called_once_with((7, 7),
                                                   window_size=7,
                                                   shift_size=3,
                                                   device=ANY)

        # test pad_small_map=False: a map smaller than the window errors out
        inputs = torch.rand((1, 6 * 7, 96))
        attn = ShiftWindowMSA(
            embed_dims=96,
            window_size=7,
            num_heads=4,
            shift_size=3,
            pad_small_map=False)
        with self.assertRaisesRegex(AssertionError, r'the window size \(7\)'):
            attn(inputs, (6, 7))

        # test pad_small_map=False, and the input size equals to window size;
        # the shift is then disabled (shift_size passed as 0).
        inputs = torch.rand((1, 7 * 7, 96))
        attn.get_attn_mask = MagicMock(wraps=attn.get_attn_mask)
        output = attn(inputs, (7, 7))
        self.assertEqual(output.shape, inputs.shape)
        attn.get_attn_mask.assert_called_once_with((7, 7),
                                                   window_size=7,
                                                   shift_size=0,
                                                   device=ANY)

    def test_drop_layer(self):
        inputs = torch.rand((1, 14 * 14, 96))
        attn = ShiftWindowMSA(
            embed_dims=96,
            window_size=7,
            num_heads=4,
            dropout_layer=dict(type='Dropout', drop_prob=1.0))
        attn.init_weights()
        # drop_prob=1.0 drops the entire attention output, so the result
        # should be all zeros.
        self.assertTrue(
            torch.allclose(attn(inputs, (14, 14)), torch.tensor(0.)))
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import numpy as np
import torch
from mmpretrain.models import Mixup, RandomBatchAugment
from mmpretrain.registry import BATCH_AUGMENTS
class TestRandomBatchAugment(TestCase):
    """Tests for RandomBatchAugment, which samples one augment per batch."""

    def test_initialize(self):
        # test single augmentation (config dict) — normalized to a list
        augments = dict(type='Mixup', alpha=1.)
        batch_augments = RandomBatchAugment(augments)
        self.assertIsInstance(batch_augments.augments, list)
        self.assertEqual(len(batch_augments.augments), 1)

        # test specify augments with object
        augments = Mixup(alpha=1.)
        batch_augments = RandomBatchAugment(augments)
        self.assertIsInstance(batch_augments.augments, list)
        self.assertEqual(len(batch_augments.augments), 1)

        # test multiple augmentation
        augments = [
            dict(type='Mixup', alpha=1.),
            dict(type='CutMix', alpha=0.8),
        ]
        batch_augments = RandomBatchAugment(augments)
        # mixup, cutmix
        self.assertEqual(len(batch_augments.augments), 2)
        self.assertIsNone(batch_augments.probs)

        # test specify probs; the remainder (1 - 0.5 - 0.3 = 0.2) becomes
        # the probability of applying no augmentation at all.
        augments = [
            dict(type='Mixup', alpha=1.),
            dict(type='CutMix', alpha=0.8),
        ]
        batch_augments = RandomBatchAugment(augments, probs=[0.5, 0.3])
        # mixup, cutmix and None
        self.assertEqual(len(batch_augments.augments), 3)
        self.assertAlmostEqual(batch_augments.probs[-1], 0.2)

        # test assertion: probs length must match the number of augments,
        # and the probabilities must not sum above 1.
        with self.assertRaisesRegex(AssertionError, 'Got 2 vs 1'):
            RandomBatchAugment(augments, probs=0.5)
        with self.assertRaisesRegex(AssertionError, 'exceeds 1.'):
            RandomBatchAugment(augments, probs=[0.5, 0.6])

    def test_call(self):
        inputs = torch.rand(2, 3, 224, 224)
        scores = torch.rand(2, 10)

        augments = [
            dict(type='Mixup', alpha=1.),
            dict(type='CutMix', alpha=0.8),
        ]
        batch_augments = RandomBatchAugment(augments, probs=[0.5, 0.3])
        # With RandomState(0) the second augment is the one selected; mock
        # it to verify it receives the batch. NOTE(review): this expectation
        # is seed-dependent — revisit if the sampling logic changes.
        with patch('numpy.random', np.random.RandomState(0)):
            batch_augments.augments[1] = MagicMock()
            batch_augments(inputs, scores)
            batch_augments.augments[1].assert_called_once_with(inputs, scores)

        augments = [
            dict(type='Mixup', alpha=1.),
            dict(type='CutMix', alpha=0.8),
        ]
        batch_augments = RandomBatchAugment(augments, probs=[0.0, 0.0])
        # Zero probabilities: the batch passes through unchanged
        # (identical objects, not copies).
        mixed_inputs, mixed_samples = batch_augments(inputs, scores)
        self.assertIs(mixed_inputs, inputs)
        self.assertIs(mixed_samples, scores)
class TestMixup(TestCase):
    """Tests for the Mixup batch augmentation."""

    DEFAULT_ARGS = dict(type='Mixup', alpha=1.)

    def test_initialize(self):
        # A non-numeric alpha must be rejected when building the augment.
        bad_cfg = {**self.DEFAULT_ARGS, 'alpha': 'unknown'}
        with self.assertRaises(AssertionError):
            BATCH_AUGMENTS.build(bad_cfg)

    def test_call(self):
        images = torch.rand(2, 3, 224, 224)
        mixup = BATCH_AUGMENTS.build(self.DEFAULT_ARGS)

        # Multi-class soft labels keep their shapes after mixing.
        soft_labels = torch.rand(2, 10)
        out_images, out_labels = mixup(images, soft_labels)
        self.assertEqual(out_images.shape, (2, 3, 224, 224))
        self.assertEqual(out_labels.shape, (2, 10))

        # test binary classification
        binary_labels = torch.rand(2, 1)
        out_images, out_labels = mixup(images, binary_labels)
        self.assertEqual(out_images.shape, (2, 3, 224, 224))
        self.assertEqual(out_labels.shape, (2, 1))
class TestCutMix(TestCase):
    """Tests for the CutMix batch augmentation."""

    DEFAULT_ARGS = dict(type='CutMix', alpha=1.)

    def test_initialize(self):
        # A non-numeric alpha must be rejected when building the augment.
        bad_cfg = {**self.DEFAULT_ARGS, 'alpha': 'unknown'}
        with self.assertRaises(AssertionError):
            BATCH_AUGMENTS.build(bad_cfg)

    def test_call(self):
        images = torch.rand(2, 3, 224, 224)
        labels = torch.rand(2, 10)

        def check_shapes(cutmix, lbls):
            # Mixing must preserve both image and label shapes.
            out_images, out_labels = cutmix(images, lbls)
            self.assertEqual(out_images.shape, (2, 3, 224, 224))
            self.assertEqual(out_labels.shape, lbls.shape)

        # test with cutmix_minmax
        check_shapes(
            BATCH_AUGMENTS.build({
                **self.DEFAULT_ARGS, 'cutmix_minmax': (0.1, 0.2)
            }), labels)

        # test without correct_lam
        check_shapes(
            BATCH_AUGMENTS.build({
                **self.DEFAULT_ARGS, 'correct_lam': False
            }), labels)

        # test default settings
        cutmix = BATCH_AUGMENTS.build(self.DEFAULT_ARGS)
        check_shapes(cutmix, labels)

        # test binary classification
        check_shapes(cutmix, torch.rand(2, 1))
class TestResizeMix(TestCase):
    """Tests for the ResizeMix batch augmentation."""

    DEFAULT_ARGS = dict(type='ResizeMix', alpha=1.)

    def test_initialize(self):
        # A non-numeric alpha must be rejected when building the augment.
        bad_cfg = {**self.DEFAULT_ARGS, 'alpha': 'unknown'}
        with self.assertRaises(AssertionError):
            BATCH_AUGMENTS.build(bad_cfg)

    def test_call(self):
        images = torch.rand(2, 3, 224, 224)
        augment = BATCH_AUGMENTS.build(self.DEFAULT_ARGS)

        # Multi-class soft labels keep their shapes after mixing.
        soft_labels = torch.rand(2, 10)
        out_images, out_labels = augment(images, soft_labels)
        self.assertEqual(out_images.shape, (2, 3, 224, 224))
        self.assertEqual(out_labels.shape, (2, 10))

        # test binary classification
        binary_labels = torch.rand(2, 1)
        out_images, out_labels = augment(images, binary_labels)
        self.assertEqual(out_images.shape, (2, 3, 224, 224))
        self.assertEqual(out_labels.shape, (2, 1))
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmpretrain.models import (ClsDataPreprocessor, RandomBatchAugment,
SelfSupDataPreprocessor,
TwoNormDataPreprocessor, VideoDataPreprocessor)
from mmpretrain.registry import MODELS
from mmpretrain.structures import DataSample
class TestClsDataPreprocessor(TestCase):
    """Tests for ClsDataPreprocessor: batch stacking, padding, channel
    conversion, normalization and batch augmentation."""

    def test_stack_batch(self):
        # A list of per-sample tensors is stacked into a single batch tensor.
        cfg = dict(type='ClsDataPreprocessor')
        processor: ClsDataPreprocessor = MODELS.build(cfg)

        data = {
            'inputs': [torch.randint(0, 256, (3, 224, 224))],
            'data_samples': [DataSample().set_gt_label(1)]
        }
        processed_data = processor(data)
        inputs = processed_data['inputs']
        data_samples = processed_data['data_samples']
        self.assertEqual(inputs.shape, (1, 3, 224, 224))
        self.assertEqual(len(data_samples), 1)
        self.assertTrue((data_samples[0].gt_label == torch.tensor([1])).all())

    def test_padding(self):
        # pad_size_divisor=16 pads every spatial dim up to a multiple
        # of 16 (255 -> 256).
        cfg = dict(type='ClsDataPreprocessor', pad_size_divisor=16)
        processor: ClsDataPreprocessor = MODELS.build(cfg)

        data = {
            'inputs': [
                torch.randint(0, 256, (3, 255, 255)),
                torch.randint(0, 256, (3, 224, 224))
            ]
        }
        inputs = processor(data)['inputs']
        self.assertEqual(inputs.shape, (2, 3, 256, 256))

        # An already-batched tensor input is padded the same way.
        data = {'inputs': torch.randint(0, 256, (2, 3, 255, 255))}
        inputs = processor(data)['inputs']
        self.assertEqual(inputs.shape, (2, 3, 256, 256))

    def test_to_rgb(self):
        # to_rgb=True flips the channel axis (BGR <-> RGB).
        cfg = dict(type='ClsDataPreprocessor', to_rgb=True)
        processor: ClsDataPreprocessor = MODELS.build(cfg)

        data = {'inputs': [torch.randint(0, 256, (3, 224, 224))]}
        inputs = processor(data)['inputs']
        torch.testing.assert_allclose(data['inputs'][0].flip(0).float(),
                                      inputs[0])

        data = {'inputs': torch.randint(0, 256, (1, 3, 224, 224))}
        inputs = processor(data)['inputs']
        torch.testing.assert_allclose(data['inputs'].flip(1).float(), inputs)

    def test_normalization(self):
        # mean/std of 127.5 map uint8 pixel values into [-1, 1].
        cfg = dict(
            type='ClsDataPreprocessor',
            mean=[127.5, 127.5, 127.5],
            std=[127.5, 127.5, 127.5])
        processor: ClsDataPreprocessor = MODELS.build(cfg)

        data = {'inputs': [torch.randint(0, 256, (3, 224, 224))]}
        processed_data = processor(data)
        inputs = processed_data['inputs']
        self.assertTrue((inputs >= -1).all())
        self.assertTrue((inputs <= 1).all())
        # Without data_samples in the input, the output carries None.
        self.assertIsNone(processed_data['data_samples'])

        data = {'inputs': torch.randint(0, 256, (1, 3, 224, 224))}
        inputs = processor(data)['inputs']
        self.assertTrue((inputs >= -1).all())
        self.assertTrue((inputs <= 1).all())

    def test_batch_augmentation(self):
        # A batch_augments config is materialized as a RandomBatchAugment.
        cfg = dict(
            type='ClsDataPreprocessor',
            num_classes=10,
            batch_augments=dict(augments=[
                dict(type='Mixup', alpha=0.8),
                dict(type='CutMix', alpha=1.)
            ]))
        processor: ClsDataPreprocessor = MODELS.build(cfg)
        self.assertIsInstance(processor.batch_augments, RandomBatchAugment)
        data = {
            'inputs': [torch.randint(0, 256, (3, 224, 224))],
            'data_samples': [DataSample().set_gt_label(1)]
        }
        processed_data = processor(data, training=True)
        self.assertIn('inputs', processed_data)
        self.assertIn('data_samples', processed_data)

        # batch_augments=None disables augmentation entirely.
        cfg['batch_augments'] = None
        processor: ClsDataPreprocessor = MODELS.build(cfg)
        self.assertIsNone(processor.batch_augments)
        data = {'inputs': [torch.randint(0, 256, (3, 224, 224))]}
        processed_data = processor(data, training=True)
        self.assertIn('inputs', processed_data)
        self.assertIsNone(processed_data['data_samples'])
class TestSelfSupDataPreprocessor(TestCase):
    """Tests for SelfSupDataPreprocessor, which accepts multi-view
    list inputs in addition to plain batch tensors."""

    def test_to_rgb(self):
        cfg = dict(type='SelfSupDataPreprocessor', to_rgb=True)
        processor: SelfSupDataPreprocessor = MODELS.build(cfg)
        self.assertTrue(processor._channel_conversion)

        fake_data = {
            'inputs':
            [torch.randn((2, 3, 224, 224)),
             torch.randn((2, 3, 224, 224))],
            'data_samples': [DataSample(), DataSample()]
        }
        inputs = processor(fake_data)['inputs']
        # The channel axis of every view is flipped (BGR <-> RGB).
        torch.testing.assert_allclose(fake_data['inputs'][0].flip(1).float(),
                                      inputs[0])
        torch.testing.assert_allclose(fake_data['inputs'][1].flip(1).float(),
                                      inputs[1])

    def test_forward(self):
        data_preprocessor = SelfSupDataPreprocessor(
            to_rgb=True, mean=[124, 117, 104], std=[59, 58, 58])

        # test list inputs: each element is one augmented view of the batch.
        fake_data = {
            'inputs': [torch.randn((2, 3, 224, 224))],
            'data_samples': [DataSample(), DataSample()]
        }
        fake_output = data_preprocessor(fake_data)
        self.assertEqual(len(fake_output['inputs']), 1)
        self.assertEqual(len(fake_output['data_samples']), 2)

        # test torch.Tensor inputs: the batch tensor passes through whole.
        fake_data = {
            'inputs': torch.randn((2, 3, 224, 224)),
            'data_samples': [DataSample(), DataSample()]
        }
        fake_output = data_preprocessor(fake_data)
        self.assertEqual(fake_output['inputs'].shape,
                         torch.Size((2, 3, 224, 224)))
        self.assertEqual(len(fake_output['data_samples']), 2)
class TestTwoNormDataPreprocessor(TestCase):
    """Tests for TwoNormDataPreprocessor, which applies two different
    normalizations (mean/std and second_mean/second_std)."""

    def test_assertion(self):
        # second_mean / second_std are required ...
        with pytest.raises(AssertionError):
            _ = TwoNormDataPreprocessor(
                to_rgb=True,
                mean=(123.675, 116.28, 103.53),
                std=(58.395, 57.12, 57.375),
            )
        # ... and each must provide exactly three channel values.
        with pytest.raises(AssertionError):
            _ = TwoNormDataPreprocessor(
                to_rgb=True,
                mean=(123.675, 116.28, 103.53),
                std=(58.395, 57.12, 57.375),
                second_mean=(127.5, 127.5),
                second_std=(127.5, 127.5, 127.5),
            )
        with pytest.raises(AssertionError):
            _ = TwoNormDataPreprocessor(
                to_rgb=True,
                mean=(123.675, 116.28, 103.53),
                std=(58.395, 57.12, 57.375),
                second_mean=(127.5, 127.5, 127.5),
                second_std=(127.5, 127.5),
            )

    def test_forward(self):
        data_preprocessor = dict(
            mean=(123.675, 116.28, 103.53),
            std=(58.395, 57.12, 57.375),
            second_mean=(127.5, 127.5, 127.5),
            second_std=(127.5, 127.5, 127.5),
            to_rgb=True)
        data_preprocessor = TwoNormDataPreprocessor(**data_preprocessor)
        fake_data = {
            'inputs':
            [torch.randn((2, 3, 224, 224)),
             torch.randn((2, 3, 224, 224))],
            # Fixed key typo: 'data_samples' (plural) matches the batch-dict
            # contract used by every other preprocessor test in this file.
            'data_samples': [DataSample(), DataSample()]
        }
        fake_output = data_preprocessor(fake_data)
        self.assertEqual(len(fake_output['inputs']), 2)
        self.assertEqual(len(fake_output['data_samples']), 2)
class TestVideoDataPreprocessor(TestCase):
    """Tests for VideoDataPreprocessor in both NCTHW and NCHW layouts.

    NOTE: the input key was fixed from 'data_sample' to the standard
    'data_samples' (plural) for consistency with the batch-dict contract
    used by the other preprocessor tests in this file.
    """

    def test_NCTHW_format(self):
        data_preprocessor = VideoDataPreprocessor(
            mean=[114.75, 114.75, 114.75],
            std=[57.375, 57.375, 57.375],
            to_rgb=True,
            format_shape='NCTHW')

        # test list inputs
        fake_data = {
            'inputs': [torch.randn((2, 3, 4, 224, 224))],
            'data_samples': [DataSample(), DataSample()]
        }
        fake_output = data_preprocessor(fake_data)
        self.assertEqual(len(fake_output['inputs']), 1)
        self.assertEqual(len(fake_output['data_samples']), 2)

        # test torch.Tensor inputs
        fake_data = {
            'inputs': torch.randn((2, 3, 4, 224, 224)),
            'data_samples': [DataSample(), DataSample()]
        }
        fake_output = data_preprocessor(fake_data)
        self.assertEqual(fake_output['inputs'].shape,
                         torch.Size((2, 3, 4, 224, 224)))
        self.assertEqual(len(fake_output['data_samples']), 2)

    def test_NCHW_format(self):
        data_preprocessor = VideoDataPreprocessor(
            mean=[114.75, 114.75, 114.75],
            std=[57.375, 57.375, 57.375],
            to_rgb=True,
            format_shape='NCHW')

        # test list inputs
        fake_data = {
            'inputs': [torch.randn((2, 3, 224, 224))],
            'data_samples': [DataSample(), DataSample()]
        }
        fake_output = data_preprocessor(fake_data)
        self.assertEqual(len(fake_output['inputs']), 1)
        self.assertEqual(len(fake_output['data_samples']), 2)

        # test torch.Tensor inputs
        fake_data = {
            'inputs': torch.randn((2, 3, 224, 224)),
            'data_samples': [DataSample(), DataSample()]
        }
        fake_output = data_preprocessor(fake_data)
        self.assertEqual(fake_output['inputs'].shape,
                         torch.Size((2, 3, 224, 224)))
        self.assertEqual(len(fake_output['data_samples']), 2)
# Copyright (c) OpenMMLab. All rights reserved.
import math
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.logging import MessageHub
from mmengine.testing import assert_allclose
from mmpretrain.models.utils import CosineEMA
class TestEMA(TestCase):
    """Tests for the CosineEMA model wrapper."""

    def test_cosine_ema(self):
        model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))

        # init message hub — CosineEMA presumably reads 'max_iters' from the
        # message hub to schedule its momentum (inferred from the cosine
        # formula below using the same max_iters; confirm in CosineEMA).
        max_iters = 5
        test = dict(name='ema_test')
        message_hub = MessageHub.get_instance(**test)
        message_hub.update_info('max_iters', max_iters)

        # test EMA
        momentum = 0.996
        end_momentum = 1.
        ema_model = CosineEMA(model, momentum=1 - momentum)
        averaged_params = [
            torch.zeros_like(param) for param in model.parameters()
        ]
        for i in range(max_iters):
            updated_averaged_params = []
            for p, p_avg in zip(model.parameters(), averaged_params):
                # Randomly perturb the source model in-place.
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    # The first update copies the source parameters directly.
                    updated_averaged_params.append(p.clone())
                else:
                    # Reference momentum: cosine ramp from `momentum` to
                    # `end_momentum` over max_iters steps.
                    m = end_momentum - (end_momentum - momentum) * (
                        math.cos(math.pi * i / float(max_iters)) + 1) / 2
                    updated_averaged_params.append(
                        (p_avg * m + p * (1 - m)).clone())
            ema_model.update_parameters(model)
            averaged_params = updated_averaged_params

        # The EMA parameters must match the reference computation above.
        for p_target, p_ema in zip(averaged_params, ema_model.parameters()):
            assert_allclose(p_target, p_ema)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmpretrain.models.backbones import VGG
from mmpretrain.models.utils import HybridEmbed, PatchEmbed, PatchMerging
def cal_unfold_dim(dim, kernel_size, stride, padding=0, dilation=1):
    """Return the output length along one spatial dim for an unfold op."""
    effective_kernel = dilation * (kernel_size - 1) + 1
    return (dim + 2 * padding - effective_kernel) // stride + 1
def test_patch_embed():
    """PatchEmbed turns images into patch-token sequences."""
    # Default config: a 224x224 image yields 196 tokens of dim 768.
    embed = PatchEmbed()
    tokens = embed(torch.randn(1, 3, 224, 224))
    assert tokens.shape == torch.Size((1, 196, 768))

    # Overlapping patches: kernel 16 with stride 8 gives 27 * 27 = 729 tokens.
    embed = PatchEmbed(conv_cfg=dict(kernel_size=16, stride=8))
    tokens = embed(torch.randn(1, 3, 224, 224))
    assert tokens.shape == torch.Size((1, 729, 768))
def test_hybrid_embed():
    """HybridEmbed tokenizes the feature map of a CNN backbone (VGG11)."""
    cnn = VGG(11, norm_eval=True)
    cnn.init_weights()
    embed = HybridEmbed(cnn)
    tokens = embed(torch.randn(1, 3, 224, 224))
    assert tokens.shape == torch.Size((1, 49, 768))
def test_patch_merging():
    """Test PatchMerging's kernel/stride/padding/dilation handling."""
    settings = dict(in_channels=16, out_channels=32, padding=0)
    downsample = PatchMerging(**settings)

    # test forward with wrong dims: input must be (B, H*W, C), i.e. tokens
    # with channels last, not a (B, C, H*W) tensor.
    with pytest.raises(AssertionError):
        inputs = torch.rand((1, 16, 56 * 56))
        downsample(inputs, input_size=(56, 56))

    # test patch merging forward: 56x56 tokens merge down to 28x28.
    inputs = torch.rand((1, 56 * 56, 16))
    out, output_size = downsample(inputs, input_size=(56, 56))
    assert output_size == (28, 28)
    assert out.shape == (1, 28 * 28, 32)

    # test different kernel_size in each direction
    downsample = PatchMerging(kernel_size=(2, 3), **settings)
    out, output_size = downsample(inputs, input_size=(56, 56))
    expected_dim = cal_unfold_dim(56, 2, 2) * cal_unfold_dim(56, 3, 3)
    assert downsample.sampler.kernel_size == (2, 3)
    assert output_size == (cal_unfold_dim(56, 2, 2), cal_unfold_dim(56, 3, 3))
    assert out.shape == (1, expected_dim, 32)

    # test default stride: stride defaults to the kernel size.
    downsample = PatchMerging(kernel_size=6, **settings)
    assert downsample.sampler.stride == (6, 6)

    # test stride=3
    downsample = PatchMerging(kernel_size=6, stride=3, **settings)
    out, output_size = downsample(inputs, input_size=(56, 56))
    assert downsample.sampler.stride == (3, 3)
    assert out.shape == (1, cal_unfold_dim(56, 6, stride=3)**2, 32)

    # test padding
    downsample = PatchMerging(
        in_channels=16, out_channels=32, kernel_size=6, padding=2)
    out, output_size = downsample(inputs, input_size=(56, 56))
    assert downsample.sampler.padding == (2, 2)
    assert out.shape == (1, cal_unfold_dim(56, 6, 6, padding=2)**2, 32)

    # test str padding: with the default (string) padding the sampler's own
    # padding stays (0, 0) while the output matches an explicit padding of 2
    # — presumably padding is applied adaptively before the sampler; confirm
    # against the PatchMerging implementation.
    downsample = PatchMerging(in_channels=16, out_channels=32, kernel_size=6)
    out, output_size = downsample(inputs, input_size=(56, 56))
    assert downsample.sampler.padding == (0, 0)
    assert out.shape == (1, cal_unfold_dim(56, 6, 6, padding=2)**2, 32)

    # test dilation
    downsample = PatchMerging(kernel_size=6, dilation=2, **settings)
    out, output_size = downsample(inputs, input_size=(56, 56))
    assert downsample.sampler.dilation == (2, 2)
    assert out.shape == (1, cal_unfold_dim(56, 6, 6, dilation=2)**2, 32)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmpretrain.models.utils import InvertedResidual, SELayer
def is_norm(modules):
    """Return True when ``modules`` is a normalization layer.

    Recognizes ``GroupNorm`` and every ``_BatchNorm`` subclass
    (BatchNorm1d/2d/3d, SyncBatchNorm, ...).
    """
    return isinstance(modules, (GroupNorm, _BatchNorm))
def test_inverted_residual():
    """Test ``InvertedResidual``: argument validation, residual-shortcut and
    expand-conv selection, SE layer, custom norm/act configs and gradient
    checkpointing."""
    with pytest.raises(AssertionError):
        # stride must be in [1, 2]
        InvertedResidual(16, 16, 32, stride=3)

    with pytest.raises(AssertionError):
        # se_cfg must be None or dict
        InvertedResidual(16, 16, 32, se_cfg=list())

    # Add expand conv if in_channels and mid_channels is not the same
    # (signature is (in_channels, out_channels, mid_channels))
    assert InvertedResidual(32, 16, 32).with_expand_conv is False
    assert InvertedResidual(16, 16, 32).with_expand_conv is True

    # Test InvertedResidual forward, stride=1
    # stride=1 with in_channels == out_channels enables the residual shortcut
    block = InvertedResidual(16, 16, 32, stride=1)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert getattr(block, 'se', None) is None
    assert block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward, stride=2
    # stride=2 disables the shortcut and halves the spatial size
    block = InvertedResidual(16, 16, 32, stride=2)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert not block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 28, 28))

    # Test InvertedResidual forward with se layer
    se_cfg = dict(channels=32)
    block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert isinstance(block.se, SELayer)
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward without expand conv
    # (in_channels == mid_channels, see the assertion above)
    block = InvertedResidual(32, 16, 32)
    x = torch.randn(1, 32, 56, 56)
    x_out = block(x)
    assert getattr(block, 'expand_conv', None) is None
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward with GroupNorm
    block = InvertedResidual(
        16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    # every norm layer in the block must have been built as GroupNorm
    for m in block.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward with HSigmoid
    block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 16, 56, 56))

    # Test InvertedResidual forward with checkpoint
    # (gradient checkpointing must not change the output shape)
    block = InvertedResidual(16, 16, 32, with_cp=True)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert block.with_cp
    assert x_out.shape == torch.Size((1, 16, 56, 56))
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmpretrain.models.utils import LayerScale
class TestLayerScale(TestCase):
    """Unit tests for the ``LayerScale`` module."""

    def test_init(self):
        """An invalid ``data_format`` is rejected; the learnable weight
        initializes to ``1e-5``."""
        with self.assertRaisesRegex(AssertionError, "'data_format' could"):
            cfg = dict(
                dim=10,
                data_format='BNC',
            )
            LayerScale(**cfg)

        cfg = dict(dim=10)
        ls = LayerScale(**cfg)
        assert torch.equal(ls.weight,
                           torch.ones(10, requires_grad=True) * 1e-5)

    def test_forward(self):
        """Scaling works in both layouts; in-place mode returns the input
        tensor object itself.

        BUG FIX: this method was previously named ``forward``, so neither
        pytest nor unittest ever collected it and the checks never ran;
        renamed to ``test_forward``.
        """
        # Test channels_last
        cfg = dict(dim=256, inplace=False, data_format='channels_last')
        ls_channels_last = LayerScale(**cfg)
        x = torch.randn((4, 49, 256))
        out = ls_channels_last(x)
        self.assertEqual(tuple(out.size()), (4, 49, 256))
        # all weights start at 1e-5, so the output is a uniform scale of x
        assert torch.equal(x * 1e-5, out)

        # Test channels_first
        cfg = dict(dim=256, inplace=False, data_format='channels_first')
        ls_channels_first = LayerScale(**cfg)
        x = torch.randn((4, 256, 7, 7))
        out = ls_channels_first(x)
        self.assertEqual(tuple(out.size()), (4, 256, 7, 7))
        assert torch.equal(x * 1e-5, out)

        # Test inplace True
        cfg = dict(dim=256, inplace=True, data_format='channels_first')
        ls_channels_first = LayerScale(**cfg)
        x = torch.randn((4, 256, 7, 7))
        out = ls_channels_first(x)
        self.assertEqual(tuple(out.size()), (4, 256, 7, 7))
        # in-place scaling must hand back the very same tensor
        self.assertIs(x, out)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine.utils import digit_version
from mmpretrain.models.utils import channel_shuffle, is_tracing, make_divisible
def test_make_divisible():
    """Check ``make_divisible`` rounding under several settings."""
    # no explicit min_value: 34 rounds down to the nearest multiple of 8
    assert make_divisible(34, 8, None) == 32
    # rounding 10 down to 8 would violate min_ratio=0.9, so it bumps up
    assert make_divisible(10, 8, min_ratio=0.9) == 16
    # with the looser ratio 0.8, 33 is allowed to round down to 32
    assert make_divisible(33, 8, min_ratio=0.8) == 32
def test_channel_shuffle():
    """``channel_shuffle`` must interleave channels across groups."""
    x = torch.randn(1, 24, 56, 56)

    # the channel count must be divisible by the number of groups
    with pytest.raises(AssertionError):
        channel_shuffle(x, 7)

    groups = 3
    batch_size, num_channels, height, width = x.size()
    per_group = num_channels // groups
    shuffled = channel_shuffle(x, groups)

    # element-wise check of the expected permutation for groups = 3
    for b in range(batch_size):
        for src in range(num_channels):
            dst = src % per_group * groups + src // per_group
            for row in range(height):
                for col in range(width):
                    assert x[b, src, row, col] == shuffled[b, dst, row, col]
@pytest.mark.skipif(
    digit_version(torch.__version__) < digit_version('1.6.0'),
    reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_tracing():
    """``is_tracing`` should report whether we are inside a jit trace."""

    def branch_on_tracing(t):
        # returns the tensor while tracing, a plain list otherwise
        return t if is_tracing() else t.tolist()

    sample = torch.rand(3)
    # eager execution takes the non-tracing branch
    assert isinstance(branch_on_tracing(sample), list)
    # traced execution bakes in the tracing branch
    traced = torch.jit.trace(branch_on_tracing, (torch.rand(1), ))
    assert isinstance(traced(sample), torch.Tensor)
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
import torch.nn.functional as F
from mmpretrain.models.utils import GRN, LayerNorm2d
class TestGRN(TestCase):
    """Unit tests for Global Response Normalization (GRN)."""

    def test_init(self):
        """Gamma/beta are learnable and shaped ``(in_channels,)``."""
        module = GRN(in_channels=32, eps=1e-3)
        self.assertEqual(module.in_channels, 32)
        self.assertEqual(module.eps, 1e-3)
        self.assertTrue(module.gamma.requires_grad)
        self.assertTrue(module.beta.requires_grad)
        self.assertEqual(module.gamma.shape, (32, ))
        # BUG FIX: was ``assertTrue(module.beta.shape, (32, ))`` — the tuple
        # is treated as the failure message, so the check always passed.
        self.assertEqual(module.beta.shape, (32, ))

    def test_forward(self):
        """Forward matches the reference GRN formula in both the
        channel_last and channel_first layouts."""
        module = GRN(in_channels=32, eps=1e-3)
        input_ = torch.rand(1, 28, 28, 32)
        # reference: L2 norm over spatial dims, normalized by its
        # channel-wise mean, then affine transform plus residual
        gx = torch.norm(input_, p=2, dim=(1, 2), keepdim=True)
        nx = gx / (gx.mean(dim=3, keepdim=True) + 1e-3)
        expected_out = module.gamma * input_ * nx + module.beta + input_
        torch.testing.assert_allclose(
            module(input_, data_format='channel_last'), expected_out)
        # the same data permuted to NCHW must give the permuted result
        input_ = input_.permute([0, 3, 1, 2])
        expected_out = expected_out.permute([0, 3, 1, 2])
        torch.testing.assert_allclose(
            module(input_, data_format='channel_first'), expected_out)
class TestLayerNorm2d(TestCase):
    """Unit tests for ``LayerNorm2d``."""

    def test_init(self):
        """Weight/bias are learnable and shaped ``(num_channels,)``."""
        module = LayerNorm2d(num_channels=32, eps=1e-3)
        self.assertEqual(module.num_channels, 32)
        self.assertEqual(module.eps, 1e-3)
        self.assertTrue(module.weight.requires_grad)
        self.assertTrue(module.bias.requires_grad)
        self.assertEqual(module.weight.shape, (32, ))
        # BUG FIX: was ``assertTrue(module.bias.shape, (32, ))`` — the tuple
        # is treated as the failure message, so the check always passed.
        self.assertEqual(module.bias.shape, (32, ))

    def test_forward(self):
        """Forward matches ``F.layer_norm`` in both layouts."""
        module = LayerNorm2d(num_channels=32, eps=1e-3)
        input_ = torch.rand(1, 28, 28, 32)
        expected_out = F.layer_norm(input_, module.normalized_shape,
                                    module.weight, module.bias, 1e-3)
        torch.testing.assert_allclose(
            module(input_, data_format='channel_last'), expected_out)
        # the same data permuted to NCHW must give the permuted result
        input_ = input_.permute([0, 3, 1, 2])
        expected_out = expected_out.permute([0, 3, 1, 2])
        torch.testing.assert_allclose(
            module(input_, data_format='channel_first'), expected_out)
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmpretrain.models.utils import (ConditionalPositionEncoding,
RotaryEmbeddingFast)
def test_conditional_position_encoding_module():
    """CPE with stride=2 halves the token grid in each dimension."""
    cpe = ConditionalPositionEncoding(in_channels=32, embed_dims=32, stride=2)
    # 56x56 = 3136 tokens in -> 28x28 = 784 tokens out
    out = cpe(torch.randn(1, 3136, 32), (56, 56))
    assert out.shape == torch.Size([1, 784, 32])
def test_rotary_embedding_fast_module():
    """RoPE must preserve the input shape for square and non-square grids."""
    # square patch resolution
    rope = RotaryEmbeddingFast(embed_dims=64, patch_resolution=24)
    out = rope(torch.randn(1, 2, 24 * 24, 64), (24, 24))
    assert out.shape == torch.Size([1, 2, 24 * 24, 64])

    # rectangular patch resolution
    rope = RotaryEmbeddingFast(embed_dims=64, patch_resolution=(14, 20))
    out = rope(torch.randn(1, 2, 14 * 20, 64), (14, 20))
    assert out.shape == torch.Size([1, 2, 14 * 20, 64])
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmpretrain.models.utils import SELayer
def is_norm(modules):
    """Check whether ``modules`` is a GroupNorm or BatchNorm variant."""
    norm_types = (GroupNorm, _BatchNorm)
    return isinstance(modules, norm_types)
def test_se():
    """Test ``SELayer``: argument validation and squeeze-channel selection
    via ``ratio``/``divisor``/``squeeze_channels``, plus custom act_cfg."""
    with pytest.raises(AssertionError):
        # base_channels must be a number
        SELayer(16, squeeze_channels='32')

    with pytest.raises(AssertionError):
        # base_channels must be None or a number larger than 0
        SELayer(16, squeeze_channels=-1)

    with pytest.raises(AssertionError):
        # act_cfg must be two dict tuple
        SELayer(
            16,
            act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'),
                     dict(type='ReLU')))

    # Test SELayer forward, channels=64
    # default ratio=8: squeeze channels = 64 / 8 = 8
    input = torch.randn((4, 64, 112, 112))
    se = SELayer(64)
    output = se(input)

    assert se.conv1.out_channels == 8
    assert se.conv2.in_channels == 8
    assert output.shape == torch.Size((4, 64, 112, 112))

    # Test SELayer forward, ratio=4
    # squeeze channels = 128 / 4 = 32
    input = torch.randn((4, 128, 112, 112))
    se = SELayer(128, ratio=4)
    output = se(input)

    assert se.conv1.out_channels == 32
    assert se.conv2.in_channels == 32
    assert output.shape == torch.Size((4, 128, 112, 112))

    # Test SELayer forward, channels=54, ratio=4
    # channels cannot be divisible by ratio; 54 / 4 = 13.5 rounds to 16
    # with the default divisor of 8
    input = torch.randn((1, 54, 76, 103))
    se = SELayer(54, ratio=4)
    output = se(input)

    assert se.conv1.out_channels == 16
    assert se.conv2.in_channels == 16
    assert output.shape == torch.Size((1, 54, 76, 103))

    # Test SELayer forward, divisor=2
    # 54 / 4 = 13.5 rounds to 14 with divisor=2
    se = SELayer(54, ratio=4, divisor=2)
    output = se(input)

    assert se.conv1.out_channels == 14
    assert se.conv2.in_channels == 14
    assert output.shape == torch.Size((1, 54, 76, 103))

    # Test SELayer forward, squeeze_channels=25
    # an explicit squeeze_channels is used verbatim
    input = torch.randn((1, 128, 56, 56))
    se = SELayer(128, squeeze_channels=25)
    output = se(input)

    assert se.conv1.out_channels == 25
    assert se.conv2.in_channels == 25
    assert output.shape == torch.Size((1, 128, 56, 56))

    # Test SELayer forward, not used ratio and divisor
    # squeeze_channels takes precedence over ratio and divisor
    input = torch.randn((1, 128, 56, 56))
    se = SELayer(
        128,
        squeeze_channels=13,
        ratio=4,
        divisor=8,
    )
    output = se(input)

    assert se.conv1.out_channels == 13
    assert se.conv2.in_channels == 13
    assert output.shape == torch.Size((1, 128, 56, 56))

    # Test SELayer with HSigmoid activate layer
    input = torch.randn((4, 128, 56, 56))
    se = SELayer(
        128,
        squeeze_channels=25,
        act_cfg=(dict(type='ReLU'), dict(type='HSigmoid')))
    output = se(input)

    assert se.conv1.out_channels == 25
    assert se.conv2.in_channels == 25
    assert output.shape == torch.Size((4, 128, 56, 56))
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
import torch.nn as nn
from mmpretrain.models.utils import LayerScale, SwiGLUFFN, SwiGLUFFNFused
class TestSwiGLUFFN(TestCase):
    """Tests for the plain SwiGLU feed-forward network."""

    def test_init(self):
        ffn = SwiGLUFFN(embed_dims=4)
        # w12 projects to 2x the hidden size (gate + value), w3 projects back
        self.assertEqual(ffn.w12.weight.shape, torch.Size((8, 4)))
        self.assertEqual(ffn.w3.weight.shape, torch.Size((4, 4)))
        # without layer scale, gamma2 is a pass-through
        self.assertIsInstance(ffn.gamma2, nn.Identity)

        ffn = SwiGLUFFN(embed_dims=4, layer_scale_init_value=0.1)
        self.assertIsInstance(ffn.gamma2, LayerScale)

    def test_forward(self):
        # default out_dims == embed_dims, so the input shape is preserved
        ffn = SwiGLUFFN(embed_dims=4)
        inputs = torch.randn((1, 8, 4))
        self.assertEqual(ffn(inputs).size(), inputs.size())

        # explicit out_dims changes only the last dimension
        ffn = SwiGLUFFN(embed_dims=4, out_dims=12)
        inputs = torch.randn((1, 8, 4))
        self.assertEqual(tuple(ffn(inputs).size()), (1, 8, 12))
class TestSwiGLUFFNFused(TestCase):
    """Tests for the fused SwiGLU feed-forward network."""

    def test_init(self):
        ffn = SwiGLUFFNFused(embed_dims=4)
        # the fused variant uses a widened hidden projection
        self.assertEqual(ffn.w12.weight.shape, torch.Size((16, 4)))
        self.assertEqual(ffn.w3.weight.shape, torch.Size((4, 8)))
        # without layer scale, gamma2 is a pass-through
        self.assertIsInstance(ffn.gamma2, nn.Identity)

        ffn = SwiGLUFFNFused(embed_dims=4, layer_scale_init_value=0.1)
        self.assertIsInstance(ffn.gamma2, LayerScale)

    def test_forward(self):
        # default out_dims == embed_dims, so the input shape is preserved
        ffn = SwiGLUFFNFused(embed_dims=4)
        inputs = torch.randn((1, 8, 4))
        self.assertEqual(ffn(inputs).size(), inputs.size())

        # explicit out_dims changes only the last dimension
        ffn = SwiGLUFFNFused(embed_dims=4, out_dims=12)
        inputs = torch.randn((1, 8, 4))
        self.assertEqual(tuple(ffn(inputs).size()), (1, 8, 12))
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmpretrain.structures import DataSample, MultiTaskDataSample
class TestDataSample(TestCase):
    """Unit tests for ``DataSample`` label and score setters."""

    def _test_set_label(self, key):
        """Shared checks for ``set_gt_label`` / ``set_pred_label``: every
        accepted input type must be converted to a tensor field."""
        data_sample = DataSample()
        method = getattr(data_sample, 'set_' + key)
        # Test number
        method(1)
        self.assertIn(key, data_sample)
        label = getattr(data_sample, key)
        self.assertIsInstance(label, torch.LongTensor)
        # Test tensor with single number
        method(torch.tensor(2))
        self.assertIn(key, data_sample)
        label = getattr(data_sample, key)
        self.assertIsInstance(label, torch.LongTensor)
        # Test array with single number
        method(np.array(3))
        self.assertIn(key, data_sample)
        label = getattr(data_sample, key)
        self.assertIsInstance(label, torch.LongTensor)
        # Test tensor
        method(torch.tensor([1, 2, 3]))
        self.assertIn(key, data_sample)
        label = getattr(data_sample, key)
        self.assertIsInstance(label, torch.Tensor)
        self.assertTrue((label == torch.tensor([1, 2, 3])).all())
        # Test array
        method(np.array([1, 2, 3]))
        self.assertIn(key, data_sample)
        label = getattr(data_sample, key)
        self.assertTrue((label == torch.tensor([1, 2, 3])).all())
        # Test Sequence
        method([1, 2, 3])
        self.assertIn(key, data_sample)
        label = getattr(data_sample, key)
        self.assertTrue((label == torch.tensor([1, 2, 3])).all())
        # Test unavailable type
        with self.assertRaisesRegex(TypeError, "<class 'str'> is not"):
            method('hi')

    def test_set_gt_label(self):
        self._test_set_label('gt_label')

    def test_set_pred_label(self):
        self._test_set_label('pred_label')

    def test_set_gt_score(self):
        data_sample = DataSample()
        data_sample.set_gt_score(torch.tensor([0.1, 0.1, 0.6, 0.1, 0.1]))
        self.assertIn('gt_score', data_sample)
        torch.testing.assert_allclose(data_sample.gt_score,
                                      [0.1, 0.1, 0.6, 0.1, 0.1])

        # Test invalid length
        with self.assertRaisesRegex(AssertionError, 'should be equal to'):
            data_sample.set_gt_score([1, 2])

        # Test invalid dims
        with self.assertRaisesRegex(AssertionError, 'but got 2'):
            data_sample.set_gt_score(torch.tensor([[0.1, 0.1, 0.6, 0.1, 0.1]]))

    def test_set_pred_score(self):
        data_sample = DataSample()
        data_sample.set_pred_score(torch.tensor([0.1, 0.1, 0.6, 0.1, 0.1]))
        self.assertIn('pred_score', data_sample)
        torch.testing.assert_allclose(data_sample.pred_score,
                                      [0.1, 0.1, 0.6, 0.1, 0.1])

        # Test invalid length
        # BUG FIX: this previously called ``set_gt_score`` (copy-paste from
        # ``test_set_gt_score``); it must exercise ``set_pred_score``.
        with self.assertRaisesRegex(AssertionError, 'should be equal to'):
            data_sample.set_pred_score([1, 2])

        # Test invalid dims
        with self.assertRaisesRegex(AssertionError, 'but got 2'):
            data_sample.set_pred_score(
                torch.tensor([[0.1, 0.1, 0.6, 0.1, 0.1]]))
class TestMultiTaskDataSample(TestCase):
    """Tests for nesting ``DataSample`` objects in ``MultiTaskDataSample``."""

    def test_multi_task_data_sample(self):
        gt_label = {'task0': {'task00': 1, 'task01': 1}, 'task1': 1}
        data_sample = MultiTaskDataSample()

        # flat task: a single DataSample attached directly
        data_sample.set_field(
            DataSample().set_gt_label(gt_label['task1']), 'task1')

        # nested task: a MultiTaskDataSample holding per-subtask samples
        data_sample.set_field(MultiTaskDataSample(), 'task0')
        for name, label in gt_label['task0'].items():
            data_sample.task0.set_field(
                DataSample().set_gt_label(label), name)

        self.assertIsInstance(data_sample.task0, MultiTaskDataSample)
        self.assertIsInstance(data_sample.task1, DataSample)
        self.assertIsInstance(data_sample.task0.task00, DataSample)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment