test_mff.py
# Copyright (c) OpenMMLab. All rights reserved.
import platform

import pytest
import torch

from mmpretrain.models import MFF, MFFViT
from mmpretrain.structures import DataSample


@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_mff_vit():
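    # Assumed reading of the config: out_indices selects the transformer layers
    # whose features MFFViT fuses (with learned weights) into the final output
    # before it is passed to the decoder.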
    backbone = dict(
        arch='b', patch_size=16, mask_ratio=0.75, out_indices=[1, 11])
    mff_backbone = MFFViT(**backbone)
    mff_backbone.init_weights()
    fake_inputs = torch.randn((2, 3, 224, 224))

    # test with mask
    fake_outputs = mff_backbone(fake_inputs)[0]
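    # A 224x224 image with 16x16 patches yields 196 tokens; mask_ratio=0.75
    # keeps 49 visible tokens, plus the cls token -> 50 tokens of dim 768
    # (arch 'b').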
    assert list(fake_outputs.shape) == [2, 50, 768]


@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
def test_mff():
    data_preprocessor = {
        'mean': [0.5, 0.5, 0.5],
        'std': [0.5, 0.5, 0.5],
        'to_rgb': True
    }
    backbone = dict(
        type='MFFViT',
        arch='b',
        patch_size=16,
        mask_ratio=0.75,
        out_indices=[1, 11])
    neck = dict(
        type='MAEPretrainDecoder',
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        decoder_embed_dim=512,
        decoder_depth=8,
        decoder_num_heads=16,
        mlp_ratio=4.,
    )
    loss = dict(type='PixelReconstructionLoss', criterion='L2')
    head = dict(
        type='MAEPretrainHead', norm_pix=False, patch_size=16, loss=loss)

    alg = MFF(
        backbone=backbone,
        neck=neck,
        head=head,
        data_preprocessor=data_preprocessor)

    fake_data = {
        'inputs': torch.randn((2, 3, 224, 224)),
        'data_samples': [DataSample() for _ in range(2)]
    }
    fake_inputs = alg.data_preprocessor(fake_data)
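    # 'loss' mode runs the full pretraining forward pass and returns a dict of
    # losses for the pixel-reconstruction objective.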
    fake_outputs = alg(**fake_inputs, mode='loss')
    assert isinstance(fake_outputs['loss'].item(), float)