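# Tests for NNI Retiarii's PyTorch Lightning evaluators (pl.Classification and
# pl.Regression) and the FunctionalEvaluator, run against NNI's test platform
# so that the reported final metrics can be inspected in-process.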
import json
import pytest

import nni
import nni.retiarii.evaluator.pytorch.lightning as pl
import nni.runtime.platform.test
import pytorch_lightning
import torch
import torch.nn as nn
import torch.nn.functional as F
from nni.retiarii.evaluator import FunctionalEvaluator
from sklearn.datasets import load_diabetes
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.datasets import MNIST

debug = False

enable_progress_bar = False
if debug:
    enable_progress_bar = True


class MNISTModel(nn.Module):
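    """A small two-layer MLP used as the classification model under test."""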
    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(28 * 28, 128)
        self.layer_2 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = self.layer_1(x)
        x = F.relu(x)
        x = self.layer_2(x)
        return x


class FCNet(nn.Module):
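    """A minimal fully-connected regressor; forward() flattens the output to 1-D."""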
    def __init__(self, input_size, output_size):
        super().__init__()
        self.l1 = nn.Linear(input_size, 5)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(5, output_size)

    def forward(self, x):
        output = self.l1(x)
        output = self.relu(output)
        output = self.l2(output)
        return output.view(-1)


@nni.trace
class DiabetesDataset(Dataset):
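    """Regression dataset built from sklearn's diabetes data, with an 80/20 train/validation split."""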
    def __init__(self, train=True):
        data = load_diabetes()
        self.x = torch.tensor(data['data'], dtype=torch.float32)
        self.y = torch.tensor(data['target'], dtype=torch.float32)
        self.length = self.x.shape[0]
        split = int(self.length * 0.8)
        if train:
            self.x = self.x[:split]
            self.y = self.y[:split]
        else:
            self.x = self.x[split:]
            self.y = self.y[split:]
        self.length = len(self.y)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

    def __len__(self):
        return self.length


def _get_final_result():
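    # The NNI test platform stores the last reported metric as a JSON string.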
    return float(json.loads(nni.runtime.platform.test._last_metric)['value'])


def _foo(model_cls):
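    # Target for FunctionalEvaluator: it should be invoked with the model class.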
    assert model_cls == MNISTModel


def _reset():
    # reset trial state so that this test does not interfere with other tests in the SDK
    nni.trial._intermediate_seq = 0
    nni.trial._params = {'foo': 'bar', 'parameter_id': 0, 'parameters': {}}
    nni.runtime.platform.test._last_metric = None


@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.')
def test_mnist():
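    # Train a small MLP on MNIST (2 epochs, 25% of the training batches) and check
    # that the reported final metric (validation accuracy) clears a loose threshold.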
    _reset()
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
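    # nni.trace records the constructor arguments so the dataset can be
    # serialized and re-created by NNI outside this process if needed.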
    train_dataset = nni.trace(MNIST)(root='data/mnist', train=True, download=True, transform=transform)
    test_dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True, transform=transform)
    lightning = pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100),
                                  val_dataloaders=pl.DataLoader(test_dataset, batch_size=100),
                                  max_epochs=2, limit_train_batches=0.25,  # for faster training
                                  enable_progress_bar=enable_progress_bar)
    lightning._execute(MNISTModel)
    assert _get_final_result() > 0.7
    _reset()


@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.')
def test_diabetes():
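    # Fit a small regressor on the diabetes data for 100 epochs and check that the
    # reported final metric (the regression loss) stays below a loose bound.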
    _reset()
    nni.runtime.platform.test._last_metric = None
    train_dataset = DiabetesDataset(train=True)
    test_dataset = DiabetesDataset(train=False)
    lightning = pl.Regression(optimizer=torch.optim.SGD,
                              train_dataloader=pl.DataLoader(train_dataset, batch_size=20),
                              val_dataloaders=pl.DataLoader(test_dataset, batch_size=20),
                              max_epochs=100,
                              enable_progress_bar=enable_progress_bar)
    lightning._execute(FCNet(train_dataset.x.shape[1], 1))
    assert _get_final_result() < 2e4
    _reset()


@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.')
def test_functional():
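    # The functional evaluator should simply call the wrapped function with the model class.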
    FunctionalEvaluator(_foo)._execute(MNISTModel)


@pytest.mark.skipif(pytorch_lightning.__version__ < '1.0', reason='Incompatible APIs.')
def test_fit_api():
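    # Exercise the public fit() API with a model factory, a model class,
    # and a model instance.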
    _reset()
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
    train_dataset = nni.trace(MNIST)(root='data/mnist', train=True, download=True, transform=transform)
    test_dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True, transform=transform)

    def lightning():
        return pl.Classification(train_dataloader=pl.DataLoader(train_dataset, batch_size=100),
                                 val_dataloaders=pl.DataLoader(test_dataset, batch_size=100),
                                 max_epochs=1, limit_train_batches=0.1,  # for faster training
                                 enable_progress_bar=enable_progress_bar)
    # Lightning caches state in models and trainers, which is problematic when
    # fit is called multiple times, so a fresh evaluator is created for each call.
    lightning().fit(lambda: MNISTModel())
    lightning().fit(MNISTModel)
    lightning().fit(MNISTModel())
    _reset()


if __name__ == '__main__':
    test_mnist()
    test_diabetes()
    test_functional()
    test_fit_api()