"git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "4d6264b0537c8e0a3b1117149b9e62b9eb784039"
model.py 3.86 KB
Newer Older
liuzhe-lz's avatar
liuzhe-lz committed
1
2
3
4
5
6
7
"""
Port PyTorch Quickstart to NNI
==============================
This is a modified version of `PyTorch quickstart`_.

It can be run directly and will have the exact same result as the original version.

Furthermore, it enables auto-tuning with an NNI *experiment*, which will be detailed later.

It is recommended to run this script directly first to verify the environment.

There are two key differences from the original version:

1. In the `Get optimized hyperparameters`_ part, it receives generated hyperparameters.
2. In the `Train model and report accuracy`_ part, it reports accuracy metrics to NNI.

.. _PyTorch quickstart: https://pytorch.org/tutorials/beginner/basics/quickstart_tutorial.html
"""

# %%
import nni
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

# %%
# Hyperparameters to be tuned
# ---------------------------
# These are the hyperparameters that will be tuned.
params = {
    'features': 512,
    'lr': 0.001,
    'momentum': 0,
}
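
# %%
# Note that the search space itself is not defined in this script; it belongs to
# the experiment configuration. A minimal sketch of a matching NNI search space
# (the concrete choices and ranges here are illustrative assumptions):
#
# .. code-block:: python
#
#     search_space = {
#         'features': {'_type': 'choice', '_value': [128, 256, 512, 1024]},
#         'lr': {'_type': 'loguniform', '_value': [0.0001, 0.1]},
#         'momentum': {'_type': 'uniform', '_value': [0, 1]},
#     }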

# %%
# Get optimized hyperparameters
# -----------------------------
# If run directly, :func:`nni.get_next_parameter` is a no-op and returns an empty dict.
# But with an NNI *experiment*, it will receive optimized hyperparameters from the tuning algorithm.
optimized_params = nni.get_next_parameter()
params.update(optimized_params)
print(params)
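
# %%
# When run standalone, this prints the defaults:
# ``{'features': 512, 'lr': 0.001, 'momentum': 0}``.
# Under an experiment, the tuner might instead return values such as
# ``{'features': 256, 'lr': 0.0271, 'momentum': 0.5}`` (illustrative, not real output).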

# %%
# Load dataset
# ------------
training_data = datasets.FashionMNIST(root="data", train=True, download=True, transform=ToTensor())
test_data = datasets.FashionMNIST(root="data", train=False, download=True, transform=ToTensor())

batch_size = 64

train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

# %%
# Build model with hyperparameters
# --------------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")

class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, params['features']),
            nn.ReLU(),
            nn.Linear(params['features'], params['features']),
            nn.ReLU(),
            nn.Linear(params['features'], 10)
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits

model = NeuralNetwork().to(device)

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=params['lr'], momentum=params['momentum'])

# %%
# Define train and test
# ---------------------
def train(dataloader, model, loss_fn, optimizer):
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    return correct  # accuracy in [0, 1], reported to NNI in the training loop

# %%
# Train model and report accuracy
# -------------------------------
# Report accuracy metrics to NNI so the tuning algorithm can suggest better hyperparameters.
epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
    accuracy = test(test_dataloader, model, loss_fn)
    nni.report_intermediate_result(accuracy)
nni.report_final_result(accuracy)
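
# %%
# To actually tune the hyperparameters, this script is launched by an NNI
# experiment rather than run directly. A minimal sketch using NNI's Python
# experiment API (the trial counts, port, and file name are assumptions for
# illustration):
#
# .. code-block:: python
#
#     from nni.experiment import Experiment
#
#     experiment = Experiment('local')
#     experiment.config.trial_command = 'python model.py'  # runs this script
#     experiment.config.trial_code_directory = '.'
#     experiment.config.search_space = search_space  # as sketched above
#     experiment.config.tuner.name = 'TPE'
#     experiment.config.tuner.class_args['optimize_mode'] = 'maximize'
#     experiment.config.max_trial_number = 10
#     experiment.config.trial_concurrency = 2
#     experiment.run(8080)  # web UI at http://localhost:8080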