# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Evaluation utilities."""

import os
from functools import partial

import torch
from torchvision import datasets, transforms

from megatron import get_args
from megatron import mpu
from megatron import print_rank_0, print_rank_last
from megatron.schedules import get_forward_backward_func
from tasks.vision.finetune_utils import build_data_loader
from tasks.vision.finetune_utils import process_batch


def accuracy_func_provider():
    """Provide a function that calculates validation accuracy.

    Builds the validation dataloader once, then returns a closure that
    runs evaluation over it and prints accuracy for a given epoch.

    Returns:
        Callable ``metrics_func(model, epoch)`` that evaluates ``model``
        on the validation set and prints the result on the last rank.
    """
    args = get_args()
    data_path = args.data_path
    crop_size = (args.img_h, args.img_w)

    # Build dataloader. data_path[1] is the validation split root
    # (presumably data_path[0] is the training split — TODO confirm
    # against the argument parser).
    val_data_path = data_path[1]
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    transform_val = transforms.Compose(
        [
            transforms.Resize(crop_size),
            transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            normalize,
        ]
    )
    dataset = datasets.ImageFolder(root=val_data_path, transform=transform_val)

    dataloader = build_data_loader(
        dataset,
        args.micro_batch_size,
        num_workers=args.num_workers,
        # Uneven trailing batches would skew the all_reduce across
        # data-parallel ranks, so drop them when data parallelism is active.
        drop_last=(mpu.get_data_parallel_world_size() > 1),
        shuffle=False
    )

    def metrics_func(model, epoch):
        """Evaluate `model` on the validation set and print accuracy."""
        print_rank_0("calculating metrics ...")
        result = calculate_correct_answers(model, dataloader, epoch)
        if result is None:
            # calculate_correct_answers only produces counts on the last
            # pipeline stage; intermediate stages have nothing to report.
            # (Unpacking None here used to raise TypeError.)
            return
        correct, total = result
        # Guard against an empty dataloader (total == 0) instead of
        # raising ZeroDivisionError.
        percent = float(correct) * 100.0 / float(total) if total else 0.0
        print_rank_last(
            " >> |epoch: {}| overall: correct / total = {} / {} = "
            "{:.4f} %".format(epoch, correct, total, percent)
        )

    return metrics_func


def calculate_correct_answers(model, dataloader, epoch):
    """Count correct predictions of `model` over `dataloader`.

    Runs the (possibly pipelined) model in eval mode over every batch,
    accumulates per-batch correct/total counts, then all-reduces the
    counts across data-parallel ranks.

    Args:
        model: list of pipeline model chunks.
        dataloader: validation dataloader yielding (images, labels) batches.
        epoch: current epoch number (unused here; kept for interface parity).

    Returns:
        ``(correct, total)`` on the last pipeline stage; ``None`` on all
        other pipeline stages, which never see the logits.
    """
    forward_backward_func = get_forward_backward_func()
    # `model` is a list of pipeline chunks; switch them all to eval mode.
    for m in model:
        m.eval()

    def loss_func(labels, output_tensor):
        """Turn logits into per-batch 'correct'/'total' counters."""
        logits = output_tensor

        loss_dict = {}
        # Compute the correct answers.
        predicted = torch.argmax(logits, dim=-1)
        corrects = (predicted == labels).float()
        # Add to the counters.
        loss_dict['total'] = labels.size(0)
        loss_dict['correct'] = corrects.sum().item()

        # The schedule expects (loss, stats); no real loss is needed
        # during evaluation.
        return 0, loss_dict

    def correct_answers_forward_step(batch, model):
        """Forward one batch and return (logits, loss_func partial)."""
        # The schedule may hand us either an iterator over batches or a
        # single materialized batch. Catch only the failures `next()` can
        # raise (TypeError for non-iterators, StopIteration for exhausted
        # iterators) — the previous blanket `except BaseException` also
        # swallowed KeyboardInterrupt and genuine bugs.
        try:
            batch_ = next(batch)
        except (TypeError, StopIteration):
            batch_ = batch
        images, labels = process_batch(batch_)

        # Forward model.
        output_tensor = model(images)

        return output_tensor, partial(loss_func, labels)

    with torch.no_grad():
        # For all the batches in the dataset.
        total = 0
        correct = 0
        for batch in dataloader:
            loss_dicts = forward_backward_func(correct_answers_forward_step, batch, model,
                                               optimizer=None, timers=None, forward_only=True)
            # Only the last pipeline stage receives non-empty loss_dicts.
            for loss_dict in loss_dicts:
                total += loss_dict['total']
                correct += loss_dict['correct']

    # Restore training mode on every chunk.
    for m in model:
        m.train()

    # Reduce counts across data-parallel ranks.
    if mpu.is_pipeline_last_stage():
        unreduced = torch.cuda.LongTensor([correct, total])
        torch.distributed.all_reduce(unreduced,
                                     group=mpu.get_data_parallel_group())

        correct_ans = unreduced[0].item()
        total_count = unreduced[1].item()
        return correct_ans, total_count
    # Non-last pipeline stages have no counts to return (was an implicit
    # None; made explicit so callers must handle it).
    return None