# test_gpu.py
import torch
import warpctc_pytorch as warp_ctc


def test_empty_label(test_cpu=True, test_gpu=True):
    # Activations: (seq_len=2, batch=2, alphabet=5); warp-ctc applies the softmax internally.
    probs = torch.FloatTensor([
        [[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
        [[0.6, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.5, 0.2, 0.1]]
    ]).contiguous()
    grads = torch.zeros(probs.size())
    labels = torch.IntTensor([1, 2])        # concatenated labels for the whole batch
    label_sizes = torch.IntTensor([2, 0])   # second sample has an empty label
    sizes = torch.IntTensor([2, 2])         # activation length per sample
    minibatch_size = probs.size(1)

    if test_cpu:
        costs = torch.zeros(minibatch_size)
        # cpu_ctc fills grads and costs in place; the final argument (0) is the blank label index.
        warp_ctc.cpu_ctc(probs, grads, labels, label_sizes, sizes, minibatch_size, costs, 0)
        print('CPU_cost: %f' % costs.sum())
        print('CPU probs={}\ngrads={}\ncosts={}'.format(probs, grads, costs))

    if test_gpu:
        # gpu_ctc expects the activations and gradients on the GPU; the label,
        # size, and cost tensors stay on the CPU.
        probs = probs.clone().cuda()
        grads = torch.zeros(probs.size()).cuda()
        costs = torch.zeros(minibatch_size)
        warp_ctc.gpu_ctc(probs, grads, labels, label_sizes, sizes, minibatch_size, costs, 0)
        print('GPU_cost: %f' % costs.sum())
        print(grads.view(grads.size(0) * grads.size(1), grads.size(2)))
        print('GPU probs={}\ngrads={}\ncosts={}'.format(probs, grads, costs))
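

# --- Hedged sketch (not part of the original test): the same check through the
# autograd-friendly CTCLoss wrapper that warpctc_pytorch also exposes. Instead
# of filling a preallocated grads tensor, gradients come from cost.backward().
# Assumes the wrapper signature CTCLoss()(acts, labels, act_lens, label_lens);
# this function is not invoked from __main__ below.
def test_empty_label_ctcloss(use_gpu=True):
    from warpctc_pytorch import CTCLoss

    acts = torch.FloatTensor([
        [[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
        [[0.6, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.5, 0.2, 0.1]]
    ]).contiguous()
    if use_gpu:
        acts = acts.cuda()
    acts.requires_grad_(True)

    labels = torch.IntTensor([1, 2])        # concatenated labels, kept on CPU
    label_sizes = torch.IntTensor([2, 0])   # second sample has an empty label
    act_sizes = torch.IntTensor([2, 2])     # activation length per sample

    ctc_loss = CTCLoss()
    cost = ctc_loss(acts, labels, act_sizes, label_sizes)
    cost.backward()
    print('CTCLoss cost: %f' % cost.item())
    print('CTCLoss grads={}'.format(acts.grad))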


if __name__ == '__main__':
    print('torch.cuda.is_available() ', torch.cuda.is_available())
    # test_empty_label(test_cpu=True, test_gpu=False)
    test_empty_label(test_cpu=False, test_gpu=True)

# Example run (ROCm/HIP): HIP_VISIBLE_DEVICES=1 python3 test_gpu.py
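

# --- Hedged sketch (assumption, not in the original file): cross-checking the
# cost against PyTorch's built-in torch.nn.CTCLoss. Unlike warp-ctc, which
# applies softmax to raw activations internally, the built-in loss expects
# log-probabilities of shape (T, N, C); reduction='sum' corresponds to
# costs.sum() above.
def test_empty_label_builtin_ctc():
    import torch.nn.functional as F

    acts = torch.FloatTensor([
        [[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
        [[0.6, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.5, 0.2, 0.1]]
    ])
    log_probs = F.log_softmax(acts, dim=-1)     # (T=2, N=2, C=5)

    targets = torch.LongTensor([1, 2])          # concatenated targets
    target_lengths = torch.LongTensor([2, 0])   # second sample has an empty target
    input_lengths = torch.LongTensor([2, 2])

    ctc = torch.nn.CTCLoss(blank=0, reduction='sum')
    cost = ctc(log_probs, targets, input_lengths, target_lengths)
    print('torch.nn.CTCLoss cost: %f' % cost.item())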