import torch
import warpctc_pytorch as warp_ctc
from warpctc_pytorch import CTCLoss


def test_empty_label(test_cpu=True, test_gpu=True):
    # Activations of shape (seq_len, batch, alphabet); warp-ctc takes
    # unnormalized activations and applies softmax internally.
    probs = torch.FloatTensor([
        [[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
        [[0.6, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.5, 0.2, 0.1]]
    ]).contiguous()
    grads = torch.zeros(probs.size())
    labels = torch.IntTensor([1, 2])
    label_sizes = torch.IntTensor([2, 0])  # second utterance has an empty label sequence
    sizes = torch.IntTensor([2, 2])
    minibatch_size = probs.size(1)

    if test_cpu:
        costs = torch.zeros(minibatch_size)
        warp_ctc.cpu_ctc(probs, grads, labels, label_sizes, sizes,
                         minibatch_size, costs, 0)  # last argument is the blank index
        print('CPU cost sum = %f' % costs.sum())
        print('CPU probs={}\ngrads={}\ncosts={}\n\n'.format(probs, grads, costs))

    if test_gpu:
        # Only activations and gradients move to the GPU; labels, sizes and
        # costs stay on the CPU for the warp-ctc bindings.
        probs = probs.clone().cuda()
        grads = torch.zeros(probs.size()).cuda()
        costs = torch.zeros(minibatch_size)
        warp_ctc.gpu_ctc(probs, grads, labels, label_sizes, sizes,
                         minibatch_size, costs, 0)
        print('GPU cost sum = %f' % costs.sum())
        print(grads.view(grads.size(0) * grads.size(1), grads.size(2)))
        print('GPU probs={}\ngrads={}\ncosts={}\n\n'.format(probs, grads, costs))


def test_ctcloss(test_cpu=True, test_gpu=True):
    criterion = CTCLoss(blank=0, size_average=False, length_average=False)
    # Shape after transpose: (seq_len=2, batch=1, alphabet=5).
    probs = torch.FloatTensor([[[0.1, 0.6, 0.1, 0.1, 0.1],
                                [0.1, 0.1, 0.6, 0.1, 0.1]]]).transpose(0, 1).contiguous()
    labels = torch.IntTensor([1, 2])
    probs_sizes = torch.IntTensor([2])
    label_sizes = torch.IntTensor([2])
    print('probs shape ', probs.shape)
    print('labels shape ', labels.shape)
    print('label_sizes sum ', label_sizes.sum())

    if test_cpu:
        probs_cpu = probs.clone().cpu().requires_grad_(True)  # tells autograd to compute gradients for probs
        cost = criterion(probs_cpu, labels, probs_sizes, label_sizes)
        cost.backward()
        print('CPU probs={}\ngrads={}\ncosts={}\n\n'.format(probs_cpu, probs_cpu.grad, cost))

    if test_gpu:
        probs_gpu = probs.clone().cuda().requires_grad_(True)  # tells autograd to compute gradients for probs
        cost = criterion(probs_gpu, labels, probs_sizes, label_sizes)
        cost.backward()
        print('GPU probs={}\ngrads={}\ncosts={}\n\n'.format(probs_gpu, probs_gpu.grad, cost))


def main():
    print('torch.cuda.is_available() ', torch.cuda.is_available())
    test_gpu = torch.cuda.is_available()
    # test_empty_label(test_cpu=True, test_gpu=test_gpu)
    test_ctcloss(test_cpu=True, test_gpu=test_gpu)


if __name__ == '__main__':
    main()
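

# --- Optional cross-check (sketch, not part of the original test) ------------
# Minimal sketch that mirrors the CPU path of test_ctcloss() using
# torch.nn.CTCLoss as a reference. warp-ctc consumes unnormalized activations
# and applies softmax internally, so the same activations are run through
# log_softmax before calling torch.nn.CTCLoss. With blank=0 and
# reduction='sum' the two costs should agree closely, assuming matching
# conventions. The name test_ctcloss_reference is ours; it is not invoked by
# main() and can be called manually if a sanity check is wanted.
def test_ctcloss_reference():
    import torch.nn.functional as F

    probs = torch.FloatTensor([[[0.1, 0.6, 0.1, 0.1, 0.1],
                                [0.1, 0.1, 0.6, 0.1, 0.1]]]).transpose(0, 1).contiguous()
    labels = torch.IntTensor([1, 2])    # concatenated targets (no blank index)
    probs_sizes = torch.IntTensor([2])  # input (time) length per sample
    label_sizes = torch.IntTensor([2])  # target length per sample

    reference = torch.nn.CTCLoss(blank=0, reduction='sum')
    log_probs = F.log_softmax(probs, dim=-1)  # nn.CTCLoss expects log-probabilities
    cost = reference(log_probs, labels.long(), probs_sizes.long(), label_sizes.long())
    print('torch.nn.CTCLoss (CPU) cost = %f' % cost.item())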