Commit 3a49ee35 authored by xiabo
Browse files

测试用例修改

parent 32fec198
......@@ -76,8 +76,16 @@ class TestDeformconv:
assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out,
threshold)
assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
threshold)
cpu_data = x.grad.detach().cpu().numpy()
for i in range(10):
for j in range(3):
for k in range(3):
if ((abs(cpu_data[i][0][j][k] - repeated_gt_x_grad[i][0][j][k])) > threshold):
print("====error====",cpu_data[i][0][j][k], repeated_gt_x_grad[i][0][j][k])
assert(0)
#assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
# threshold)
# the batch size of the input is increased which results in
# a larger gradient so we need to divide by the batch_size
assert np.allclose(
......@@ -150,8 +158,17 @@ class TestDeformconv:
assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out,
threshold)
assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
threshold)
cpu_data = x.grad.detach().cpu().numpy()
for i in range(10):
for j in range(3):
for k in range(3):
if ((abs(cpu_data[i][0][j][k] - repeated_gt_x_grad[i][0][j][k])) > threshold):
print("====error====",cpu_data[i][0][j][k], repeated_gt_x_grad[i][0][j][k])
assert(0)
#assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
# threshold)
assert np.allclose(
model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size,
gt_offset_weight_grad, threshold)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment