Commit c6666d33 authored by Yanghan Wang, committed by Facebook GitHub Bot

replace torch.testing.assert_allclose with torch.testing.assert_close

Summary:
Pull Request resolved: https://github.com/facebookresearch/d2go/pull/409

`assert_close` is preferred over `assert_allclose`: https://github.com/pytorch/pytorch/issues/61844

`assert_allclose` was removed yesterday in https://github.com/pytorch/pytorch/pull/87974, causing tests to fail, e.g. https://github.com/facebookresearch/d2go/actions/runs/3389194553/jobs/5632021291
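
For reference, a minimal sketch of the migration (illustrative only, not part of this diff; the sample tensors and tolerances below are made up):

```python
import torch

actual = torch.tensor([5.0])
expected = torch.tensor([5.0])

# Before (removed from recent PyTorch):
# torch.testing.assert_allclose(actual, expected)

# After:
torch.testing.assert_close(actual, expected)

# assert_close also checks dtype and device by default; tolerances can still
# be passed explicitly where the defaults are too strict:
torch.testing.assert_close(actual, expected, rtol=1e-5, atol=1e-8)
```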

Reviewed By: sstsai-adl

Differential Revision: D41000306

fbshipit-source-id: 7bd1cb9d5edf0a4609a909e2283df411bcabdf13
parent bd1beec9
@@ -174,7 +174,7 @@ class MultiTensorInSingleTensorOut(nn.Module):

     @staticmethod
     def check_outputs(new_output, original_output):
-        torch.testing.assert_allclose(new_output, torch.tensor([5]))
+        torch.testing.assert_close(new_output, torch.tensor([5]))


 # NOTE: caffe2 wrapper assumes tensors are fp32
@@ -191,7 +191,7 @@ class SingleListInSingleListOut(nn.Module):
     @staticmethod
     def check_outputs(new_output, original_output):
         assert len(new_output) == 1
-        torch.testing.assert_allclose(new_output[0], torch.tensor([5.0]))
+        torch.testing.assert_close(new_output[0], torch.tensor([5.0]))


 class MultiDictInMultiDictOut(nn.Module):
@@ -210,10 +210,10 @@ class MultiDictInMultiDictOut(nn.Module):
     @staticmethod
     def check_outputs(new_output, original_output):
         first, second = original_output
-        torch.testing.assert_allclose(first["add"], torch.tensor([4]))
-        torch.testing.assert_allclose(first["sub"], torch.tensor([-2]))
-        torch.testing.assert_allclose(second["add"], torch.tensor([6]))
-        torch.testing.assert_allclose(second["sub"], torch.tensor([-2]))
+        torch.testing.assert_close(first["add"], torch.tensor([4]))
+        torch.testing.assert_close(first["sub"], torch.tensor([-2]))
+        torch.testing.assert_close(second["add"], torch.tensor([6]))
+        torch.testing.assert_close(second["sub"], torch.tensor([-2]))


 MODEL_EXPORT_METHOD_TEST_CASES = [
@@ -47,7 +47,7 @@ class TestBoxWithNMSLimit(unittest.TestCase):
             topk_per_image=detections_per_im,
         )
         for result, kept_index, score in zip(ref_results, ref_kept_indices, scores):
-            torch.testing.assert_allclose(
+            torch.testing.assert_close(
                 score[kept_index, result.pred_classes],
                 result.scores,
             )
@@ -99,7 +99,7 @@ class TestBoxWithNMSLimit(unittest.TestCase):
         for _score_nms, _class_nms, _keeps_nms, _score in zip(
             roi_score_nms, roi_class_nms, roi_keeps_nms, scores
         ):
-            torch.testing.assert_allclose(
+            torch.testing.assert_close(
                 _score[_keeps_nms.to(torch.int64), _class_nms.to(torch.int64)],
                 _score_nms,
             )
@@ -109,9 +109,9 @@ class TestBoxWithNMSLimit(unittest.TestCase):
         ):
             s1, i1 = s.sort()
             s2, i2 = ref.scores.sort()
-            torch.testing.assert_allclose(s1, s2)
-            torch.testing.assert_allclose(b[i1], ref.pred_boxes.tensor[i2])
-            torch.testing.assert_allclose(c.to(torch.int64)[i1], ref.pred_classes[i2])
+            torch.testing.assert_close(s1, s2)
+            torch.testing.assert_close(b[i1], ref.pred_boxes.tensor[i2])
+            torch.testing.assert_close(c.to(torch.int64)[i1], ref.pred_classes[i2])

         for ref, k in zip(ref_kept_indices, roi_keeps_nms):
             # NOTE: order might be different due to implementation
@@ -97,7 +97,7 @@ class BaseSemanticSegTestCase:
         for predictor_output, pytorch_output in zip(
             predicotr_outputs, pytorch_outputs
         ):
-            torch.testing.assert_allclose(
+            torch.testing.assert_close(
                 predictor_output["sem_seg"], pytorch_output["sem_seg"]
             )