Unverified commit 3657748b, authored by amyeroberts, committed by GitHub

Update YOLOS slow test values (#28187)

Update test values
parent cd1350ce
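
This commit refreshes hardcoded expectations in the YOLOS slow tests: the expected processed image width drops from 1066 to 1056, and the area, mask-sum, logit, box, and score tensors are updated to match. Below is a minimal sketch of how values like these can be regenerated, assuming the hustvl/yolos-small checkpoint these tests load; the image path and annotation file are hypothetical stand-ins for the repository's COCO fixtures.

import json

import torch
from PIL import Image
from transformers import YolosImageProcessor

# Hypothetical fixture paths; substitute the repo's COCO test samples.
image = Image.open("coco_sample.png")
with open("coco_sample_annotations.json") as f:
    # COCO-detection style target: {"image_id": ..., "annotations": [...]}
    target = json.load(f)

image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
encoding = image_processing(images=image, annotations=target, return_tensors="pt")

# Print the values the slow test pins down, for copy-pasting into the test.
print(encoding["pixel_values"].shape)         # expected_shape
print(encoding["pixel_values"][0, 0, 0, :3])  # expected_slice
print(encoding["labels"][0]["area"])          # expected_area
print(encoding["labels"][0]["size"])          # expected_size

The panoptic variant in the third and fourth hunks works the same way, additionally passing masks_path= to the processor and reading the mask total via encoding["labels"][0]["masks"].sum().item().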
@@ -209,14 +209,14 @@ class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMix
         encoding = image_processing(images=image, annotations=target, return_tensors="pt")

         # verify pixel values
-        expected_shape = torch.Size([1, 3, 800, 1066])
+        expected_shape = torch.Size([1, 3, 800, 1056])
         self.assertEqual(encoding["pixel_values"].shape, expected_shape)

         expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
         self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

         # verify area
-        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
+        expected_area = torch.tensor([5832.7256, 11144.6689, 484763.2500, 829269.8125, 146579.4531, 164177.6250])
         self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

         # verify boxes
         expected_boxes_shape = torch.Size([6, 4])
@@ -236,7 +236,7 @@ class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMix
         expected_orig_size = torch.tensor([480, 640])
         self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

         # verify size
-        expected_size = torch.tensor([800, 1066])
+        expected_size = torch.tensor([800, 1056])
         self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

     @slow
@@ -255,14 +255,14 @@ class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMix
         encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

         # verify pixel values
-        expected_shape = torch.Size([1, 3, 800, 1066])
+        expected_shape = torch.Size([1, 3, 800, 1056])
         self.assertEqual(encoding["pixel_values"].shape, expected_shape)

         expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
         self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

         # verify area
-        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
+        expected_area = torch.tensor([146591.5000, 163974.2500, 480092.2500, 11187.0000, 5824.5000, 7562.5000])
         self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

         # verify boxes
         expected_boxes_shape = torch.Size([6, 4])
@@ -279,11 +279,11 @@ class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMix
         expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
         self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))

         # verify masks
-        expected_masks_sum = 822873
+        expected_masks_sum = 815161
         self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)

         # verify orig_size
         expected_orig_size = torch.tensor([480, 640])
         self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

         # verify size
-        expected_size = torch.tensor([800, 1066])
+        expected_size = torch.tensor([800, 1056])
         self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@@ -352,11 +352,11 @@ class YolosModelIntegrationTest(unittest.TestCase):
         self.assertEqual(outputs.logits.shape, expected_shape)

         expected_slice_logits = torch.tensor(
-            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
+            [[-23.7219, -10.3165, -14.9083], [-41.5429, -15.2403, -24.1478], [-29.3909, -12.7173, -19.4650]],
             device=torch_device,
         )
         expected_slice_boxes = torch.tensor(
-            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
+            [[0.2536, 0.5449, 0.4643], [0.2037, 0.7735, 0.3672], [0.7692, 0.4056, 0.4549]], device=torch_device
         )
         self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
         self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
@@ -365,9 +365,9 @@ class YolosModelIntegrationTest(unittest.TestCase):
         results = image_processor.post_process_object_detection(
             outputs, threshold=0.3, target_sizes=[image.size[::-1]]
         )[0]
-        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
+        expected_scores = torch.tensor([0.9991, 0.9801, 0.9978, 0.9875, 0.9848]).to(torch_device)
         expected_labels = [75, 75, 17, 63, 17]
-        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
+        expected_slice_boxes = torch.tensor([331.8438, 80.5440, 369.9546, 188.0579]).to(torch_device)

         self.assertEqual(len(results["scores"]), 5)
         self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
...
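
For reference, a minimal sketch of the integration path the last two hunks exercise, assuming the same hustvl/yolos-small checkpoint and an arbitrary RGB test image; it is a reproduction aid under those assumptions, not the test code itself.

import torch
from PIL import Image
from transformers import YolosForObjectDetection, YolosImageProcessor

image = Image.open("coco_sample.png").convert("RGB")  # hypothetical path
image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Raw slices compared against expected_slice_logits / expected_slice_boxes.
print(outputs.logits[0, :3, :3])
print(outputs.pred_boxes[0, :3, :3])

# Post-processed detections compared against expected_scores,
# expected_labels, and the first row of expected boxes.
results = image_processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
print(results["scores"], results["labels"], results["boxes"][0])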