Unverified Commit 26ea725b authored by Quentin Lhoest, committed by GitHub

Update fixtures-image-utils (#28080)

* fix hf-internal-testing/fixtures_image_utils

* fix test

* comments
parent 1c286be5
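
The change is mechanical at every call site: the path-style `"file"` column (which needed a manual `Image.open(...)`) is replaced by the `"image"` column that `datasets` decodes directly into `PIL.Image` objects, and the load is pinned to `revision="refs/pr/1"` until the linked dataset PR is merged. A minimal sketch of the new access pattern (illustrative only; the index-to-mode mapping is taken from the test comments in the diff below):

```python
from datasets import load_dataset

# Pin the dataset's pull-request revision until it is merged upstream.
dataset = load_dataset(
    "hf-internal-testing/fixtures_image_utils",
    split="test",
    revision="refs/pr/1",
)

# The "image" column is decoded by `datasets` into PIL.Image objects,
# so no Image.open(dataset[i]["file"]) call is needed anymore.
rgba_image = dataset[0]["image"]  # RGBA fixture (per the test comments)
la_image = dataset[1]["image"]    # LA fixture
l_image = dataset[2]["image"]     # L fixture
print(rgba_image.mode, la_image.mode, l_image.mode)
```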
@@ -226,10 +226,12 @@ class ImageGPTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
 def prepare_images():
-    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
+    # we use revision="refs/pr/1" until the PR is merged
+    # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

-    image1 = Image.open(dataset[4]["file"])
-    image2 = Image.open(dataset[5]["file"])
+    image1 = dataset[4]["image"]
+    image2 = dataset[5]["image"]

     images = [image1, image2]
...
@@ -68,17 +68,19 @@ class DepthEstimationPipelineTests(unittest.TestCase):
         self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)

         import datasets

-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

         outputs = depth_estimator(
             [
                 Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                 "http://images.cocodataset.org/val2017/000000039769.jpg",
                 # RGBA
-                dataset[0]["file"],
+                dataset[0]["image"],
                 # LA
-                dataset[1]["file"],
+                dataset[1]["image"],
                 # L
-                dataset[2]["file"],
+                dataset[2]["image"],
             ]
         )
         self.assertEqual(
...
@@ -72,7 +72,9 @@ class ImageClassificationPipelineTests(unittest.TestCase):
         import datasets

-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

         # Accepts URL + PIL.Image + lists
         outputs = image_classifier(
@@ -80,11 +82,11 @@ class ImageClassificationPipelineTests(unittest.TestCase):
                 Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                 "http://images.cocodataset.org/val2017/000000039769.jpg",
                 # RGBA
-                dataset[0]["file"],
+                dataset[0]["image"],
                 # LA
-                dataset[1]["file"],
+                dataset[1]["image"],
                 # L
-                dataset[2]["file"],
+                dataset[2]["image"],
             ]
         )
         self.assertEqual(
...
@@ -113,18 +113,20 @@ class ImageSegmentationPipelineTests(unittest.TestCase):
         # to make it work
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs)

-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

         # RGBA
-        outputs = image_segmenter(dataset[0]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
+        outputs = image_segmenter(dataset[0]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
         m = len(outputs)
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
         # LA
-        outputs = image_segmenter(dataset[1]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
+        outputs = image_segmenter(dataset[1]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
         m = len(outputs)
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
         # L
-        outputs = image_segmenter(dataset[2]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
+        outputs = image_segmenter(dataset[2]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
         m = len(outputs)
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
...
@@ -73,17 +73,19 @@ class ObjectDetectionPipelineTests(unittest.TestCase):
         import datasets

-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

         batch = [
             Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
             "http://images.cocodataset.org/val2017/000000039769.jpg",
             # RGBA
-            dataset[0]["file"],
+            dataset[0]["image"],
             # LA
-            dataset[1]["file"],
+            dataset[1]["image"],
             # L
-            dataset[2]["file"],
+            dataset[2]["image"],
         ]

         batch_outputs = object_detector(batch, threshold=0.0)
...
@@ -538,9 +538,11 @@ class LoadImageTester(unittest.TestCase):
         self.assertEqual(img_arr.shape, (64, 32, 3))

     def test_load_img_rgba(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

-        img = load_image(dataset[0]["file"])  # img with mode RGBA
+        img = load_image(dataset[0]["image"])  # img with mode RGBA
         img_arr = np.array(img)

         self.assertEqual(
@@ -549,9 +551,11 @@ class LoadImageTester(unittest.TestCase):
         )

     def test_load_img_la(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

-        img = load_image(dataset[1]["file"])  # img with mode LA
+        img = load_image(dataset[1]["image"])  # img with mode LA
         img_arr = np.array(img)

         self.assertEqual(
@@ -560,9 +564,11 @@ class LoadImageTester(unittest.TestCase):
         )

     def test_load_img_l(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

-        img = load_image(dataset[2]["file"])  # img with mode L
+        img = load_image(dataset[2]["image"])  # img with mode L
         img_arr = np.array(img)

         self.assertEqual(
@@ -571,10 +577,11 @@ class LoadImageTester(unittest.TestCase):
         )

     def test_load_img_exif_transpose(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
-        img_file = dataset[3]["file"]
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")

-        img_without_exif_transpose = PIL.Image.open(img_file)
+        img_without_exif_transpose = dataset[3]["image"]
         img_arr_without_exif_transpose = np.array(img_without_exif_transpose)

         self.assertEqual(
@@ -582,7 +589,7 @@ class LoadImageTester(unittest.TestCase):
             (333, 500, 3),
         )

-        img_with_exif_transpose = load_image(img_file)
+        img_with_exif_transpose = load_image(dataset[3]["image"])
         img_arr_with_exif_transpose = np.array(img_with_exif_transpose)

         self.assertEqual(
...
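
For context on the last hunk: `load_image` from `transformers.image_utils` applies EXIF orientation before returning the image, which is why the test compares the raw decoded PIL image against the `load_image` result. A minimal sketch of that behaviour, assuming the pinned `refs/pr/1` revision and that the fourth fixture (`dataset[3]`) carries an EXIF orientation tag, as the test implies:

```python
import numpy as np
from datasets import load_dataset
from PIL import ImageOps

from transformers.image_utils import load_image

dataset = load_dataset(
    "hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1"
)
img = dataset[3]["image"]  # raw decoded PIL image, EXIF orientation not applied

# Raw decode keeps the stored pixel layout; the test asserts (333, 500, 3) here.
print(np.array(img).shape)

# load_image applies EXIF transposition (and mode conversion), so the shape can
# differ from the raw decode when the file carries an orientation tag.
print(np.array(load_image(img)).shape)

# Roughly equivalent orientation handling with PIL alone:
print(np.array(ImageOps.exif_transpose(img)).shape)
```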