Unverified commit 26ea725b authored by Quentin Lhoest, committed by GitHub

Update fixtures-image-utils (#28080)

* fix hf-internal-testing/fixtures_image_utils

* fix test

* comments
parent 1c286be5
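
Context for the diff below: every touched test now pins the fixtures dataset to the pull-request revision and reads the decoded "image" column directly, instead of opening paths from the old "file" column with PIL. A minimal sketch of the new access pattern, assuming only what the diff shows (dataset name, split, revision, and column); the final print is purely illustrative:

from datasets import load_dataset

# Pin the dataset to the PR revision until it is merged upstream, see
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
dataset = load_dataset(
    "hf-internal-testing/fixtures_image_utils",
    split="test",
    revision="refs/pr/1",
)

# Each row exposes a decoded PIL.Image.Image in the "image" column, so there is
# no longer any need to call PIL.Image.open() on a "file" path.
image = dataset[0]["image"]
print(image.mode, image.size)  # the first fixture is RGBA, per the test comments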
@@ -226,10 +226,12 @@ class ImageGPTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
 def prepare_images():
-    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
+    # we use revision="refs/pr/1" until the PR is merged
+    # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
-    image1 = Image.open(dataset[4]["file"])
-    image2 = Image.open(dataset[5]["file"])
+    image1 = dataset[4]["image"]
+    image2 = dataset[5]["image"]
     images = [image1, image2]
......
@@ -68,17 +68,19 @@ class DepthEstimationPipelineTests(unittest.TestCase):
         self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
         import datasets
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
         outputs = depth_estimator(
             [
                 Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                 "http://images.cocodataset.org/val2017/000000039769.jpg",
                 # RGBA
-                dataset[0]["file"],
+                dataset[0]["image"],
                 # LA
-                dataset[1]["file"],
+                dataset[1]["image"],
                 # L
-                dataset[2]["file"],
+                dataset[2]["image"],
             ]
         )
         self.assertEqual(
......
@@ -72,7 +72,9 @@ class ImageClassificationPipelineTests(unittest.TestCase):
         import datasets
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
         # Accepts URL + PIL.Image + lists
         outputs = image_classifier(
@@ -80,11 +82,11 @@ class ImageClassificationPipelineTests(unittest.TestCase):
                 Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                 "http://images.cocodataset.org/val2017/000000039769.jpg",
                 # RGBA
-                dataset[0]["file"],
+                dataset[0]["image"],
                 # LA
-                dataset[1]["file"],
+                dataset[1]["image"],
                 # L
-                dataset[2]["file"],
+                dataset[2]["image"],
             ]
         )
         self.assertEqual(
......
@@ -113,18 +113,20 @@ class ImageSegmentationPipelineTests(unittest.TestCase):
         # to make it work
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs)
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
         # RGBA
-        outputs = image_segmenter(dataset[0]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
+        outputs = image_segmenter(dataset[0]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
         m = len(outputs)
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
         # LA
-        outputs = image_segmenter(dataset[1]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
+        outputs = image_segmenter(dataset[1]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
         m = len(outputs)
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
         # L
-        outputs = image_segmenter(dataset[2]["file"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
+        outputs = image_segmenter(dataset[2]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0)
         m = len(outputs)
         self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs)
......
@@ -73,17 +73,19 @@ class ObjectDetectionPipelineTests(unittest.TestCase):
         import datasets
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
         batch = [
             Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
             "http://images.cocodataset.org/val2017/000000039769.jpg",
             # RGBA
-            dataset[0]["file"],
+            dataset[0]["image"],
             # LA
-            dataset[1]["file"],
+            dataset[1]["image"],
             # L
-            dataset[2]["file"],
+            dataset[2]["image"],
         ]
         batch_outputs = object_detector(batch, threshold=0.0)
......
@@ -538,9 +538,11 @@ class LoadImageTester(unittest.TestCase):
         self.assertEqual(img_arr.shape, (64, 32, 3))
     def test_load_img_rgba(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
-        img = load_image(dataset[0]["file"])  # img with mode RGBA
+        img = load_image(dataset[0]["image"])  # img with mode RGBA
         img_arr = np.array(img)
         self.assertEqual(
@@ -549,9 +551,11 @@ class LoadImageTester(unittest.TestCase):
         )
     def test_load_img_la(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
-        img = load_image(dataset[1]["file"])  # img with mode LA
+        img = load_image(dataset[1]["image"])  # img with mode LA
         img_arr = np.array(img)
         self.assertEqual(
@@ -560,9 +564,11 @@ class LoadImageTester(unittest.TestCase):
         )
     def test_load_img_l(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
-        img = load_image(dataset[2]["file"])  # img with mode L
+        img = load_image(dataset[2]["image"])  # img with mode L
         img_arr = np.array(img)
         self.assertEqual(
@@ -571,10 +577,11 @@ class LoadImageTester(unittest.TestCase):
         )
     def test_load_img_exif_transpose(self):
-        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
-        img_file = dataset[3]["file"]
+        # we use revision="refs/pr/1" until the PR is merged
+        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
-        img_without_exif_transpose = PIL.Image.open(img_file)
+        img_without_exif_transpose = dataset[3]["image"]
         img_arr_without_exif_transpose = np.array(img_without_exif_transpose)
         self.assertEqual(
@@ -582,7 +589,7 @@ class LoadImageTester(unittest.TestCase):
             (333, 500, 3),
         )
-        img_with_exif_transpose = load_image(img_file)
+        img_with_exif_transpose = load_image(dataset[3]["image"])
         img_arr_with_exif_transpose = np.array(img_with_exif_transpose)
         self.assertEqual(
......