import os
from typing import Any, Callable, Optional, Tuple

from PIL import Image

from .utils import check_integrity, download_url
from .vision import VisionDataset


class SBU(VisionDataset):
    """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.

    Args:
        root (string): Root directory of the dataset where the tarball
            ``SBUCaptionedPhotoDataset.tar.gz`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is
            not downloaded again.
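
    A minimal usage sketch, assuming this module is available as
    ``torchvision.datasets.SBU``; the ``"./sbu"`` root path is an arbitrary
    choice, not something this class requires::

        from torchvision.datasets import SBU

        dataset = SBU(root="./sbu", download=True)  # downloads on first use
        img, caption = dataset[0]                   # PIL image and its caption string
        print(len(dataset), caption)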
    """

    url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
    filename = "SBUCaptionedPhotoDataset.tar.gz"
    md5_checksum = "9aec147b3488753cf758b4d493422285"

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = True,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # Read the caption for each photo
        self.photos = []
        self.captions = []

        file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
        file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")

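        # Keep only the entries whose photo file is actually present on disk;
        # some of the Flickr photos can no longer be downloaded (see ``download``).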
        with open(file1) as fh1, open(file2) as fh2:
            for line1, line2 in zip(fh1, fh2):
                url = line1.rstrip()
                photo = os.path.basename(url)
                filename = os.path.join(self.root, "dataset", photo)
                if os.path.exists(filename):
                    caption = line2.rstrip()
                    self.photos.append(photo)
                    self.captions.append(caption)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is a caption for the photo.
        """
        filename = os.path.join(self.root, "dataset", self.photos[index])
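        # ``convert("RGB")`` normalizes grayscale/palette photos to 3-channel images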
        img = Image.open(filename).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)

        target = self.captions[index]
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        """The number of photos in the dataset."""
        return len(self.photos)

    def _check_integrity(self) -> bool:
        """Check the md5 checksum of the downloaded tarball."""
        root = self.root
        fpath = os.path.join(root, self.filename)
        if not check_integrity(fpath, self.md5_checksum):
            return False
        return True

    def download(self) -> None:
        """Download and extract the tarball, and download each individual photo."""
        import tarfile

        if self._check_integrity():
            print("Files already downloaded and verified")
            return

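        # Fetch the tarball; it unpacks into a ``dataset/`` directory containing
        # the photo URL and caption lists read in ``__init__``.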
        download_url(self.url, self.root, self.filename, self.md5_checksum)

        # Extract file
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)

        # Download individual photos
        with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
            for line in fh:
                url = line.rstrip()
                try:
                    download_url(url, os.path.join(self.root, "dataset"))
                except OSError:
                    # The images point to public images on Flickr.
                    # Note: images might be removed by users at any time.
                    pass