import bz2
import gzip
import hashlib
import itertools
import lzma
import os
import os.path
import pathlib
import re
import tarfile
import urllib
import urllib.error
import urllib.request
import zipfile
from typing import Any, Callable, List, Iterable, Optional, TypeVar, Dict, IO, Tuple, Iterator
from urllib.parse import urlparse

import torch
from torch.utils.model_zoo import tqdm

from .._internally_replaced_utils import (
    _download_file_from_remote_location,
    _is_remote_location_available,
)


USER_AGENT = "pytorch/vision"


def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
    with open(filename, "wb") as fh:
        with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response:
            with tqdm(total=response.length) as pbar:
                for chunk in iter(lambda: response.read(chunk_size), b""):
                    pbar.update(len(chunk))
                    fh.write(chunk)


def gen_bar_updater() -> Callable[[int, int, int], None]:
    pbar = tqdm(total=None)

    def bar_update(count, block_size, total_size):
        if pbar.total is None and total_size:
            pbar.total = total_size
        progress_bytes = count * block_size
        pbar.update(progress_bytes - pbar.n)

    return bar_update
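
# `gen_bar_updater` produces a `reporthook` for `urllib.request.urlretrieve`, which
# calls the hook as (block_count, block_size, total_size). A minimal sketch (the
# URL and path below are hypothetical):
#
#   urllib.request.urlretrieve(
#       "https://example.com/data.bin", "/tmp/data.bin", reporthook=gen_bar_updater()
#   )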


def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
    """Compute the MD5 checksum of a file, reading it in chunks."""
    md5 = hashlib.md5()
    with open(fpath, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()


def check_md5(fpath: str, md5: str, **kwargs: Any) -> bool:
    return md5 == calculate_md5(fpath, **kwargs)


def check_integrity(fpath: str, md5: Optional[str] = None) -> bool:
    if not os.path.isfile(fpath):
        return False
    if md5 is None:
        return True
    return check_md5(fpath, md5)
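
# The checksum helpers compose (a sketch; the path and digest are hypothetical):
#
#   digest = calculate_md5("/tmp/cifar-10-python.tar.gz")
#   assert check_md5("/tmp/cifar-10-python.tar.gz", digest)
#   assert check_integrity("/tmp/cifar-10-python.tar.gz", md5=digest)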


def _get_redirect_url(url: str, max_hops: int = 3) -> str:
    initial_url = url
    headers = {"Method": "HEAD", "User-Agent": USER_AGENT}

    for _ in range(max_hops + 1):
        with urllib.request.urlopen(urllib.request.Request(url, headers=headers, method="HEAD")) as response:
            if response.url == url or response.url is None:
                return url

            url = response.url
    else:
        raise RecursionError(
            f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
        )


def _get_google_drive_file_id(url: str) -> Optional[str]:
    parts = urlparse(url)

    if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
        return None

    match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
    if match is None:
        return None

    return match.group("id")
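
# A sketch of what `_get_google_drive_file_id` extracts (the id is made up):
#
#   _get_google_drive_file_id("https://drive.google.com/file/d/1A2B3C/view")  # -> "1A2B3C"
#   _get_google_drive_file_id("https://example.com/file/d/1A2B3C")            # -> None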


def download_url(
    url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None, max_redirect_hops: int = 3
) -> None:
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
        max_redirect_hops (int, optional): Maximum number of redirect hops allowed
    """
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    # check if file is already present locally
    if check_integrity(fpath, md5):
        print("Using downloaded and verified file: " + fpath)
        return

    if _is_remote_location_available():
        _download_file_from_remote_location(fpath, url)
    else:
        # expand redirect chain if needed
        url = _get_redirect_url(url, max_hops=max_redirect_hops)

        # check if file is located on Google Drive
        file_id = _get_google_drive_file_id(url)
        if file_id is not None:
            return download_file_from_google_drive(file_id, root, filename, md5)

        # download the file
        try:
            print("Downloading " + url + " to " + fpath)
            _urlretrieve(url, fpath)
        except (urllib.error.URLError, OSError) as e:  # type: ignore[attr-defined]
            if url.startswith("https"):
                url = url.replace("https:", "http:")
                print("Failed download. Trying https -> http instead. Downloading " + url + " to " + fpath)
                _urlretrieve(url, fpath)
            else:
                raise e

    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")


def list_dir(root: str, prefix: bool = False) -> List[str]:
    """List all directories at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
    if prefix:
        directories = [os.path.join(root, d) for d in directories]
    return directories
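
# For a layout like root/cat, root/dog (hypothetical), `list_dir` returns the
# directory names in `os.listdir` order:
#
#   list_dir("root")               # -> ["cat", "dog"]
#   list_dir("root", prefix=True)  # -> ["root/cat", "root/dog"]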


def list_files(root: str, suffix: str, prefix: bool = False) -> List[str]:
    """List all files ending with a suffix at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
    if prefix:
        files = [os.path.join(root, f) for f in files]
    return files
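
# Suffix matching uses `str.endswith`, so a tuple of suffixes works too (paths
# hypothetical):
#
#   list_files("root/cat", suffix=(".jpg", ".png"), prefix=True)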


def _quota_exceeded(first_chunk: bytes) -> bool:
    try:
        return "Google Drive - Quota exceeded" in first_chunk.decode()
    except UnicodeDecodeError:
        return False


def download_file_from_google_drive(file_id: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None):
    """Download a Google Drive file from  and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
    import requests

    url = "https://docs.google.com/uc?export=download"

    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print("Using downloaded and verified file: " + fpath)
    else:
        session = requests.Session()

        response = session.get(url, params={"id": file_id}, stream=True)
        token = _get_confirm_token(response)

        if token:
            params = {"id": file_id, "confirm": token}
            response = session.get(url, params=params, stream=True)

        # Ideally, one would use response.status_code to check for quota limits, but google drive is not consistent
        # with their own API, refer https://github.com/pytorch/vision/issues/2992#issuecomment-730614517.
        # Should this be fixed at some place in future, one could refactor the following to no longer rely on decoding
        # the first_chunk of the payload
        response_content_generator = response.iter_content(32768)
        first_chunk = None
        while not first_chunk:  # filter out keep-alive new chunks
            first_chunk = next(response_content_generator)

        if _quota_exceeded(first_chunk):
            msg = (
                f"The daily quota of the file {filename} is exceeded and it "
                f"can't be downloaded. This is a limitation of Google Drive "
                f"and can only be overcome by trying again later."
            )
            raise RuntimeError(msg)

        _save_response_content(itertools.chain((first_chunk,), response_content_generator), fpath)
        response.close()
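
# A direct call looks like this (the file id is made up); note that `download_url`
# also routes Google Drive links here via `_get_google_drive_file_id`:
#
#   download_file_from_google_drive("1A2B3C", root="./data", filename="weights.pth")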


def _get_confirm_token(response: "requests.models.Response") -> Optional[str]:  # type: ignore[name-defined]
    for key, value in response.cookies.items():
        if key.startswith("download_warning"):
            return value

    return None


def _save_response_content(
    response_gen: Iterator[bytes],
    destination: str,
) -> None:
    with open(destination, "wb") as f:
        pbar = tqdm(total=None)
        progress = 0

        for chunk in response_gen:
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                progress += len(chunk)
                pbar.update(progress - pbar.n)
        pbar.close()


def _extract_tar(from_path: str, to_path: str, compression: Optional[str]) -> None:
    with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
        tar.extractall(to_path)


_ZIP_COMPRESSION_MAP: Dict[str, int] = {
    ".bz2": zipfile.ZIP_BZIP2,
    ".xz": zipfile.ZIP_LZMA,
}


def _extract_zip(from_path: str, to_path: str, compression: Optional[str]) -> None:
    with zipfile.ZipFile(
        from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED
    ) as zf:
        zf.extractall(to_path)


_ARCHIVE_EXTRACTORS: Dict[str, Callable[[str, str, Optional[str]], None]] = {
    ".tar": _extract_tar,
    ".zip": _extract_zip,
}


_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {
    ".bz2": bz2.open,
    ".gz": gzip.open,
    ".xz": lzma.open,
}

_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {
    ".tbz": (".tar", ".bz2"),
    ".tbz2": (".tar", ".bz2"),
    ".tgz": (".tar", ".gz"),
}


def _detect_file_type(file: str) -> Tuple[str, Optional[str], Optional[str]]:
    """Detect the archive type and/or compression of a file.

    Args:
        file (str): the filename

    Returns:
        (tuple): tuple of suffix, archive type, and compression

    Raises:
        RuntimeError: if file has no suffix or suffix is not supported
    """
    suffixes = pathlib.Path(file).suffixes
    if not suffixes:
        raise RuntimeError(
            f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
        )
    suffix = suffixes[-1]

    # check if the suffix is a known alias
    if suffix in _FILE_TYPE_ALIASES:
        return (suffix, *_FILE_TYPE_ALIASES[suffix])

    # check if the suffix is an archive type
    if suffix in _ARCHIVE_EXTRACTORS:
        return suffix, suffix, None

    # check if the suffix is a compression
    if suffix in _COMPRESSED_FILE_OPENERS:
        # check for suffix hierarchy
        if len(suffixes) > 1:
            suffix2 = suffixes[-2]

            # check if the suffix2 is an archive type
            if suffix2 in _ARCHIVE_EXTRACTORS:
                return suffix2 + suffix, suffix2, suffix

        return suffix, None, suffix

    valid_suffixes = sorted(set(_FILE_TYPE_ALIASES) | set(_ARCHIVE_EXTRACTORS) | set(_COMPRESSED_FILE_OPENERS))
    raise RuntimeError(f"Unknown compression or archive type: '{suffix}'.\nKnown suffixes are: '{valid_suffixes}'.")


def _decompress(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
    r"""Decompress a file.

    The compression is automatically detected from the file name.

    Args:
        from_path (str): Path to the file to be decompressed.
        to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the decompressed file.
    """
    suffix, archive_type, compression = _detect_file_type(from_path)
    if not compression:
        raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.")

    if to_path is None:
        to_path = from_path.replace(suffix, archive_type if archive_type is not None else "")

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    compressed_file_opener = _COMPRESSED_FILE_OPENERS[compression]

    with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
        wfh.write(rfh.read())

    if remove_finished:
        os.remove(from_path)

    return to_path


def extract_archive(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
    """Extract an archive.

    The archive type and a possible compression is automatically detected from the file name. If the file is compressed
    but not an archive, the call is dispatched to :func:`_decompress`.

    Args:
        from_path (str): Path to the file to be extracted.
        to_path (str): Path to the directory the file will be extracted to. If omitted, the directory of the file is
            used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the directory the file was extracted to.
    """
    if to_path is None:
        to_path = os.path.dirname(from_path)

    suffix, archive_type, compression = _detect_file_type(from_path)
    if not archive_type:
        return _decompress(
            from_path,
            os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")),
            remove_finished=remove_finished,
        )

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    extractor = _ARCHIVE_EXTRACTORS[archive_type]

    extractor(from_path, to_path, compression)

    return to_path
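
# Archives are unpacked into a directory, while a bare compressed file is only
# decompressed (paths hypothetical):
#
#   extract_archive("./data/train.tar.gz", "./data/train")
#   extract_archive("./data/labels.csv.gz")  # -> "./data/labels.csv"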


def download_and_extract_archive(
    url: str,
    download_root: str,
    extract_root: Optional[str] = None,
    filename: Optional[str] = None,
    md5: Optional[str] = None,
    remove_finished: bool = False,
) -> None:
    """Download an archive from a url, then extract it.

    Args:
        url (str): URL to download the archive from
        download_root (str): Directory to place the downloaded archive in
        extract_root (str, optional): Directory to extract the archive to. If None, use ``download_root``
        filename (str, optional): Name to save the archive under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
        remove_finished (bool): If ``True``, remove the archive after extraction.
    """
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print(f"Extracting {archive} to {extract_root}")
    extract_archive(archive, extract_root, remove_finished)
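
# The usual one-liner for dataset setup (the URL is hypothetical):
#
#   download_and_extract_archive(
#       "https://example.com/datasets/train.tar.gz",
#       download_root="./data",
#       remove_finished=True,
#   )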


def iterable_to_str(iterable: Iterable) -> str:
    return "'" + "', '".join([str(item) for item in iterable]) + "'"


T = TypeVar("T", str, bytes)


def verify_str_arg(
    value: T,
    arg: Optional[str] = None,
    valid_values: Optional[Iterable[T]] = None,
    custom_msg: Optional[str] = None,
) -> T:
    if not isinstance(value, torch._six.string_classes):
        if arg is None:
            msg = "Expected type str, but got type {type}."
        else:
            msg = "Expected type str for argument {arg}, but got type {type}."
        msg = msg.format(type=type(value), arg=arg)
        raise ValueError(msg)

    if valid_values is None:
        return value

    if value not in valid_values:
        if custom_msg is not None:
            msg = custom_msg
        else:
            msg = "Unknown value '{value}' for argument {arg}. Valid values are {{{valid_values}}}."
            msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values))
        raise ValueError(msg)

    return value
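
# A sketch of how a dataset validates its `split` argument with `verify_str_arg`:
#
#   verify_str_arg("train", "split", ("train", "test"))  # -> "train"
#   verify_str_arg("valid", "split", ("train", "test"))  # raises ValueError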