import bz2
import contextlib
import gzip
import hashlib
import itertools
import lzma
import os
import os.path
import pathlib
import re
import sys
import tarfile
import urllib
import urllib.error
import urllib.request
import warnings
import zipfile
from typing import Any, Callable, Dict, IO, Iterable, Iterator, List, Optional, Tuple, TypeVar
from urllib.parse import urlparse

import numpy as np
import requests
import torch
from torch.utils.model_zoo import tqdm

from .._internally_replaced_utils import _download_file_from_remote_location, _is_remote_location_available

USER_AGENT = "pytorch/vision"


def _save_response_content(
    content: Iterator[bytes],
    destination: str,
    length: Optional[int] = None,
) -> None:
    with open(destination, "wb") as fh, tqdm(total=length) as pbar:
        for chunk in content:
            # filter out keep-alive new chunks
            if not chunk:
                continue

            fh.write(chunk)
            pbar.update(len(chunk))


def _urlretrieve(url: str, filename: str, chunk_size: int = 1024 * 32) -> None:
    with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response:
        _save_response_content(iter(lambda: response.read(chunk_size), b""), filename, length=response.length)


def gen_bar_updater() -> Callable[[int, int, int], None]:
    warnings.warn("The function `gen_bar_update` is deprecated since 0.13 and will be removed in 0.15.")
    pbar = tqdm(total=None)

    def bar_update(count, block_size, total_size):
        if pbar.total is None and total_size:
            pbar.total = total_size
        progress_bytes = count * block_size
        pbar.update(progress_bytes - pbar.n)

    return bar_update


def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
    # Setting the `usedforsecurity` flag does not change anything about the functionality, but indicates that we are
    # not using the MD5 checksum for cryptography. This enables its usage in restricted environments like FIPS. Without
    # it torchvision.datasets is unusable in these environments since we perform a MD5 check everywhere.
    md5 = hashlib.md5(**dict(usedforsecurity=False) if sys.version_info >= (3, 9) else dict())
    with open(fpath, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()


def check_md5(fpath: str, md5: str, **kwargs: Any) -> bool:
    return md5 == calculate_md5(fpath, **kwargs)


def check_integrity(fpath: str, md5: Optional[str] = None) -> bool:
    if not os.path.isfile(fpath):
        return False
    if md5 is None:
        return True
    return check_md5(fpath, md5)
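
# Illustrative usage (not part of the original module; the path below is a placeholder):
#
#     fpath = "/tmp/archive.tar.gz"
#     md5 = calculate_md5(fpath)              # hex digest of the file contents
#     assert check_integrity(fpath, md5=md5)  # file exists and its checksum matches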


def _get_redirect_url(url: str, max_hops: int = 3) -> str:
    initial_url = url
    headers = {"Method": "HEAD", "User-Agent": USER_AGENT}

    for _ in range(max_hops + 1):
        with urllib.request.urlopen(urllib.request.Request(url, headers=headers)) as response:
            if response.url == url or response.url is None:
                return url

            url = response.url
    else:
        raise RecursionError(
            f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
        )


def _get_google_drive_file_id(url: str) -> Optional[str]:
    parts = urlparse(url)

    if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
        return None

    match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
    if match is None:
        return None

    return match.group("id")


def download_url(
    url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None, max_redirect_hops: int = 3
) -> None:
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
        max_redirect_hops (int, optional): Maximum number of redirect hops allowed
    """
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    # check if file is already present locally
    if check_integrity(fpath, md5):
        print("Using downloaded and verified file: " + fpath)
        return

    if _is_remote_location_available():
        _download_file_from_remote_location(fpath, url)
    else:
        # expand redirect chain if needed
        url = _get_redirect_url(url, max_hops=max_redirect_hops)

        # check if file is located on Google Drive
        file_id = _get_google_drive_file_id(url)
        if file_id is not None:
            return download_file_from_google_drive(file_id, root, filename, md5)

        # download the file
        try:
            print("Downloading " + url + " to " + fpath)
            _urlretrieve(url, fpath)
        except (urllib.error.URLError, OSError) as e:  # type: ignore[attr-defined]
            if url[:5] == "https":
                url = url.replace("https:", "http:")
                print("Failed download. Trying https -> http instead. Downloading " + url + " to " + fpath)
                _urlretrieve(url, fpath)
            else:
                raise e

    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")


def list_dir(root: str, prefix: bool = False) -> List[str]:
    """List all directories at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
    if prefix is True:
        directories = [os.path.join(root, d) for d in directories]
    return directories


def list_files(root: str, suffix: str, prefix: bool = False) -> List[str]:
    """List all files ending with a suffix at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
    if prefix is True:
        files = [os.path.join(root, d) for d in files]
    return files
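
# Illustrative usage (the directory below is a placeholder):
#
#     list_dir("/tmp/datasets", prefix=True)                 # absolute paths of sub-directories
#     list_files("/tmp/datasets", suffix=(".jpg", ".png"))   # names of matching files only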


def _extract_gdrive_api_response(response, chunk_size: int = 32 * 1024) -> Tuple[Optional[str], Iterator[bytes]]:
    content = response.iter_content(chunk_size)
    first_chunk = None
    # filter out keep-alive new chunks
    while not first_chunk:
        first_chunk = next(content)
    content = itertools.chain([first_chunk], content)

    try:
        match = re.search("<title>Google Drive - (?P<api_response>.+?)</title>", first_chunk.decode())
        api_response = match["api_response"] if match is not None else None
    except UnicodeDecodeError:
        api_response = None
    return api_response, content


def download_file_from_google_drive(file_id: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None):
    """Download a Google Drive file from  and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url

    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    if check_integrity(fpath, md5):
        print(f"Using downloaded {'and verified ' if md5 else ''}file: {fpath}")
        return

    url = "https://drive.google.com/uc"
    params = dict(id=file_id, export="download")
    with requests.Session() as session:
        response = session.get(url, params=params, stream=True)

        for key, value in response.cookies.items():
            if key.startswith("download_warning"):
                token = value
                break
        else:
            api_response, content = _extract_gdrive_api_response(response)
            token = "t" if api_response == "Virus scan warning" else None

        if token is not None:
            response = session.get(url, params=dict(params, confirm=token), stream=True)
            api_response, content = _extract_gdrive_api_response(response)

        if api_response == "Quota exceeded":
            raise RuntimeError(
                f"The daily quota of the file {filename} is exceeded and it "
                f"can't be downloaded. This is a limitation of Google Drive "
                f"and can only be overcome by trying again later."
            )

        _save_response_content(content, fpath)

    # In case we deal with an unhandled GDrive API response, the file should be smaller than 10kB and contain only text
    if os.stat(fpath).st_size < 10 * 1024:
        with contextlib.suppress(UnicodeDecodeError), open(fpath) as fh:
            text = fh.read()
            # Regular expression to detect HTML. Copied from https://stackoverflow.com/a/70585604
            if re.search(r"</?\s*[a-z-][^>]*\s*>|(&(?:[\w\d]+|#\d+|#x[a-f\d]+);)", text):
                warnings.warn(
                    f"We detected some HTML elements in the downloaded file. "
                    f"This most likely means that the download triggered an unhandled API response by GDrive. "
                    f"Please report this to torchvision at https://github.com/pytorch/vision/issues including "
                    f"the response:\n\n{text}"
                )

    if md5 and not check_md5(fpath, md5):
        raise RuntimeError(
            f"The MD5 checksum of the download file {fpath} does not match the one on record."
            f"Please delete the file and try again. "
            f"If the issue persists, please report this to torchvision at https://github.com/pytorch/vision/issues."
        )
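
# Illustrative usage sketch (the file id and paths below are placeholders):
#
#     download_file_from_google_drive(
#         "SOME_FILE_ID",
#         root="/tmp/downloads",
#         filename="dataset.zip",
#         md5=None,  # pass the expected hex digest here to verify the download
#     )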


def _extract_tar(from_path: str, to_path: str, compression: Optional[str]) -> None:
    with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
        tar.extractall(to_path)


_ZIP_COMPRESSION_MAP: Dict[str, int] = {
    ".bz2": zipfile.ZIP_BZIP2,
    ".xz": zipfile.ZIP_LZMA,
}


def _extract_zip(from_path: str, to_path: str, compression: Optional[str]) -> None:
    with zipfile.ZipFile(
        from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED
    ) as zip:
        zip.extractall(to_path)


_ARCHIVE_EXTRACTORS: Dict[str, Callable[[str, str, Optional[str]], None]] = {
    ".tar": _extract_tar,
    ".zip": _extract_zip,
}
_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {
    ".bz2": bz2.open,
    ".gz": gzip.open,
    ".xz": lzma.open,
}

_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {
    ".tbz": (".tar", ".bz2"),
    ".tbz2": (".tar", ".bz2"),
    ".tgz": (".tar", ".gz"),
}


def _detect_file_type(file: str) -> Tuple[str, Optional[str], Optional[str]]:
    """Detect the archive type and/or compression of a file.

    Args:
        file (str): the filename

    Returns:
        (tuple): tuple of suffix, archive type, and compression

    Raises:
        RuntimeError: if file has no suffix or suffix is not supported
    """
    suffixes = pathlib.Path(file).suffixes
    if not suffixes:
        raise RuntimeError(
            f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
        )
    suffix = suffixes[-1]

    # check if the suffix is a known alias
    if suffix in _FILE_TYPE_ALIASES:
        return (suffix, *_FILE_TYPE_ALIASES[suffix])

    # check if the suffix is an archive type
    if suffix in _ARCHIVE_EXTRACTORS:
        return suffix, suffix, None

    # check if the suffix is a compression
    if suffix in _COMPRESSED_FILE_OPENERS:
        # check for suffix hierarchy
        if len(suffixes) > 1:
            suffix2 = suffixes[-2]

            # check if the suffix2 is an archive type
            if suffix2 in _ARCHIVE_EXTRACTORS:
                return suffix2 + suffix, suffix2, suffix

        return suffix, None, suffix

    valid_suffixes = sorted(set(_FILE_TYPE_ALIASES) | set(_ARCHIVE_EXTRACTORS) | set(_COMPRESSED_FILE_OPENERS))
    raise RuntimeError(f"Unknown compression or archive type: '{suffix}'.\nKnown suffixes are: '{valid_suffixes}'.")


def _decompress(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
    r"""Decompress a file.

    The compression is automatically detected from the file name.

    Args:
        from_path (str): Path to the file to be decompressed.
        to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the decompressed file.
    """
    suffix, archive_type, compression = _detect_file_type(from_path)
    if not compression:
        raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.")

    if to_path is None:
        to_path = from_path.replace(suffix, archive_type if archive_type is not None else "")

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    compressed_file_opener = _COMPRESSED_FILE_OPENERS[compression]

    with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
        wfh.write(rfh.read())

    if remove_finished:
        os.remove(from_path)

    return to_path
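
# Illustrative usage (the path below is a placeholder):
#
#     _decompress("/tmp/labels.csv.gz")  # writes "/tmp/labels.csv" and returns that path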


def extract_archive(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
    """Extract an archive.

    The archive type and a possible compression are automatically detected from the file name. If the file is
    compressed but not an archive, the call is dispatched to :func:`_decompress`.

    Args:
        from_path (str): Path to the file to be extracted.
        to_path (str): Path to the directory the file will be extracted to. If omitted, the directory of the file is
            used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the directory the file was extracted to.
    """
    if to_path is None:
        to_path = os.path.dirname(from_path)

    suffix, archive_type, compression = _detect_file_type(from_path)
    if not archive_type:
        return _decompress(
            from_path,
            os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")),
            remove_finished=remove_finished,
        )

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    extractor = _ARCHIVE_EXTRACTORS[archive_type]

    extractor(from_path, to_path, compression)
    if remove_finished:
        os.remove(from_path)

    return to_path
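
# Illustrative usage (paths below are placeholders):
#
#     extract_archive("/tmp/downloads/dataset.tar.gz", to_path="/tmp/datasets")  # -> "/tmp/datasets"
#     extract_archive("/tmp/downloads/labels.csv.gz")  # not an archive, so dispatched to _decompress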


def download_and_extract_archive(
    url: str,
    download_root: str,
    extract_root: Optional[str] = None,
    filename: Optional[str] = None,
    md5: Optional[str] = None,
    remove_finished: bool = False,
) -> None:
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print(f"Extracting {archive} to {extract_root}")
    extract_archive(archive, extract_root, remove_finished)
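
# Illustrative usage sketch (the URL and directories below are placeholders):
#
#     download_and_extract_archive(
#         "https://example.com/files/dataset.tar.gz",
#         download_root="/tmp/downloads",
#         extract_root="/tmp/datasets",
#         md5=None,
#         remove_finished=True,  # delete the archive once it has been extracted
#     )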


def iterable_to_str(iterable: Iterable) -> str:
    return "'" + "', '".join([str(item) for item in iterable]) + "'"


T = TypeVar("T", str, bytes)


def verify_str_arg(
    value: T,
    arg: Optional[str] = None,
    valid_values: Optional[Iterable[T]] = None,
    custom_msg: Optional[str] = None,
) -> T:
    if not isinstance(value, str):
        if arg is None:
            msg = "Expected type str, but got type {type}."
        else:
            msg = "Expected type str for argument {arg}, but got type {type}."
        msg = msg.format(type=type(value), arg=arg)
        raise ValueError(msg)

    if valid_values is None:
        return value

    if value not in valid_values:
        if custom_msg is not None:
            msg = custom_msg
        else:
            msg = "Unknown value '{value}' for argument {arg}. Valid values are {{{valid_values}}}."
            msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values))
        raise ValueError(msg)

    return value
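
# Illustrative usage:
#
#     split = verify_str_arg("train", arg="split", valid_values=("train", "test"))
#     # returns "train"; any other value raises ValueError listing the valid choices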


def _read_pfm(file_name: str, slice_channels: int = 2) -> np.ndarray:
    """Read file in .pfm format. Might contain either 1 or 3 channels of data.

    Args:
        file_name (str): Path to the file.
        slice_channels (int): Number of channels to slice out of the file.
            Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
    """

    with open(file_name, "rb") as f:
        header = f.readline().rstrip()
        if header not in [b"PF", b"Pf"]:
            raise ValueError("Invalid PFM file")

        dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
        if not dim_match:
            raise Exception("Malformed PFM header.")
        w, h = (int(dim) for dim in dim_match.groups())

        scale = float(f.readline().rstrip())
        if scale < 0:  # little-endian
            endian = "<"
            scale = -scale
        else:
            endian = ">"  # big-endian

        data = np.fromfile(f, dtype=endian + "f")

    pfm_channels = 3 if header == b"PF" else 1

    data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
    data = np.flip(data, axis=1)  # flip on h dimension
    data = data[:slice_channels, :, :]
    return data.astype(np.float32)
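
# Illustrative usage (the path below is a placeholder):
#
#     disparity = _read_pfm("/tmp/scene/disp0.pfm", slice_channels=1)
#     # disparity.shape == (1, H, W), dtype float32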