import bz2
import contextlib
import gzip
import hashlib
import lzma
import os
import os.path
import pathlib
import re
import tarfile
import urllib
import urllib.error
import urllib.request
import zipfile
from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Tuple, TypeVar
from urllib.parse import urlparse

import torch
from torch.utils.model_zoo import tqdm

from .._internally_replaced_utils import (
    _download_file_from_remote_location,
    _is_remote_location_available,
)

USER_AGENT = "pytorch/vision"


def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
    with open(filename, "wb") as fh:
        with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response:
            with tqdm(total=response.length) as pbar:
                # read() returns bytes, so the sentinel must be b"" for iter() to terminate
                for chunk in iter(lambda: response.read(chunk_size), b""):
                    fh.write(chunk)
                    # advance by the number of bytes actually read, not the requested chunk size
                    pbar.update(len(chunk))


def gen_bar_updater() -> Callable[[int, int, int], None]:
    pbar = tqdm(total=None)

    def bar_update(count: int, block_size: int, total_size: int) -> None:
        if pbar.total is None and total_size:
            pbar.total = total_size
        progress_bytes = count * block_size
        pbar.update(progress_bytes - pbar.n)

    return bar_update
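
# The returned callable matches the ``reporthook`` signature of
# ``urllib.request.urlretrieve``, so it can be used e.g. as:
#   urllib.request.urlretrieve(url, filename, reporthook=gen_bar_updater())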


def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
    md5 = hashlib.md5()
    with open(fpath, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()


def check_md5(fpath: str, md5: str, **kwargs: Any) -> bool:
    return md5 == calculate_md5(fpath, **kwargs)


def check_integrity(fpath: str, md5: Optional[str] = None) -> bool:
    if not os.path.isfile(fpath):
        return False
    if md5 is None:
        return True
    return check_md5(fpath, md5)
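
# Illustrative use of the checksum helpers (hypothetical path and digest):
#   check_integrity("./data/archive.tar.gz", md5="d41d8cd98f00b204e9800998ecf8427e")
# returns True only if the file exists and (when an md5 is given) the digest matches.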


def _get_redirect_url(url: str, max_hops: int = 3) -> str:
    initial_url = url
    headers = {"User-Agent": USER_AGENT}

    for _ in range(max_hops + 1):
        # a HEAD request is enough to resolve the redirect chain without downloading the body
        with urllib.request.urlopen(urllib.request.Request(url, headers=headers, method="HEAD")) as response:
            if response.url == url or response.url is None:
                return url

            url = response.url
    else:
        raise RecursionError(
            f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
        )


def _get_google_drive_file_id(url: str) -> Optional[str]:
    parts = urlparse(url)

    if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
        return None

    match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
    if match is None:
        return None

    return match.group("id")
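
# For illustration, with a typical sharing URL (hypothetical id):
#   _get_google_drive_file_id("https://drive.google.com/file/d/SOME_FILE_ID/view") -> "SOME_FILE_ID"
# URLs that do not point to Google Drive yield None.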


def download_url(
    url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None, max_redirect_hops: int = 3
) -> None:
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
        max_redirect_hops (int, optional): Maximum number of redirect hops allowed
    """
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    # check if file is already present locally
    if check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
        return

    if _is_remote_location_available():
        _download_file_from_remote_location(fpath, url)
    else:
        # expand redirect chain if needed
        url = _get_redirect_url(url, max_hops=max_redirect_hops)

        # check if file is located on Google Drive
        file_id = _get_google_drive_file_id(url)
        if file_id is not None:
            return download_file_from_google_drive(file_id, root, filename, md5)

        # download the file
        try:
            print('Downloading ' + url + ' to ' + fpath)
            _urlretrieve(url, fpath)
        except (urllib.error.URLError, IOError) as e:  # type: ignore[attr-defined]
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                _urlretrieve(url, fpath)
            else:
                raise e

    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError(f"File {fpath} not found or corrupted.")


def list_dir(root: str, prefix: bool = False) -> List[str]:
    """List all directories at a given root

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
    if prefix:
        directories = [os.path.join(root, d) for d in directories]
    return directories


def list_files(root: str, suffix: str, prefix: bool = False) -> List[str]:
    """List all files ending with a suffix at a given root

    Args:
        root (str): Path to directory whose files need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
    if prefix:
        files = [os.path.join(root, f) for f in files]
    return files
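
# Illustrative usage:
#   list_dir("./data")                  -> names of the subdirectories of ./data
#   list_files("./data", ".png", True)  -> paths of all .png files directly under ./data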


def _quota_exceeded(response: "requests.models.Response") -> bool:  # type: ignore[name-defined]
    # When its daily download quota is exceeded, Google Drive serves an HTML
    # error page instead of the file content.
    try:
        start = next(response.iter_content(chunk_size=128, decode_unicode=True))
        return isinstance(start, str) and "Google Drive - Quota exceeded" in start
    except StopIteration:
        return False


def download_file_from_google_drive(file_id: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None):
    """Download a Google Drive file and place it in root.

    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
    import requests

    url = "https://docs.google.com/uc?export=download"

    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)

    os.makedirs(root, exist_ok=True)

    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        session = requests.Session()

        response = session.get(url, params={'id': file_id}, stream=True)
        token = _get_confirm_token(response)

        if token:
            params = {'id': file_id, 'confirm': token}
            response = session.get(url, params=params, stream=True)

        if _quota_exceeded(response):
            msg = (
                f"The daily quota of the file {filename} is exceeded and it "
                f"can't be downloaded. This is a limitation of Google Drive "
                f"and can only be overcome by trying again later."
            )
            raise RuntimeError(msg)

        _save_response_content(response, fpath)
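
# Illustrative call (hypothetical file id):
#   download_file_from_google_drive("SOME_FILE_ID", root="./data", filename="dataset.zip")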


def _get_confirm_token(response: "requests.models.Response") -> Optional[str]:  # type: ignore[name-defined]
    # Google Drive sets a "download_warning" cookie when it wants the client to
    # confirm the download (e.g. for files too large to virus-scan).
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value

    return None


def _save_response_content(
    response: "requests.models.Response", destination: str, chunk_size: int = 32768,  # type: ignore[name-defined]
) -> None:
    with open(destination, "wb") as f:
        pbar = tqdm(total=None)
        progress = 0
        for chunk in response.iter_content(chunk_size):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                progress += len(chunk)
                pbar.update(progress - pbar.n)
        pbar.close()


def _extract_tar(from_path: str, to_path: str, compression: Optional[str]) -> None:
    # strip the leading dot from the compression suffix, e.g. ".gz" -> mode "r:gz"
    with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
        tar.extractall(to_path)


_ZIP_COMPRESSION_MAP: Dict[str, int] = {
    ".bz2": zipfile.ZIP_BZIP2,
    ".xz": zipfile.ZIP_LZMA,
}


def _extract_zip(from_path: str, to_path: str, compression: Optional[str]) -> None:
    with zipfile.ZipFile(
        from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED
    ) as zf:  # named ``zf`` to avoid shadowing the builtin ``zip``
        zf.extractall(to_path)


_ARCHIVE_EXTRACTORS: Dict[str, Callable[[str, str, Optional[str]], None]] = {
    ".tar": _extract_tar,
    ".zip": _extract_zip,
}
_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {
    ".bz2": bz2.open,
    ".gz": gzip.open,
    ".xz": lzma.open,
}
_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {
    ".tbz": (".tar", ".bz2"),
    ".tbz2": (".tar", ".bz2"),
    ".tgz": (".tar", ".gz"),
}


def _verify_archive_type(archive_type: str) -> None:
    if archive_type not in _ARCHIVE_EXTRACTORS:
        valid_types = "', '".join(_ARCHIVE_EXTRACTORS.keys())
        raise RuntimeError(f"Unknown archive type '{archive_type}'. Known archive types are '{valid_types}'.")


def _verify_compression(compression: str) -> None:
    if compression not in _COMPRESSED_FILE_OPENERS:
        valid_types = "', '".join(_COMPRESSED_FILE_OPENERS.keys())
        raise RuntimeError(f"Unknown compression '{compression}'. Known compressions are '{valid_types}'.")


def _detect_file_type(file: str) -> Tuple[str, Optional[str], Optional[str]]:
    """Detect the archive type and/or compression of a file from its suffixes.

    Returns:
        (tuple): A ``(suffix, archive_type, compression)`` triple, where ``archive_type``
            and ``compression`` may each be ``None``.
    """
    path = pathlib.Path(file)
    suffix = path.suffix
    suffixes = path.suffixes
    if not suffixes:
        raise RuntimeError(
            f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
        )
    elif len(suffixes) > 2:
        raise RuntimeError(
            f"Archive type and compression detection only works for 1 or 2 suffixes. Got {len(suffixes)} instead."
        )
    elif len(suffixes) == 2:
        # if we have exactly two suffixes we assume the first one is the archive type and the second one is the
        # compression
        archive_type, compression = suffixes
        _verify_archive_type(archive_type)
        _verify_compression(compression)
        return "".join(suffixes), archive_type, compression

    # check if the suffix is a known alias
    with contextlib.suppress(KeyError):
        return (suffix, *_FILE_TYPE_ALIASES[suffix])

    # check if the suffix is an archive type
    with contextlib.suppress(RuntimeError):
        _verify_archive_type(suffix)
        return suffix, suffix, None

    # check if the suffix is a compression
    with contextlib.suppress(RuntimeError):
        _verify_compression(suffix)
        return suffix, None, suffix

    raise RuntimeError(f"Suffix '{suffix}' is neither recognized as archive type nor as compression.")


def _decompress(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
    r"""Decompress a file.

    The compression is automatically detected from the file name.

    Args:
        from_path (str): Path to the file to be decompressed.
        to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the decompressed file.
    """
    suffix, archive_type, compression = _detect_file_type(from_path)
    if not compression:
        raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.")

    if to_path is None:
        to_path = from_path.replace(suffix, archive_type if archive_type is not None else "")

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    compressed_file_opener = _COMPRESSED_FILE_OPENERS[compression]

    with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
        wfh.write(rfh.read())

    if remove_finished:
        os.remove(from_path)

    return to_path
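
# For example (illustrative): _decompress("./data/foo.gz") writes ./data/foo and returns that path.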


def extract_archive(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
    """Extract an archive.

    The archive type and a possible compression are automatically detected from the file name. If the file is compressed
    but not an archive, the call is dispatched to :func:`_decompress`.

    Args:
        from_path (str): Path to the file to be extracted.
        to_path (str): Path to the directory the file will be extracted to. If omitted, the directory of the file is
            used.
        remove_finished (bool): If ``True``, remove the file after the extraction.

    Returns:
        (str): Path to the directory the file was extracted to.
    """
    if to_path is None:
        to_path = os.path.dirname(from_path)

    suffix, archive_type, compression = _detect_file_type(from_path)
    if not archive_type:
        return _decompress(
            from_path,
            os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")),
            remove_finished=remove_finished,
        )

    # We don't need to check for a missing key here, since this was already done in _detect_file_type()
    extractor = _ARCHIVE_EXTRACTORS[archive_type]

    extractor(from_path, to_path, compression)

    return to_path
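
# Illustrative usage:
#   extract_archive("./data/foo.tar.gz")          extracts into ./data
#   extract_archive("./data/foo.zip", "./other")  extracts into ./other
#   extract_archive("./data/foo.gz")              just decompresses to ./data/foo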


def download_and_extract_archive(
    url: str,
    download_root: str,
    extract_root: Optional[str] = None,
    filename: Optional[str] = None,
    md5: Optional[str] = None,
    remove_finished: bool = False,
) -> None:
    """Download a file from a url and extract the archive it contains.

    Args:
        url (str): URL to download the archive from
        download_root (str): Directory to place the downloaded archive in
        extract_root (str, optional): Directory to extract to. If None, use ``download_root``
        filename (str, optional): Name to save the archive under. If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download. If None, do not check
        remove_finished (bool): If ``True``, remove the archive after extraction
    """
    download_root = os.path.expanduser(download_root)
    if extract_root is None:
        extract_root = download_root
    if not filename:
        filename = os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print(f"Extracting {archive} to {extract_root}")
    extract_archive(archive, extract_root, remove_finished)
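
# Illustrative call (hypothetical URL):
#   download_and_extract_archive(
#       "https://example.com/data.tar.gz", download_root="./downloads", extract_root="./data"
#   )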


def iterable_to_str(iterable: Iterable) -> str:
    return "'" + "', '".join([str(item) for item in iterable]) + "'"


T = TypeVar("T", str, bytes)


def verify_str_arg(
    value: T, arg: Optional[str] = None, valid_values: Optional[Iterable[T]] = None, custom_msg: Optional[str] = None,
) -> T:
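    """Verify that ``value`` is a string and, optionally, one of ``valid_values``.

    Returns:
        The unchanged ``value`` if all checks pass; otherwise a ``ValueError`` is raised.
    """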
    if not isinstance(value, torch._six.string_classes):
        if arg is None:
            msg = "Expected type str, but got type {type}."
        else:
            msg = "Expected type str for argument {arg}, but got type {type}."
        msg = msg.format(type=type(value), arg=arg)
        raise ValueError(msg)

    if valid_values is None:
        return value

    if value not in valid_values:
        if custom_msg is not None:
            msg = custom_msg
        else:
            msg = ("Unknown value '{value}' for argument {arg}. "
                   "Valid values are {{{valid_values}}}.")
            msg = msg.format(value=value, arg=arg,
                             valid_values=iterable_to_str(valid_values))
        raise ValueError(msg)

    return value
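
# Illustrative usage inside a dataset constructor:
#   split = verify_str_arg(split, "split", ("train", "test"))
# raises ValueError for non-string values or strings outside ("train", "test").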