"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)

import sys
import json
import logging
import os
import six
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
import requests
from tqdm import tqdm

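# Default cache root: reuse torch.hub's cache directory when torch is
# available; otherwise fall back to $TORCH_HOME, or $XDG_CACHE_HOME/torch
# (~/.cache/torch by default).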
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(
            os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')

try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

try:
    from pathlib import Path
    PYTORCH_PRETRAINED_BERT_CACHE = Path(
        os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)))
except (AttributeError, ImportError):
    PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE',
                                              os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
                                                        default_cache_path))

PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE  # Kept for backward compatibility
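
# Precedence for the cache location, highest first (a summary of the lookup
# above): the PYTORCH_TRANSFORMERS_CACHE environment variable, then the
# legacy PYTORCH_PRETRAINED_BERT_CACHE variable, then default_cache_path.
# For example (illustrative):
#
#     PYTORCH_TRANSFORMERS_CACHE=/tmp/hf_cache python my_script.py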

WEIGHTS_NAME = "pytorch_model.bin"
TF_WEIGHTS_NAME = "model.ckpt"
CONFIG_NAME = "config.json"

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name

if not six.PY2:
    def add_start_docstrings(*docstr):
        def docstring_decorator(fn):
            fn.__doc__ = ''.join(docstr) + fn.__doc__
            return fn
        return docstring_decorator

    def add_end_docstrings(*docstr):
        def docstring_decorator(fn):
            fn.__doc__ = fn.__doc__ + ''.join(docstr)
            return fn
        return docstring_decorator
else:
    # Not possible to update class docstrings on python2
    def add_start_docstrings(*docstr):
        def docstring_decorator(fn):
            return fn
        return docstring_decorator

    def add_end_docstrings(*docstr):
        def docstring_decorator(fn):
            return fn
        return docstring_decorator
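
# Usage sketch for the docstring decorators (illustrative; `prepare` is a
# hypothetical function, not part of this module):
#
#     @add_start_docstrings("Shared preamble. ")
#     def prepare(inputs):
#         """Function-specific details."""
#         return inputs
#
#     # On Python 3: prepare.__doc__ == "Shared preamble. Function-specific details."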

def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    If the url ends with .h5 (Keras HDF5 weights), '.h5' is appended to the
    name so that TF 2.0 can identify it as an HDF5 file
    (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
    """
    url_bytes = url.encode('utf-8')
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode('utf-8')
        etag_hash = sha256(etag_bytes)
        filename += '.' + etag_hash.hexdigest()

    if url.endswith('.h5'):
        filename += '.h5'

    return filename
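
# Example (illustrative; the URL is a placeholder and hashes are abbreviated):
#
#     url_to_filename("https://example.com/weights.bin", etag='"abc"')
#     # -> "<sha256(url)>.<sha256(etag)>"
#     url_to_filename("https://example.com/weights.h5")
#     # -> "<sha256(url)>.h5"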


def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError("file {} not found".format(cache_path))

    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise EnvironmentError("file {} not found".format(meta_path))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    url = metadata['url']
    etag = metadata['etag']

    return url, etag
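
# Example (illustrative): for a cached file <hash> with a sibling metadata
# file <hash>.json containing {"url": ..., "etag": ...}, this recovers the
# original URL and ETag:
#
#     url, etag = filename_to_url("<hash>")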


def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    Args:
        cache_dir: specify a cache directory to save the file to (overrides the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        proxies: (optional) dict of proxy servers, passed on to requests and boto3 when downloading.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    parsed = urlparse(url_or_filename)

    if parsed.scheme in ('http', 'https', 's3'):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies)
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    elif parsed.scheme == '':
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
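
# Usage sketch (illustrative; the URL is a placeholder and network access is
# assumed):
#
#     config_file = cached_path("https://example.com/bert/config.json")
#     same_file = cached_path(config_file)  # existing local paths pass through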


def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    bucket_name = parsed.netloc
    s3_path = parsed.path
    # Remove '/' at beginning of path.
    if s3_path.startswith("/"):
        s3_path = s3_path[1:]
    return bucket_name, s3_path
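
# Example (illustrative):
#
#     split_s3_path("s3://my-bucket/models/weights.bin")
#     # -> ("my-bucket", "models/weights.bin")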


def s3_request(func):
    """
    Wrapper for S3 requests that produces more helpful error messages.
    """

    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            else:
                raise

    return wrapper


@s3_request
def s3_etag(url, proxies=None):
    """Check ETag on S3 object."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag


@s3_request
def s3_get(url, temp_file, proxies=None):
    """Pull a file directly from S3."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)


def http_get(url, temp_file, proxies=None):
    req = requests.get(url, stream=True, proxies=proxies)
    content_length = req.headers.get('Content-Length')
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
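
# Usage sketch (illustrative; the URL is a placeholder and network access is
# assumed):
#
#     with tempfile.NamedTemporaryFile() as tmp:
#         http_get("https://example.com/file.bin", tmp)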


def get_from_cache(url, cache_dir=None, force_download=False, proxies=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_TRANSFORMERS_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
        cache_dir = str(cache_dir)

    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url, proxies=proxies)
    else:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies)
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except EnvironmentError:
            etag = None

    if sys.version_info[0] == 2 and etag is not None:
        etag = etag.decode('utf-8')
    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # If we have no connection (etag is None) and can't identify the file,
    # fall back to a previously downloaded copy, if one exists.
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
        matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[-1])

    if not os.path.exists(cache_path) or force_download:
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file, proxies=proxies)
            else:
                http_get(url, temp_file, proxies=proxies)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                output_string = json.dumps(meta)
                if sys.version_info[0] == 2 and isinstance(output_string, str):
                    output_string = unicode(output_string, 'utf-8')  # The beauty of python 2
                meta_file.write(output_string)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
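
# Minimal end-to-end sketch (illustrative; the URL is a placeholder and
# network access is assumed):
#
#     if __name__ == '__main__':
#         logging.basicConfig(level=logging.INFO)
#         path = get_from_cache("https://example.com/bert/vocab.txt")
#         print(path)  # e.g. <cache_dir>/<sha256-based filename>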