"tests/models/phi/test_modeling_phi.py" did not exist on "81009b7a5c5cb183a9275c15bf347bdc988b02c4"
Unverified commit 74c55ab9, authored by Lucain, committed by GitHub
Browse files

Prepare tests for hfh 0.14 (#22958)



* Test hf_hub 0.14.0rc1

* fix mocked tests

* package version

---------
Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com>
Co-authored-by: testbot <lucainp@hf.co>
parent 69f2d538
......@@ -34,6 +34,7 @@ from typing import Iterator, List, Optional, Union
from unittest import mock
import huggingface_hub
import requests
from transformers import logging as transformers_logging
......@@ -1688,12 +1689,16 @@ class RequestCounter:
self.head_request_count = 0
self.get_request_count = 0
self.other_request_count = 0
self.old_request = huggingface_hub.file_download.requests.request
huggingface_hub.file_download.requests.request = self.new_request
# Mock `get_session` to count HTTP calls.
self.old_get_session = huggingface_hub.utils._http.get_session
self.session = requests.Session()
self.session.request = self.new_request
huggingface_hub.utils._http.get_session = lambda: self.session
return self
def __exit__(self, *args, **kwargs):
huggingface_hub.file_download.requests.request = self.old_request
huggingface_hub.utils._http.get_session = self.old_get_session
def new_request(self, method, **kwargs):
if method == "GET":
......@@ -1703,7 +1708,7 @@ class RequestCounter:
else:
self.other_request_count += 1
return self.old_request(method=method, **kwargs)
return requests.request(method=method, **kwargs)
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
......
......@@ -357,7 +357,7 @@ class ConfigTestUtils(unittest.TestCase):
_ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
# This check we did call the fake head request
mock_head.assert_called()
......
......@@ -83,7 +83,7 @@ class FeatureExtractorUtilTester(unittest.TestCase):
# Download this model to make sure it's in the cache.
_ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
# This check we did call the fake head request
mock_head.assert_called()
......
......@@ -215,7 +215,7 @@ class ImageProcessorUtilTester(unittest.TestCase):
# Download this model to make sure it's in the cache.
_ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# This check we did call the fake head request
mock_head.assert_called()
......
......@@ -3348,7 +3348,7 @@ class ModelUtilsTest(TestCasePlus):
_ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# This check we did call the fake head request
mock_head.assert_called()
......
......@@ -2016,7 +2016,7 @@ class UtilsFunctionsTest(unittest.TestCase):
_ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
# This check we did call the fake head request
mock_head.assert_called()
......
......@@ -3954,7 +3954,7 @@ class TokenizerUtilTester(unittest.TestCase):
_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
# This check we did call the fake head request
mock_head.assert_called()
......@@ -3972,7 +3972,7 @@ class TokenizerUtilTester(unittest.TestCase):
_ = GPT2TokenizerFast.from_pretrained("gpt2")
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
_ = GPT2TokenizerFast.from_pretrained("gpt2")
# This check we did call the fake head request
mock_head.assert_called()
......
......@@ -89,7 +89,7 @@ class GetFromCacheTests(unittest.TestCase):
response_mock.json.return_value = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.request", return_value=response_mock) as mock_head:
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
self.assertIsNone(path)
# This check we did call the fake head request
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment