Unverified Commit edcbe803 authored by Álvaro Somoza's avatar Álvaro Somoza Committed by GitHub
Browse files

Fix huggingface-hub failing tests (#11994)

* login

* more logins

* uploads

* missed login

* another missed login

* downloads

* examples and more logins

* fix

* setup

* Apply style fixes

* fix

* Apply style fixes
parent c02c4a6d
...@@ -593,7 +593,7 @@ def main(): ...@@ -593,7 +593,7 @@ def main():
if args.report_to == "wandb" and args.hub_token is not None: if args.report_to == "wandb" and args.hub_token is not None:
raise ValueError( raise ValueError(
"You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
" Please use `huggingface-cli login` to authenticate with the Hub." " Please use `hf auth login` to authenticate with the Hub."
) )
logging_dir = os.path.join(args.output_dir, args.logging_dir) logging_dir = os.path.join(args.output_dir, args.logging_dir)
......
...@@ -151,7 +151,7 @@ dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "pa ...@@ -151,7 +151,7 @@ dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "pa
Next, push it to the hub! Next, push it to the hub!
```python ```python
# assuming you have ran the huggingface-cli login command in a terminal # assuming you have ran the hf auth login command in a terminal
dataset.push_to_hub("name_of_your_dataset") dataset.push_to_hub("name_of_your_dataset")
# if you want to push to a private repo, simply pass private=True: # if you want to push to a private repo, simply pass private=True:
......
...@@ -102,7 +102,7 @@ _deps = [ ...@@ -102,7 +102,7 @@ _deps = [
"filelock", "filelock",
"flax>=0.4.1", "flax>=0.4.1",
"hf-doc-builder>=0.3.0", "hf-doc-builder>=0.3.0",
"huggingface-hub>=0.27.0", "huggingface-hub>=0.34.0",
"requests-mock==1.10.0", "requests-mock==1.10.0",
"importlib_metadata", "importlib_metadata",
"invisible-watermark>=0.2.0", "invisible-watermark>=0.2.0",
......
...@@ -407,7 +407,7 @@ class ConfigMixin: ...@@ -407,7 +407,7 @@ class ConfigMixin:
raise EnvironmentError( raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier" f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
" listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a" " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
" token having permission to this repo with `token` or log in with `huggingface-cli login`." " token having permission to this repo with `token` or log in with `hf auth login`."
) )
except RevisionNotFoundError: except RevisionNotFoundError:
raise EnvironmentError( raise EnvironmentError(
......
...@@ -9,7 +9,7 @@ deps = { ...@@ -9,7 +9,7 @@ deps = {
"filelock": "filelock", "filelock": "filelock",
"flax": "flax>=0.4.1", "flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0", "hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0", "huggingface-hub": "huggingface-hub>=0.34.0",
"requests-mock": "requests-mock==1.10.0", "requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata", "importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0", "invisible-watermark": "invisible-watermark>=0.2.0",
......
...@@ -249,8 +249,8 @@ class BaseGuidance(ConfigMixin, PushToHubMixin): ...@@ -249,8 +249,8 @@ class BaseGuidance(ConfigMixin, PushToHubMixin):
<Tip> <Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
`huggingface-cli login`. You can also activate the special auth login`. You can also activate the special
["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
firewalled environment. firewalled environment.
......
...@@ -117,8 +117,8 @@ class AutoModel(ConfigMixin): ...@@ -117,8 +117,8 @@ class AutoModel(ConfigMixin):
<Tip> <Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
`huggingface-cli login`. You can also activate the special auth login`. You can also activate the special
["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
firewalled environment. firewalled environment.
......
...@@ -369,8 +369,7 @@ class FlaxModelMixin(PushToHubMixin): ...@@ -369,8 +369,7 @@ class FlaxModelMixin(PushToHubMixin):
raise EnvironmentError( raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `token` or log in with `huggingface-cli " "token having permission to this repo with `token` or log in with `hf auth login`."
"login`."
) )
except RevisionNotFoundError: except RevisionNotFoundError:
raise EnvironmentError( raise EnvironmentError(
......
...@@ -943,8 +943,8 @@ class ModelMixin(torch.nn.Module, PushToHubMixin): ...@@ -943,8 +943,8 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
<Tip> <Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
`huggingface-cli login`. You can also activate the special auth login`. You can also activate the special
["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
firewalled environment. firewalled environment.
......
...@@ -86,7 +86,7 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing ...@@ -86,7 +86,7 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing
### Text-to-Image generation with Stable Diffusion ### Text-to-Image generation with Stable Diffusion
```python ```python
# make sure you're logged in with `huggingface-cli login` # make sure you're logged in with `hf auth login`
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
......
...@@ -392,8 +392,8 @@ class AutoPipelineForText2Image(ConfigMixin): ...@@ -392,8 +392,8 @@ class AutoPipelineForText2Image(ConfigMixin):
<Tip> <Tip>
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
`huggingface-cli login`. auth login`.
</Tip> </Tip>
...@@ -687,8 +687,8 @@ class AutoPipelineForImage2Image(ConfigMixin): ...@@ -687,8 +687,8 @@ class AutoPipelineForImage2Image(ConfigMixin):
<Tip> <Tip>
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
`huggingface-cli login`. auth login`.
</Tip> </Tip>
...@@ -997,8 +997,8 @@ class AutoPipelineForInpainting(ConfigMixin): ...@@ -997,8 +997,8 @@ class AutoPipelineForInpainting(ConfigMixin):
<Tip> <Tip>
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
`huggingface-cli login`. auth login`.
</Tip> </Tip>
......
...@@ -717,7 +717,7 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Sta ...@@ -717,7 +717,7 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Sta
from diffusers import CycleDiffusionPipeline, DDIMScheduler from diffusers import CycleDiffusionPipeline, DDIMScheduler
# load the pipeline # load the pipeline
# make sure you're logged in with `huggingface-cli login` # make sure you're logged in with `hf auth login`
model_id_or_path = "CompVis/stable-diffusion-v1-4" model_id_or_path = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler") scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda") pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda")
......
...@@ -278,8 +278,8 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin): ...@@ -278,8 +278,8 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
<Tip> <Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
`huggingface-cli login`. auth login`.
</Tip> </Tip>
......
...@@ -710,8 +710,8 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): ...@@ -710,8 +710,8 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
<Tip> <Tip>
To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
`huggingface-cli login`. auth login`.
</Tip> </Tip>
...@@ -1430,8 +1430,8 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin): ...@@ -1430,8 +1430,8 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
<Tip> <Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
`huggingface-cli login`. auth login`.
</Tip> </Tip>
......
...@@ -28,7 +28,7 @@ download the weights with `git lfs install; git clone https://huggingface.co/sta ...@@ -28,7 +28,7 @@ download the weights with `git lfs install; git clone https://huggingface.co/sta
### Using Stable Diffusion without being logged into the Hub. ### Using Stable Diffusion without being logged into the Hub.
If you want to download the model weights using a single Python line, you need to be logged in via `huggingface-cli login`. If you want to download the model weights using a single Python line, you need to be logged in via `hf auth login`.
```python ```python
from diffusers import DiffusionPipeline from diffusers import DiffusionPipeline
...@@ -54,7 +54,7 @@ pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") ...@@ -54,7 +54,7 @@ pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
### Text-to-Image with default PLMS scheduler ### Text-to-Image with default PLMS scheduler
```python ```python
# make sure you're logged in with `huggingface-cli login` # make sure you're logged in with `hf auth login`
from diffusers import StableDiffusionPipeline from diffusers import StableDiffusionPipeline
pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
...@@ -69,7 +69,7 @@ image.save("astronaut_rides_horse.png") ...@@ -69,7 +69,7 @@ image.save("astronaut_rides_horse.png")
### Text-to-Image with DDIM scheduler ### Text-to-Image with DDIM scheduler
```python ```python
# make sure you're logged in with `huggingface-cli login` # make sure you're logged in with `hf auth login`
from diffusers import StableDiffusionPipeline, DDIMScheduler from diffusers import StableDiffusionPipeline, DDIMScheduler
scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
...@@ -88,7 +88,7 @@ image.save("astronaut_rides_horse.png") ...@@ -88,7 +88,7 @@ image.save("astronaut_rides_horse.png")
### Text-to-Image with K-LMS scheduler ### Text-to-Image with K-LMS scheduler
```python ```python
# make sure you're logged in with `huggingface-cli login` # make sure you're logged in with `hf auth login`
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
lms = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") lms = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
...@@ -118,7 +118,7 @@ from diffusers import CycleDiffusionPipeline, DDIMScheduler ...@@ -118,7 +118,7 @@ from diffusers import CycleDiffusionPipeline, DDIMScheduler
# load the scheduler. CycleDiffusion only supports stochastic schedulers. # load the scheduler. CycleDiffusion only supports stochastic schedulers.
# load the pipeline # load the pipeline
# make sure you're logged in with `huggingface-cli login` # make sure you're logged in with `hf auth login`
model_id_or_path = "CompVis/stable-diffusion-v1-4" model_id_or_path = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler") scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda") pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda")
......
...@@ -140,8 +140,8 @@ class SchedulerMixin(PushToHubMixin): ...@@ -140,8 +140,8 @@ class SchedulerMixin(PushToHubMixin):
<Tip> <Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
`huggingface-cli login`. You can also activate the special auth login`. You can also activate the special
["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
firewalled environment. firewalled environment.
......
...@@ -120,7 +120,7 @@ class FlaxSchedulerMixin(PushToHubMixin): ...@@ -120,7 +120,7 @@ class FlaxSchedulerMixin(PushToHubMixin):
<Tip> <Tip>
It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated It is required to be logged in (`hf auth login`) when you want to use private or [gated
models](https://huggingface.co/docs/hub/models-gated#gated-models). models](https://huggingface.co/docs/hub/models-gated#gated-models).
</Tip> </Tip>
......
...@@ -318,8 +318,8 @@ def get_cached_module_file( ...@@ -318,8 +318,8 @@ def get_cached_module_file(
<Tip> <Tip>
You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private or You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated
[gated models](https://huggingface.co/docs/hub/models-gated#gated-models). models](https://huggingface.co/docs/hub/models-gated#gated-models).
</Tip> </Tip>
...@@ -505,8 +505,8 @@ def get_class_from_dynamic_module( ...@@ -505,8 +505,8 @@ def get_class_from_dynamic_module(
<Tip> <Tip>
You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private or You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated
[gated models](https://huggingface.co/docs/hub/models-gated#gated-models). models](https://huggingface.co/docs/hub/models-gated#gated-models).
</Tip> </Tip>
......
...@@ -304,8 +304,7 @@ def _get_model_file( ...@@ -304,8 +304,7 @@ def _get_model_file(
raise EnvironmentError( raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `token` or log in with `huggingface-cli " "token having permission to this repo with `token` or log in with `hf auth login`."
"login`."
) from e ) from e
except RevisionNotFoundError as e: except RevisionNotFoundError as e:
raise EnvironmentError( raise EnvironmentError(
...@@ -515,8 +514,8 @@ class PushToHubMixin: ...@@ -515,8 +514,8 @@ class PushToHubMixin:
Whether to make the repo private. If `None` (default), the repo will be public unless the Whether to make the repo private. If `None` (default), the repo will be public unless the
organization's default is private. This value is ignored if the repo already exists. organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*): token (`str`, *optional*):
The token to use as HTTP bearer authorization for remote files. The token generated when running The token to use as HTTP bearer authorization for remote files. The token generated when running `hf
`huggingface-cli login` (stored in `~/.huggingface`). auth login` (stored in `~/.huggingface`).
create_pr (`bool`, *optional*, defaults to `False`): create_pr (`bool`, *optional*, defaults to `False`):
Whether or not to create a PR with the uploaded files or directly commit. Whether or not to create a PR with the uploaded files or directly commit.
safe_serialization (`bool`, *optional*, defaults to `True`): safe_serialization (`bool`, *optional*, defaults to `True`):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment