Unverified commit edcbe803, authored by Álvaro Somoza, committed by GitHub

Fix huggingface-hub failing tests (#11994)

* login

* more logins

* uploads

* missed login

* another missed login

* downloads

* examples and more logins

* fix

* setup

* Apply style fixes

* fix

* Apply style fixes
parent c02c4a6d
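The individual commits above mostly add or fix Hub logins, uploads, and downloads in the test suite and examples, alongside the CLI rename reflected in the diff below. For readers following along outside the diff: `hf auth login` is the interactive CLI route; a minimal programmatic equivalent, assuming a valid access token is available in the `HF_TOKEN` environment variable, would be:

```python
# Minimal sketch, not part of the diff: programmatic equivalent of `hf auth login`.
# Assumes a valid Hugging Face access token in the HF_TOKEN environment variable.
import os

from huggingface_hub import login

login(token=os.environ["HF_TOKEN"])  # caches the token for subsequent Hub calls
```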
@@ -593,7 +593,7 @@ def main():
     if args.report_to == "wandb" and args.hub_token is not None:
         raise ValueError(
             "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
-            " Please use `huggingface-cli login` to authenticate with the Hub."
+            " Please use `hf auth login` to authenticate with the Hub."
         )
     logging_dir = os.path.join(args.output_dir, args.logging_dir)
@@ -151,7 +151,7 @@ dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "pa
 Next, push it to the hub!
 ```python
-# assuming you have ran the huggingface-cli login command in a terminal
+# assuming you have ran the hf auth login command in a terminal
 dataset.push_to_hub("name_of_your_dataset")
 # if you want to push to a private repo, simply pass private=True:
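The hunk above is truncated right after the `private=True` comment. Purely as an illustrative aside (not part of the diff), a private push with an explicit token, assuming a local image folder, could look like:

```python
# Illustrative sketch, not part of the diff: load a local image folder and push it
# to a private Hub repo, passing a token explicitly instead of relying on `hf auth login`.
# The folder path, repo name, and token are placeholders.
from datasets import load_dataset

dataset = load_dataset("imagefolder", data_dir="path/to/folder")
dataset.push_to_hub("name_of_your_dataset", private=True, token="hf_xxx")
```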
@@ -102,7 +102,7 @@ _deps = [
     "filelock",
     "flax>=0.4.1",
     "hf-doc-builder>=0.3.0",
-    "huggingface-hub>=0.27.0",
+    "huggingface-hub>=0.34.0",
     "requests-mock==1.10.0",
     "importlib_metadata",
     "invisible-watermark>=0.2.0",
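Since the minimum pin moves from 0.27.0 to 0.34.0, a quick illustrative check (not part of the diff) that a local environment satisfies the new floor:

```python
# Illustrative sketch, not part of the diff: confirm the installed huggingface_hub
# meets the new minimum pin declared in setup.py.
import huggingface_hub
from packaging import version

assert version.parse(huggingface_hub.__version__) >= version.parse("0.34.0"), (
    f"huggingface_hub {huggingface_hub.__version__} is older than the required 0.34.0"
)
```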
@@ -407,7 +407,7 @@ class ConfigMixin:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
                " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
-                " token having permission to this repo with `token` or log in with `huggingface-cli login`."
+                " token having permission to this repo with `token` or log in with `hf auth login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
@@ -9,7 +9,7 @@ deps = {
     "filelock": "filelock",
     "flax": "flax>=0.4.1",
     "hf-doc-builder": "hf-doc-builder>=0.3.0",
-    "huggingface-hub": "huggingface-hub>=0.27.0",
+    "huggingface-hub": "huggingface-hub>=0.34.0",
     "requests-mock": "requests-mock==1.10.0",
     "importlib_metadata": "importlib_metadata",
     "invisible-watermark": "invisible-watermark>=0.2.0",
@@ -249,8 +249,8 @@ class BaseGuidance(ConfigMixin, PushToHubMixin):
 <Tip>
-To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
-`huggingface-cli login`. You can also activate the special
+To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
+auth login`. You can also activate the special
 ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
 firewalled environment.
@@ -117,8 +117,8 @@ class AutoModel(ConfigMixin):
 <Tip>
-To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
-`huggingface-cli login`. You can also activate the special
+To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
+auth login`. You can also activate the special
 ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
 firewalled environment.
@@ -369,8 +369,7 @@ class FlaxModelMixin(PushToHubMixin):
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
-                "token having permission to this repo with `token` or log in with `huggingface-cli "
-                "login`."
+                "token having permission to this repo with `token` or log in with `hf auth login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
@@ -943,8 +943,8 @@ class ModelMixin(torch.nn.Module, PushToHubMixin):
 <Tip>
-To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
-`huggingface-cli login`. You can also activate the special
+To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
+auth login`. You can also activate the special
 ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
 firewalled environment.
@@ -86,7 +86,7 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing
 ### Text-to-Image generation with Stable Diffusion
 ```python
-# make sure you're logged in with `huggingface-cli login`
+# make sure you're logged in with `hf auth login`
 from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
 pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
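As an aside (not part of the diff), gated or private checkpoints can also be loaded without any CLI login by passing a token directly to `from_pretrained`; a sketch with a placeholder token:

```python
# Sketch, not part of the diff: pass a token to from_pretrained instead of logging
# in via the CLI. The token value is a placeholder.
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    token="hf_xxx",
)
```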
@@ -392,8 +392,8 @@ class AutoPipelineForText2Image(ConfigMixin):
 <Tip>
-To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
-`huggingface-cli login`.
+To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
+auth login`.
 </Tip>
@@ -687,8 +687,8 @@ class AutoPipelineForImage2Image(ConfigMixin):
 <Tip>
-To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
-`huggingface-cli login`.
+To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
+auth login`.
 </Tip>
@@ -997,8 +997,8 @@ class AutoPipelineForInpainting(ConfigMixin):
 <Tip>
-To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
-`huggingface-cli login`.
+To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
+auth login`.
 </Tip>
@@ -717,7 +717,7 @@ class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, Sta
 from diffusers import CycleDiffusionPipeline, DDIMScheduler
 # load the pipeline
-# make sure you're logged in with `huggingface-cli login`
+# make sure you're logged in with `hf auth login`
 model_id_or_path = "CompVis/stable-diffusion-v1-4"
 scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler")
 pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda")
@@ -278,8 +278,8 @@ class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
 <Tip>
-To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
-`huggingface-cli login`.
+To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
+auth login`.
 </Tip>
@@ -710,8 +710,8 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
 <Tip>
-To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
-`huggingface-cli login`.
+To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf
+auth login`.
 </Tip>
@@ -1430,8 +1430,8 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
 <Tip>
-To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
-`huggingface-cli login`.
+To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
+auth login`.
 </Tip>
@@ -28,7 +28,7 @@ download the weights with `git lfs install; git clone https://huggingface.co/sta
 ### Using Stable Diffusion without being logged into the Hub.
-If you want to download the model weights using a single Python line, you need to be logged in via `huggingface-cli login`.
+If you want to download the model weights using a single Python line, you need to be logged in via `hf auth login`.
 ```python
 from diffusers import DiffusionPipeline
@@ -54,7 +54,7 @@ pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
 ### Text-to-Image with default PLMS scheduler
 ```python
-# make sure you're logged in with `huggingface-cli login`
+# make sure you're logged in with `hf auth login`
 from diffusers import StableDiffusionPipeline
 pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
@@ -69,7 +69,7 @@ image.save("astronaut_rides_horse.png")
 ### Text-to-Image with DDIM scheduler
 ```python
-# make sure you're logged in with `huggingface-cli login`
+# make sure you're logged in with `hf auth login`
 from diffusers import StableDiffusionPipeline, DDIMScheduler
 scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
@@ -88,7 +88,7 @@ image.save("astronaut_rides_horse.png")
 ### Text-to-Image with K-LMS scheduler
 ```python
-# make sure you're logged in with `huggingface-cli login`
+# make sure you're logged in with `hf auth login`
 from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
 lms = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
@@ -118,7 +118,7 @@ from diffusers import CycleDiffusionPipeline, DDIMScheduler
 # load the scheduler. CycleDiffusion only supports stochastic schedulers.
 # load the pipeline
-# make sure you're logged in with `huggingface-cli login`
+# make sure you're logged in with `hf auth login`
 model_id_or_path = "CompVis/stable-diffusion-v1-4"
 scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler")
 pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda")
@@ -140,8 +140,8 @@ class SchedulerMixin(PushToHubMixin):
 <Tip>
-To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
-`huggingface-cli login`. You can also activate the special
+To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
+auth login`. You can also activate the special
 ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
 firewalled environment.
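The Tip above also points at offline mode; for reference (not part of the diff), it is typically enabled through an environment variable set before any Hub-aware import, for example:

```python
# Sketch, not part of the diff: enable offline mode so from_pretrained resolves
# files from the local cache only. Set the variable before importing diffusers.
import os

os.environ["HF_HUB_OFFLINE"] = "1"

from diffusers import DDIMScheduler

# Works only if the files were previously downloaded into the local cache.
scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
```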
@@ -120,7 +120,7 @@ class FlaxSchedulerMixin(PushToHubMixin):
 <Tip>
-It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+It is required to be logged in (`hf auth login`) when you want to use private or [gated
 models](https://huggingface.co/docs/hub/models-gated#gated-models).
 </Tip>
@@ -318,8 +318,8 @@ def get_cached_module_file(
 <Tip>
-You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private or
-[gated models](https://huggingface.co/docs/hub/models-gated#gated-models).
+You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated
+models](https://huggingface.co/docs/hub/models-gated#gated-models).
 </Tip>
@@ -505,8 +505,8 @@ def get_class_from_dynamic_module(
 <Tip>
-You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private or
-[gated models](https://huggingface.co/docs/hub/models-gated#gated-models).
+You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated
+models](https://huggingface.co/docs/hub/models-gated#gated-models).
 </Tip>
@@ -304,8 +304,7 @@ def _get_model_file(
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
-                "token having permission to this repo with `token` or log in with `huggingface-cli "
-                "login`."
+                "token having permission to this repo with `token` or log in with `hf auth login`."
            ) from e
        except RevisionNotFoundError as e:
            raise EnvironmentError(
@@ -515,8 +514,8 @@ class PushToHubMixin:
            Whether to make the repo private. If `None` (default), the repo will be public unless the
            organization's default is private. This value is ignored if the repo already exists.
        token (`str`, *optional*):
-            The token to use as HTTP bearer authorization for remote files. The token generated when running
-            `huggingface-cli login` (stored in `~/.huggingface`).
+            The token to use as HTTP bearer authorization for remote files. The token generated when running `hf
+            auth login` (stored in `~/.huggingface`).
        create_pr (`bool`, *optional*, defaults to `False`):
            Whether or not to create a PR with the uploaded files or directly commit.
        safe_serialization (`bool`, *optional*, defaults to `True`):
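The docstring above documents the `token` argument of `push_to_hub`; an illustrative call (repo id and token are placeholders, not part of the diff):

```python
# Illustrative sketch, not part of the diff: push a pipeline to the Hub with an
# explicit token instead of the credentials cached by `hf auth login`.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
pipe.push_to_hub("your-username/my-pipeline", private=True, token="hf_xxx")
```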