"git@developer.sourcefind.cn:OpenDAS/pytorch3d.git" did not exist on "ce3fce49d7ad1a680d8c9be660164d5f7a0bb976"
Unverified Commit 78744b6a authored by Patrick von Platen, committed by GitHub

No more use_auth_token=True (#733)

* up

* uP

* uP

* make style

* Apply suggestions from code review

* up

* finish
parent 3dcc75cb
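In short, this commit drops the explicit `use_auth_token=True` argument across the docs, examples, and tests: once you are logged in with `huggingface-cli login`, `from_pretrained` picks up the cached token automatically (hence the bump to `huggingface-hub>=0.10.0` below). A minimal sketch of the new pattern, mirroring the snippets touched in this diff:

```python
# Log in once on the machine so the token is cached locally:
#   huggingface-cli login
from diffusers import StableDiffusionPipeline

# No use_auth_token=True needed any more; the cached token is used automatically.
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
```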
@@ -78,7 +78,7 @@ You need to accept the model license before downloading or using the Stable Diff
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")
 prompt = "a photo of an astronaut riding a horse on mars"
@@ -114,7 +114,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 pipe = pipe.to("cuda")
@@ -140,7 +139,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     revision="fp16",
     torch_dtype=torch.float16,
     scheduler=lms,
-    use_auth_token=True
 )
 pipe = pipe.to("cuda")
@@ -169,10 +167,9 @@ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
     model_id_or_path,
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 # or download via git clone https://huggingface.co/CompVis/stable-diffusion-v1-4
-# and pass `model_id_or_path="./stable-diffusion-v1-4"` without having to use `use_auth_token=True`.
+# and pass `model_id_or_path="./stable-diffusion-v1-4"`.
 pipe = pipe.to(device)
 # let's download an initial image
@@ -219,10 +216,9 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
     model_id_or_path,
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 # or download via git clone https://huggingface.co/CompVis/stable-diffusion-v1-4
-# and pass `model_id_or_path="./stable-diffusion-v1-4"` without having to use `use_auth_token=True`.
+# and pass `model_id_or_path="./stable-diffusion-v1-4"`.
 pipe = pipe.to(device)
 prompt = "a cat sitting on a bench"
......
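The hunks above also keep the fully offline alternative: instead of authenticating at all, clone the weights locally and point the pipeline at the folder. A short sketch of that path, assuming the clone lives next to the script (as in the comments above):

```python
# Alternative without Hub authentication: clone the weights locally first
#   git clone https://huggingface.co/CompVis/stable-diffusion-v1-4
from diffusers import StableDiffusionImg2ImgPipeline

model_id_or_path = "./stable-diffusion-v1-4"  # path to the local clone
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path)
pipe = pipe.to("cuda")
```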
@@ -101,7 +101,7 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing
 from torch import autocast
 from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")
 prompt = "a photo of an astronaut riding a horse on mars"
@@ -126,7 +126,7 @@ from diffusers import StableDiffusionImg2ImgPipeline
 # load the pipeline
 device = "cuda"
 pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)
 # let's download an initial image
@@ -177,7 +177,7 @@ mask_image = download_image(mask_url).resize((512, 512))
 device = "cuda"
 pipe = StableDiffusionInpaintPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)
 prompt = "a cat sitting on a bench"
......
@@ -56,7 +56,7 @@ If you use a CUDA GPU, you can take advantage of `torch.autocast` to perform inf
 from torch import autocast
 from diffusers import StableDiffusionPipeline
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")
 prompt = "a photo of an astronaut riding a horse on mars"
@@ -75,7 +75,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 ```
@@ -97,7 +96,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 pipe = pipe.to("cuda")
@@ -152,8 +150,6 @@ def generate_inputs():
 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    # scheduler=scheduler,
-    use_auth_token=True,
     revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
@@ -218,8 +214,6 @@ class UNet2DConditionOutput:
 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    # scheduler=scheduler,
-    use_auth_token=True,
     revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
......
@@ -31,7 +31,7 @@ We recommend to "prime" the pipeline using an additional one-time pass through i
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("mps")
 prompt = "a photo of an astronaut riding a horse on mars"
......
@@ -31,7 +31,6 @@ pipe = StableDiffusionOnnxPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="onnx",
     provider="CUDAExecutionProvider",
-    use_auth_token=True,
 )
 prompt = "a photo of an astronaut riding a horse on mars"
......
@@ -25,7 +25,7 @@ from diffusers import StableDiffusionImg2ImgPipeline
 # load the pipeline
 device = "cuda"
 pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)
 # let's download an initial image
......
@@ -37,7 +37,7 @@ mask_image = download_image(mask_url).resize((512, 512))
 device = "cuda"
 pipe = StableDiffusionInpaintPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)
 prompt = "a cat sitting on a bench"
......
@@ -83,7 +83,6 @@ def main(args):
             args.dataset_name,
             args.dataset_config_name,
             cache_dir=args.cache_dir,
-            use_auth_token=True if args.use_auth_token else None,
             split="train",
         )
     else:
@@ -222,7 +221,6 @@ if __name__ == "__main__":
     parser.add_argument("--ema_power", type=float, default=3 / 4)
     parser.add_argument("--ema_max_decay", type=float, default=0.9999)
     parser.add_argument("--push_to_hub", action="store_true")
-    parser.add_argument("--use_auth_token", action="store_true")
     parser.add_argument("--hub_token", type=str, default=None)
     parser.add_argument("--hub_model_id", type=str, default=None)
     parser.add_argument("--hub_private_repo", action="store_true")
......
@@ -70,7 +70,7 @@ def onnx_export(
 @torch.no_grad()
 def convert_models(model_path: str, output_path: str, opset: int):
-    pipeline = StableDiffusionPipeline.from_pretrained(model_path, use_auth_token=True)
+    pipeline = StableDiffusionPipeline.from_pretrained(model_path)
     output_path = Path(output_path)
     # TEXT ENCODER
......
@@ -86,7 +86,7 @@ _deps = [
     "flake8>=3.8.3",
     "flax>=0.4.1",
     "hf-doc-builder>=0.3.0",
-    "huggingface-hub>=0.9.1",
+    "huggingface-hub>=0.10.0",
     "importlib_metadata",
     "isort>=5.5.4",
     "jax>=0.2.8,!=0.3.2,<=0.3.6",
......
@@ -145,7 +145,8 @@ class ConfigMixin:
         <Tip>
-        Passing `use_auth_token=True`` is required when you want to use a private model.
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models).
         </Tip>
@@ -238,7 +239,7 @@ class ConfigMixin:
                 f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
                 " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
                 " token having permission to this repo with `use_auth_token` or log in with `huggingface-cli"
-                " login` and pass `use_auth_token=True`."
+                " login`."
             )
         except RevisionNotFoundError:
             raise EnvironmentError(
......
@@ -10,7 +10,7 @@ deps = {
     "flake8": "flake8>=3.8.3",
     "flax": "flax>=0.4.1",
     "hf-doc-builder": "hf-doc-builder>=0.3.0",
-    "huggingface-hub": "huggingface-hub>=0.9.1",
+    "huggingface-hub": "huggingface-hub>=0.10.0",
    "importlib_metadata": "importlib_metadata",
     "isort": "isort>=5.5.4",
     "jax": "jax>=0.2.8,!=0.3.2,<=0.3.6",
......
@@ -198,7 +198,7 @@ def get_cached_module_file(
     <Tip>
-    Passing `use_auth_token=True` is required when you want to use a private model.
+    Passing `` is required when you want to use a private model.
     </Tip>
@@ -306,7 +306,7 @@ def get_class_from_dynamic_module(
     <Tip>
-    Passing `use_auth_token=True` is required when you want to use a private model.
+    Passing `` is required when you want to use a private model.
     </Tip>
......
@@ -357,7 +357,7 @@ class FlaxModelMixin:
                 f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                 "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                 "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
-                "login` and pass `use_auth_token=True`."
+                "login`."
             )
         except RevisionNotFoundError:
             raise EnvironmentError(
......
@@ -270,7 +270,8 @@ class ModelMixin(torch.nn.Module):
         <Tip>
-        Passing `use_auth_token=True`` is required when you want to use a private model.
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models).
         </Tip>
@@ -338,7 +339,7 @@ class ModelMixin(torch.nn.Module):
                 f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                 "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                 "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
-                "login` and pass `use_auth_token=True`."
+                "login`."
             )
         except RevisionNotFoundError:
             raise EnvironmentError(
......
@@ -249,8 +249,8 @@ class FlaxDiffusionPipeline(ConfigMixin):
         <Tip>
-        Passing `use_auth_token=True`` is required when you want to use a private model, *e.g.*
-        `"CompVis/stable-diffusion-v1-4"`
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"CompVis/stable-diffusion-v1-4"`
         </Tip>
@@ -272,15 +272,13 @@ class FlaxDiffusionPipeline(ConfigMixin):
         >>> # Download pipeline that requires an authorization token
         >>> # For more information on access tokens, please refer to this section
         >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)
-        >>> pipeline = FlaxDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+        >>> pipeline = FlaxDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
         >>> # Download pipeline, but overwrite scheduler
         >>> from diffusers import LMSDiscreteScheduler
         >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
-        >>> pipeline = FlaxDiffusionPipeline.from_pretrained(
-        ...     "CompVis/stable-diffusion-v1-4", scheduler=scheduler, use_auth_token=True
-        ... )
+        >>> pipeline = FlaxDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
         ```
         """
         cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
......
@@ -240,8 +240,8 @@ class DiffusionPipeline(ConfigMixin):
         <Tip>
-        Passing `use_auth_token=True`` is required when you want to use a private model, *e.g.*
-        `"CompVis/stable-diffusion-v1-4"`
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"CompVis/stable-diffusion-v1-4"`
         </Tip>
@@ -263,15 +263,13 @@ class DiffusionPipeline(ConfigMixin):
         >>> # Download pipeline that requires an authorization token
         >>> # For more information on access tokens, please refer to this section
         >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)
-        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
         >>> # Download pipeline, but overwrite scheduler
         >>> from diffusers import LMSDiscreteScheduler
         >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
-        >>> pipeline = DiffusionPipeline.from_pretrained(
-        ...     "CompVis/stable-diffusion-v1-4", scheduler=scheduler, use_auth_token=True
-        ... )
+        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
         ```
         """
         cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
......
@@ -88,7 +88,7 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")
 prompt = "a photo of an astronaut riding a horse on mars"
@@ -114,7 +114,6 @@ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 ).to(device)
 # let's download an initial image
@@ -164,7 +163,6 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 ).to(device)
 prompt = "a cat sitting on a bench"
......
@@ -61,7 +61,7 @@ pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-4")
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")
 prompt = "a photo of an astronaut riding a horse on mars"
@@ -81,7 +81,6 @@ scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="sca
 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     scheduler=scheduler,
-    use_auth_token=True
 ).to("cuda")
 prompt = "a photo of an astronaut riding a horse on mars"
@@ -105,7 +104,6 @@ lms = LMSDiscreteScheduler(
 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     scheduler=lms,
-    use_auth_token=True
 ).to("cuda")
 prompt = "a photo of an astronaut riding a horse on mars"
......
@@ -1001,7 +1001,7 @@ class PipelineTesterMixin(unittest.TestCase):
     @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
     def test_stable_diffusion(self):
         # make sure here that pndm scheduler skips prk
-        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", use_auth_token=True)
+        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -1023,7 +1023,7 @@ class PipelineTesterMixin(unittest.TestCase):
     @slow
     @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
     def test_stable_diffusion_fast_ddim(self):
-        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", use_auth_token=True)
+        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)
@@ -1158,9 +1158,9 @@ class PipelineTesterMixin(unittest.TestCase):
     @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
     def test_lms_stable_diffusion_pipeline(self):
         model_id = "CompVis/stable-diffusion-v1-1"
-        pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True).to(torch_device)
+        pipe = StableDiffusionPipeline.from_pretrained(model_id).to(torch_device)
         pipe.set_progress_bar_config(disable=None)
-        scheduler = LMSDiscreteScheduler.from_config(model_id, subfolder="scheduler", use_auth_token=True)
+        scheduler = LMSDiscreteScheduler.from_config(model_id, subfolder="scheduler")
         pipe.scheduler = scheduler
         prompt = "a photograph of an astronaut riding a horse"
@@ -1179,9 +1179,9 @@ class PipelineTesterMixin(unittest.TestCase):
     def test_stable_diffusion_memory_chunking(self):
         torch.cuda.reset_peak_memory_stats()
         model_id = "CompVis/stable-diffusion-v1-4"
-        pipe = StableDiffusionPipeline.from_pretrained(
-            model_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True
-        ).to(torch_device)
+        pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
+            torch_device
+        )
         pipe.set_progress_bar_config(disable=None)
         prompt = "a photograph of an astronaut riding a horse"
@@ -1219,9 +1219,9 @@ class PipelineTesterMixin(unittest.TestCase):
     def test_stable_diffusion_text2img_pipeline_fp16(self):
         torch.cuda.reset_peak_memory_stats()
         model_id = "CompVis/stable-diffusion-v1-4"
-        pipe = StableDiffusionPipeline.from_pretrained(
-            model_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True
-        ).to(torch_device)
+        pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
+            torch_device
+        )
         pipe.set_progress_bar_config(disable=None)
         prompt = "a photograph of an astronaut riding a horse"
@@ -1258,7 +1258,6 @@ class PipelineTesterMixin(unittest.TestCase):
         pipe = StableDiffusionPipeline.from_pretrained(
             model_id,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1291,7 +1290,6 @@ class PipelineTesterMixin(unittest.TestCase):
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             model_id,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1335,7 +1333,6 @@ class PipelineTesterMixin(unittest.TestCase):
             model_id,
             scheduler=lms,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1379,7 +1376,6 @@ class PipelineTesterMixin(unittest.TestCase):
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
             model_id,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1426,7 +1422,6 @@ class PipelineTesterMixin(unittest.TestCase):
             model_id,
             scheduler=lms,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1452,7 +1447,7 @@ class PipelineTesterMixin(unittest.TestCase):
     @slow
     def test_stable_diffusion_onnx(self):
         sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider", use_auth_token=True
+            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
         )
         prompt = "A painting of a squirrel eating a burger"
@@ -1487,7 +1482,7 @@ class PipelineTesterMixin(unittest.TestCase):
         test_callback_fn.has_been_called = False
         pipe = StableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1533,7 +1528,7 @@ class PipelineTesterMixin(unittest.TestCase):
         init_image = init_image.resize((768, 512))
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
        )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1586,7 +1581,7 @@ class PipelineTesterMixin(unittest.TestCase):
         )
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
@@ -1629,7 +1624,7 @@ class PipelineTesterMixin(unittest.TestCase):
         test_callback_fn.has_been_called = False
         pipe = StableDiffusionOnnxPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="onnx", provider="CPUExecutionProvider"
+            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
         )
         pipe.set_progress_bar_config(disable=None)
......