Unverified Commit 323a9e1f authored by Anton Lozhkov, committed by GitHub

Add diffusers version and pipeline class to the Hub UA (#814)



* Add diffusers version and pipeline class to the Hub UA

* Fallback to class name for pipelines

* Update src/diffusers/modeling_utils.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>

* Update src/diffusers/modeling_flax_utils.py
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>

* Remove autoclass
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
parent 60c384bc
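
Context for the diffs below: both hf_hub_download and snapshot_download in huggingface_hub accept a user_agent mapping whose entries are folded into the HTTP User-Agent header sent to the Hub, which is how the dicts built in this commit reach the server. A minimal sketch of that call path, assuming diffusers and huggingface_hub are installed; the repo id and filename are examples for illustration, not part of the commit:

# Minimal sketch, not part of the commit: pass a telemetry dict through
# huggingface_hub's `user_agent` parameter. The repo id below is only an
# example of a public pipeline repository.
from huggingface_hub import hf_hub_download

import diffusers

user_agent = {
    "diffusers": diffusers.__version__,
    "file_type": "model",
    "framework": "pytorch",
}

config_path = hf_hub_download(
    repo_id="google/ddpm-cifar10-32",  # example repo id
    filename="model_index.json",
    user_agent=user_agent,
)
print(config_path)  # local cache path of the downloaded file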
src/diffusers/modeling_flax_utils.py
@@ -27,7 +27,7 @@ from huggingface_hub import hf_hub_download
 from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
 from requests import HTTPError

-from . import is_torch_available
+from . import __version__, is_torch_available
 from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax
 from .utils import (
     CONFIG_NAME,
@@ -286,10 +286,13 @@ class FlaxModelMixin:
         local_files_only = kwargs.pop("local_files_only", False)
         use_auth_token = kwargs.pop("use_auth_token", None)
         revision = kwargs.pop("revision", None)
-        from_auto_class = kwargs.pop("_from_auto", False)
         subfolder = kwargs.pop("subfolder", None)

-        user_agent = {"file_type": "model", "framework": "flax", "from_auto_class": from_auto_class}
+        user_agent = {
+            "diffusers": __version__,
+            "file_type": "model",
+            "framework": "flax",
+        }

         # Load config if we don't provide a configuration
         config_path = config if config is not None else pretrained_model_name_or_path
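
For reference, a user_agent dict like the one above is flattened into "key/value" pairs by huggingface_hub before being sent; the helper below only approximates that formatting (the library also prepends its own entries such as the Python and hf_hub versions), shown with an example diffusers version string:

# Illustrative approximation only: roughly how a user_agent dict becomes part of
# the User-Agent header. The real formatting happens inside huggingface_hub.
def format_user_agent(ua: dict) -> str:
    return "; ".join(f"{key}/{value}" for key, value in ua.items())


print(format_user_agent({"diffusers": "0.6.0", "file_type": "model", "framework": "flax"}))
# diffusers/0.6.0; file_type/model; framework/flax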
src/diffusers/modeling_utils.py
@@ -26,6 +26,7 @@ from huggingface_hub import hf_hub_download
 from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
 from requests import HTTPError

+from . import __version__
 from .utils import CONFIG_NAME, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, WEIGHTS_NAME, logging
@@ -292,12 +293,15 @@ class ModelMixin(torch.nn.Module):
         local_files_only = kwargs.pop("local_files_only", False)
         use_auth_token = kwargs.pop("use_auth_token", None)
         revision = kwargs.pop("revision", None)
-        from_auto_class = kwargs.pop("_from_auto", False)
         torch_dtype = kwargs.pop("torch_dtype", None)
         subfolder = kwargs.pop("subfolder", None)
         device_map = kwargs.pop("device_map", None)

-        user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
+        user_agent = {
+            "diffusers": __version__,
+            "file_type": "model",
+            "framework": "pytorch",
+        }

         # Load config if we don't provide a configuration
         config_path = pretrained_model_name_or_path
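
The `from . import __version__` added in both modeling files is the package-level version string defined in diffusers/__init__.py, i.e. the value now reported in the User-Agent. A quick way to inspect it, assuming diffusers is installed:

# Quick check, assuming diffusers is installed: the imported __version__ is the
# package-level string, which normally matches the installed distribution metadata.
from importlib.metadata import version

import diffusers

print(diffusers.__version__)  # whatever version is installed
print(version("diffusers"))   # typically the same string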
src/diffusers/pipeline_utils.py
@@ -29,6 +29,7 @@ from huggingface_hub import snapshot_download
 from PIL import Image
 from tqdm.auto import tqdm

+from . import __version__
 from .configuration_utils import ConfigMixin
 from .dynamic_modules_utils import get_class_from_dynamic_module
 from .schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
@@ -373,6 +374,11 @@ class DiffusionPipeline(ConfigMixin):
             if custom_pipeline is not None:
                 allow_patterns += [CUSTOM_PIPELINE_FILE_NAME]

+            requested_pipeline_class = config_dict.get("_class_name", cls.__name__)
+            user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}
+            if custom_pipeline is not None:
+                user_agent["custom_pipeline"] = custom_pipeline
+
             # download all allow_patterns
             cached_folder = snapshot_download(
                 pretrained_model_name_or_path,
@@ -383,6 +389,7 @@ class DiffusionPipeline(ConfigMixin):
                 use_auth_token=use_auth_token,
                 revision=revision,
                 allow_patterns=allow_patterns,
+                user_agent=user_agent,
             )
         else:
             cached_folder = pretrained_model_name_or_path
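
The "fallback to class name for pipelines" mentioned in the commit message is the `config_dict.get("_class_name", cls.__name__)` line above: the pipeline class recorded in model_index.json is reported when present, and the class that `from_pretrained` was called on is used otherwise. A small illustration, where the JSON fragment stands in for a pipeline's model_index.json:

# Small illustration of the fallback; the JSON fragment is a stand-in for a
# pipeline repository's model_index.json, not taken from a real download.
import json

from diffusers import DiffusionPipeline

config_dict = json.loads('{"_class_name": "StableDiffusionPipeline"}')
print(config_dict.get("_class_name", DiffusionPipeline.__name__))  # StableDiffusionPipeline

config_dict = {}  # no _class_name recorded -> fall back to the calling class's name
print(config_dict.get("_class_name", DiffusionPipeline.__name__))  # DiffusionPipeline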