"tests/git@developer.sourcefind.cn:OpenDAS/torchani.git" did not exist on "a9df4b41bc8d1e6fcd0659b5c6dcb2ad65120202"
Commit 679c77f8 authored by anton-l

Add diffusers version and pipeline class to the Hub UA

parent c1b6ea3d
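
This commit threads the installed diffusers version (and, for pipelines, the pipeline class name) into the user_agent metadata that huggingface_hub attaches to Hub download requests, so the Hub can tell which library version and which pipeline triggered a fetch. Below is a minimal, illustrative sketch of how such a dict is commonly flattened into a User-Agent header segment; the helper name format_user_agent and the version string "0.4.0" are assumptions for the example, and the real formatting is done inside huggingface_hub.

# Illustrative sketch only: how a metadata dict like the ones added below might be
# flattened into a User-Agent segment. The helper name and version string are
# assumptions; the real formatting lives inside huggingface_hub.
from typing import Dict, Union


def format_user_agent(user_agent: Union[Dict[str, str], str, None]) -> str:
    """Join dict entries into 'key/value' segments separated by '; '."""
    if user_agent is None:
        return ""
    if isinstance(user_agent, dict):
        return "; ".join(f"{key}/{value}" for key, value in user_agent.items())
    return user_agent


print(format_user_agent({"diffusers": "0.4.0", "file_type": "model", "framework": "pytorch"}))
# -> diffusers/0.4.0; file_type/model; framework/pytorch
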
@@ -27,7 +27,7 @@ from huggingface_hub import hf_hub_download
 from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
 from requests import HTTPError
 
-from . import is_torch_available
+from . import __version__, is_torch_available
 from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax
 from .utils import (
     CONFIG_NAME,
@@ -289,7 +289,12 @@ class FlaxModelMixin:
         from_auto_class = kwargs.pop("_from_auto", False)
         subfolder = kwargs.pop("subfolder", None)
 
-        user_agent = {"file_type": "model", "framework": "flax", "from_auto_class": from_auto_class}
+        user_agent = {
+            "diffusers": __version__,
+            "file_type": "model",
+            "framework": "flax",
+            "from_auto_class": from_auto_class,
+        }
 
         # Load config if we don't provide a configuration
         config_path = config if config is not None else pretrained_model_name_or_path
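
The Flax-side metadata now carries the library version alongside the existing fields. Below is a hedged sketch of what that dict looks like at runtime and how it would be forwarded, assuming hf_hub_download is the call that ultimately receives it (as the imports in this hunk suggest); the repo id and filename are placeholders.

# Hedged sketch: the metadata the Flax loading path would attach to its Hub
# download after this commit. Repo id and filename are placeholders.
from huggingface_hub import hf_hub_download

import diffusers

user_agent = {
    "diffusers": diffusers.__version__,  # whatever version is installed
    "file_type": "model",
    "framework": "flax",
    "from_auto_class": False,
}

weights_path = hf_hub_download(
    "CompVis/stable-diffusion-v1-4",  # placeholder repo id
    filename="flax_model.msgpack",    # placeholder weights filename
    user_agent=user_agent,
)
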
@@ -26,6 +26,7 @@ from huggingface_hub import hf_hub_download
 from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
 from requests import HTTPError
 
+from . import __version__
 from .utils import CONFIG_NAME, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, WEIGHTS_NAME, logging
@@ -297,7 +298,12 @@ class ModelMixin(torch.nn.Module):
         subfolder = kwargs.pop("subfolder", None)
         device_map = kwargs.pop("device_map", None)
 
-        user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
+        user_agent = {
+            "diffusers": __version__,
+            "file_type": "model",
+            "framework": "pytorch",
+            "from_auto_class": from_auto_class,
+        }
 
         # Load config if we don't provide a configuration
         config_path = pretrained_model_name_or_path
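
The PyTorch loader gets the same treatment, differing only in "framework": "pytorch". The reported value is simply the package's __version__ attribute, which can be checked directly:

# Quick check of the version string that would be reported in the User-Agent.
import diffusers

print(diffusers.__version__)  # e.g. "0.4.0" for an installed release
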
@@ -29,6 +29,7 @@ from huggingface_hub import snapshot_download
 from PIL import Image
 from tqdm.auto import tqdm
 
+from . import __version__
 from .configuration_utils import ConfigMixin
 from .dynamic_modules_utils import get_class_from_dynamic_module
 from .schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
@@ -372,6 +373,10 @@ class DiffusionPipeline(ConfigMixin):
             if custom_pipeline is not None:
                 allow_patterns += [CUSTOM_PIPELINE_FILE_NAME]
 
+            user_agent = {"diffusers": __version__, "pipeline_class": config_dict["_class_name"]}
+            if custom_pipeline is not None:
+                user_agent["custom_pipeline"] = custom_pipeline
+
             # download all allow_patterns
             cached_folder = snapshot_download(
                 pretrained_model_name_or_path,
@@ -382,6 +387,7 @@ class DiffusionPipeline(ConfigMixin):
                 use_auth_token=use_auth_token,
                 revision=revision,
                 allow_patterns=allow_patterns,
+                user_agent=user_agent,
             )
         else:
             cached_folder = pretrained_model_name_or_path
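
At the pipeline level the metadata additionally records which pipeline class is being downloaded and, when one is requested, the custom pipeline name, and the dict is passed to snapshot_download (which the last hunk shows accepting a user_agent argument). Below is a standalone sketch of that construction; the repo id, config_dict contents, and custom_pipeline value are placeholders.

# Standalone sketch of the pipeline-level User-Agent construction added above.
# Repo id, config_dict contents, and custom_pipeline are placeholder values.
from huggingface_hub import snapshot_download

import diffusers

config_dict = {"_class_name": "StableDiffusionPipeline"}  # normally read from the pipeline config
custom_pipeline = None                                    # or e.g. "my_custom_pipeline"

user_agent = {"diffusers": diffusers.__version__, "pipeline_class": config_dict["_class_name"]}
if custom_pipeline is not None:
    user_agent["custom_pipeline"] = custom_pipeline

cached_folder = snapshot_download(
    "CompVis/stable-diffusion-v1-4",  # placeholder repo id
    user_agent=user_agent,
)
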