renzhc / diffusers_dcu · Commits

Commit 27039cd3
Authored Jun 09, 2022 by Patrick von Platen

Merge branch 'main' of https://github.com/huggingface/diffusers

Parents: 8841d0d1, 1f4d817c

Showing 9 changed files with 1430 additions and 1437 deletions (+1430, -1437)
models/vision/glide/convert_weights.py                        +1    -2
models/vision/glide/modeling_glide.py                         +679  -5
models/vision/glide/run_glide.py                              +2    -1
models/vision/latent_diffusion/modeling_latent_diffusion.py   +721  -0
src/diffusers/__init__.py                                     +0    -2
src/diffusers/models/__init__.py                              +0    -2
src/diffusers/models/clip_text_transformer.py                 +0    -685
src/diffusers/models/vqvae.py                                 +0    -721
src/diffusers/pipeline_utils.py                               +27   -19
models/vision/glide/convert_weights.py

@@ -3,12 +3,11 @@ from torch import nn
 from diffusers import (
     ClassifierFreeGuidanceScheduler,
-    CLIPTextModel,
     GlideDDIMScheduler,
     GLIDESuperResUNetModel,
     GLIDETextToImageUNetModel,
 )
-from modeling_glide import GLIDE
+from modeling_glide import GLIDE, CLIPTextModel
 from transformers import CLIPTextConfig, GPT2Tokenizer
models/vision/glide/modeling_glide.py (+679, -5): diff collapsed.
models/vision/glide/run_glide.py

@@ -13,9 +13,10 @@ model_id = "fusing/glide-base"
 pipeline = DiffusionPipeline.from_pretrained(model_id)
 
 # run inference (text-conditioned denoising + upscaling)
-img = pipeline("a clip art of a hugging face", generator)
+img = pipeline("a crayon drawing of a corgi", generator)
 
 # process image to PIL
+img = img.squeeze(0)
 img = ((img + 1) * 127.5).round().clamp(0, 255).to(torch.uint8).cpu().numpy()
 image_pil = PIL.Image.fromarray(img)
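For reference, the post-processing in this snippet maps the pipeline output from the [-1, 1] range into 8-bit RGB before handing it to PIL. Below is a minimal, self-contained sketch of that conversion, assuming the pipeline returns a single channels-last image of shape (1, H, W, 3); the random tensor and output file name are stand-ins, not part of the diff:

```python
import torch
import PIL.Image


def to_pil(img: torch.Tensor) -> PIL.Image.Image:
    """Convert a (1, H, W, 3) tensor with values in [-1, 1] to a PIL image."""
    img = img.squeeze(0)                      # drop the batch dimension -> (H, W, 3)
    img = ((img + 1) * 127.5).round()         # rescale [-1, 1] -> [0, 255]
    img = img.clamp(0, 255).to(torch.uint8)   # clip and cast to 8-bit
    return PIL.Image.fromarray(img.cpu().numpy())


# stand-in for a real pipeline output
dummy = torch.rand(1, 64, 64, 3) * 2 - 1
to_pil(dummy).save("sample.png")
```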
models/vision/latent_diffusion/modeling_latent_diffusion.py (+721, -0): diff collapsed.
src/diffusers/__init__.py

@@ -5,11 +5,9 @@
 __version__ = "0.0.1"
 
 from .modeling_utils import ModelMixin
-from .models.clip_text_transformer import CLIPTextModel
 from .models.unet import UNetModel
 from .models.unet_glide import GLIDESuperResUNetModel, GLIDETextToImageUNetModel
 from .models.unet_ldm import UNetLDMModel
-from .models.vqvae import VQModel
 from .pipeline_utils import DiffusionPipeline
 from .schedulers.classifier_free_guidance import ClassifierFreeGuidanceScheduler
 from .schedulers.gaussian_ddpm import GaussianDDPMScheduler
src/diffusers/models/__init__.py

@@ -16,8 +16,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .clip_text_transformer import CLIPTextModel
 from .unet import UNetModel
 from .unet_glide import GLIDESuperResUNetModel, GLIDETextToImageUNetModel
 from .unet_ldm import UNetLDMModel
-from .vqvae import VQModel
src/diffusers/models/clip_text_transformer.py (deleted, 100644 → 0, -685): diff collapsed.
src/diffusers/models/vqvae.py (deleted, 100644 → 0, -721): diff collapsed.
src/diffusers/pipeline_utils.py

@@ -34,13 +34,13 @@ logger = logging.get_logger(__name__)
 LOADABLE_CLASSES = {
     "diffusers": {
         "ModelMixin": ["save_pretrained", "from_pretrained"],
-        "CLIPTextModel": ["save_pretrained", "from_pretrained"],
+        # TODO (Anton): move to transformers
         "GaussianDDPMScheduler": ["save_config", "from_config"],
         "ClassifierFreeGuidanceScheduler": ["save_config", "from_config"],
         "GlideDDIMScheduler": ["save_config", "from_config"],
     },
     "transformers": {
         "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
         "PreTrainedModel": ["save_pretrained", "from_pretrained"],
     },
 }
@@ -83,24 +83,25 @@ class DiffusionPipeline(ConfigMixin):
         model_index_dict.pop("_diffusers_version")
         model_index_dict.pop("_module")
 
-        for name, (library_name, class_name) in model_index_dict.items():
-            importable_classes = LOADABLE_CLASSES[library_name]
-
-            # TODO: Suraj
-            if library_name == self.__module__:
-                library_name = self
-
-            library = importlib.import_module(library_name)
-            class_obj = getattr(library, class_name)
-            class_candidates = {c: getattr(library, c) for c in importable_classes.keys()}
+        for pipeline_component_name in model_index_dict.keys():
+            sub_model = getattr(self, pipeline_component_name)
+            model_cls = sub_model.__class__
 
             save_method_name = None
-            for class_name, class_candidate in class_candidates.items():
-                if issubclass(class_obj, class_candidate):
-                    save_method_name = importable_classes[class_name][0]
-
-            save_method = getattr(getattr(self, name), save_method_name)
-            save_method(os.path.join(save_directory, name))
+            # search for the model's base class in LOADABLE_CLASSES
+            for library_name, library_classes in LOADABLE_CLASSES.items():
+                library = importlib.import_module(library_name)
+                for base_class, save_load_methods in library_classes.items():
+                    class_candidate = getattr(library, base_class)
+                    if issubclass(model_cls, class_candidate):
+                        # if we found a suitable base class in LOADABLE_CLASSES then grab its save method
+                        save_method_name = save_load_methods[0]
+                        break
+                if save_method_name is not None:
+                    break
+
+            save_method = getattr(sub_model, save_method_name)
+            save_method(os.path.join(save_directory, pipeline_component_name))
 
     @classmethod
     def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
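The reworked loop above no longer trusts the class name recorded in model_index.json when saving; it asks each component for its actual class and walks LOADABLE_CLASSES until it finds a base class the component subclasses, then takes that entry's save method. A minimal, self-contained sketch of that resolution step with stand-in classes (the toy classes and simplified registry are illustrative, not the library's API):

```python
import importlib


# toy stand-ins for diffusers / transformers base classes
class ModelMixin: ...
class PreTrainedModel: ...
class MyUNet(ModelMixin): ...


# same shape as LOADABLE_CLASSES: library -> {base class name: [save method, load method]}
REGISTRY = {
    "__main__": {  # point at this module so importlib can resolve the toy classes
        "ModelMixin": ["save_pretrained", "from_pretrained"],
        "PreTrainedModel": ["save_pretrained", "from_pretrained"],
    },
}


def resolve_save_method_name(sub_model):
    save_method_name = None
    for library_name, library_classes in REGISTRY.items():
        library = importlib.import_module(library_name)
        for base_class, save_load_methods in library_classes.items():
            class_candidate = getattr(library, base_class)
            if issubclass(sub_model.__class__, class_candidate):
                save_method_name = save_load_methods[0]  # first entry is the save method
                break
        if save_method_name is not None:
            break
    return save_method_name


print(resolve_save_method_name(MyUNet()))  # -> "save_pretrained"
```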
@@ -112,7 +113,8 @@ class DiffusionPipeline(ConfigMixin):
         proxies = kwargs.pop("proxies", None)
         local_files_only = kwargs.pop("local_files_only", False)
         use_auth_token = kwargs.pop("use_auth_token", None)
 
+        # 1. Download the checkpoints and configs
         # use snapshot download here to get it working from from_pretrained
         if not os.path.isdir(pretrained_model_name_or_path):
             cached_folder = snapshot_download(
@@ -128,11 +130,12 @@ class DiffusionPipeline(ConfigMixin):
         config_dict = cls.get_config_dict(cached_folder)
 
-        module = config_dict["_module"]
+        # 2. Get class name and module candidates to load custom models
         class_name_ = config_dict["_class_name"]
         module_candidate = config_dict["_module"]
         module_candidate_name = module_candidate.replace(".py", "")
 
+        # 3. Load the pipeline class, if using custom module then load it from the hub
         # if we load from explicit class, let's use it
         if cls != DiffusionPipeline:
             pipeline_class = cls
@@ -146,6 +149,7 @@ class DiffusionPipeline(ConfigMixin):
         init_kwargs = {}
 
+        # 4. Load each module in the pipeline
         for name, (library_name, class_name) in init_dict.items():
             # if the model is not in diffusers or transformers, we need to load it from the hub
             # assumes that it's a subclass of ModelMixin
@@ -155,6 +159,7 @@ class DiffusionPipeline(ConfigMixin):
             importable_classes = ALL_IMPORTABLE_CLASSES
             class_candidates = {c: class_obj for c in ALL_IMPORTABLE_CLASSES.keys()}
         else:
+            # else we just import it from the library.
             library = importlib.import_module(library_name)
             class_obj = getattr(library, class_name)
             importable_classes = LOADABLE_CLASSES[library_name]
@@ -167,12 +172,15 @@ class DiffusionPipeline(ConfigMixin):
             load_method = getattr(class_obj, load_method_name)
 
+            # check if the module is in a subdirectory
             if os.path.isdir(os.path.join(cached_folder, name)):
                 loaded_sub_model = load_method(os.path.join(cached_folder, name))
             else:
+                # else load from the root directory
                 loaded_sub_model = load_method(cached_folder)
 
             init_kwargs[name] = loaded_sub_model  # UNet(...), # DiffusionSchedule(...)
 
+        # 5. Instantiate the pipeline
         model = pipeline_class(**init_kwargs)
 
         return model
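Taken together, the numbered steps added above (download, read model_index.json, resolve the pipeline class, load each component, instantiate) make a pipeline reloadable from either the Hub or a local folder. A minimal usage sketch, assuming this development version of the library and the fusing/glide-base checkpoint referenced in run_glide.py; the local directory name is illustrative:

```python
from diffusers import DiffusionPipeline

# steps 1-5: download the snapshot, then load every component listed in model_index.json
pipeline = DiffusionPipeline.from_pretrained("fusing/glide-base")

# write each component into its own subdirectory of the target folder
pipeline.save_pretrained("./glide-base-local")

# a local directory skips the snapshot download and loads components from disk
pipeline = DiffusionPipeline.from_pretrained("./glide-base-local")
```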