Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
stable_diffusion_diffusers
Commits
e178cada
Commit
e178cada
authored
Apr 08, 2025
by
wangwei990215
Browse files
initial commit
parents
Changes
569
Expand all
Show whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
773 additions
and
0 deletions
+773
-0
diffusers-0.27.0/benchmarks/benchmark_ip_adapters.py
diffusers-0.27.0/benchmarks/benchmark_ip_adapters.py
+32
-0
diffusers-0.27.0/benchmarks/benchmark_sd_img.py
diffusers-0.27.0/benchmarks/benchmark_sd_img.py
+29
-0
diffusers-0.27.0/benchmarks/benchmark_sd_inpainting.py
diffusers-0.27.0/benchmarks/benchmark_sd_inpainting.py
+28
-0
diffusers-0.27.0/benchmarks/benchmark_t2i_adapter.py
diffusers-0.27.0/benchmarks/benchmark_t2i_adapter.py
+28
-0
diffusers-0.27.0/benchmarks/benchmark_t2i_lcm_lora.py
diffusers-0.27.0/benchmarks/benchmark_t2i_lcm_lora.py
+23
-0
diffusers-0.27.0/benchmarks/benchmark_text_to_image.py
diffusers-0.27.0/benchmarks/benchmark_text_to_image.py
+40
-0
diffusers-0.27.0/benchmarks/push_results.py
diffusers-0.27.0/benchmarks/push_results.py
+72
-0
diffusers-0.27.0/benchmarks/run_all.py
diffusers-0.27.0/benchmarks/run_all.py
+97
-0
diffusers-0.27.0/benchmarks/utils.py
diffusers-0.27.0/benchmarks/utils.py
+98
-0
diffusers-0.27.0/docker/diffusers-flax-cpu/Dockerfile
diffusers-0.27.0/docker/diffusers-flax-cpu/Dockerfile
+45
-0
diffusers-0.27.0/docker/diffusers-flax-tpu/Dockerfile
diffusers-0.27.0/docker/diffusers-flax-tpu/Dockerfile
+47
-0
diffusers-0.27.0/docker/diffusers-onnxruntime-cpu/Dockerfile
diffusers-0.27.0/docker/diffusers-onnxruntime-cpu/Dockerfile
+45
-0
diffusers-0.27.0/docker/diffusers-onnxruntime-cuda/Dockerfile
...users-0.27.0/docker/diffusers-onnxruntime-cuda/Dockerfile
+45
-0
diffusers-0.27.0/docker/diffusers-pytorch-compile-cuda/Dockerfile
...s-0.27.0/docker/diffusers-pytorch-compile-cuda/Dockerfile
+45
-0
diffusers-0.27.0/docker/diffusers-pytorch-cpu/Dockerfile
diffusers-0.27.0/docker/diffusers-pytorch-cpu/Dockerfile
+45
-0
diffusers-0.27.0/docker/diffusers-pytorch-cuda/Dockerfile
diffusers-0.27.0/docker/diffusers-pytorch-cuda/Dockerfile
+45
-0
diffusers-0.27.0/docker/diffusers-pytorch-xformers-cuda/Dockerfile
...-0.27.0/docker/diffusers-pytorch-xformers-cuda/Dockerfile
+0
-0
diffusers-0.27.0/docs/README.md
diffusers-0.27.0/docs/README.md
+0
-0
diffusers-0.27.0/docs/TRANSLATING.md
diffusers-0.27.0/docs/TRANSLATING.md
+0
-0
diffusers-0.27.0/docs/source/_config.py
diffusers-0.27.0/docs/source/_config.py
+9
-0
No files found.
diffusers-0.27.0/benchmarks/benchmark_ip_adapters.py
0 → 100755
View file @
e178cada
import argparse
import sys

sys.path.append(".")
from base_classes import IPAdapterTextToImageBenchmark  # noqa: E402


# Base checkpoint -> (IP-Adapter repository, adapter weight filename).
IP_ADAPTER_CKPTS = {
    "runwayml/stable-diffusion-v1-5": ("h94/IP-Adapter", "ip-adapter_sd15.bin"),
    "stabilityai/stable-diffusion-xl-base-1.0": ("h94/IP-Adapter", "ip-adapter_sdxl.bin"),
}


if __name__ == "__main__":
    # CLI mirrors the other benchmark entry points in this directory.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--ckpt",
        type=str,
        default="runwayml/stable-diffusion-v1-5",
        choices=list(IP_ADAPTER_CKPTS.keys()),
    )
    cli.add_argument("--batch_size", type=int, default=1)
    cli.add_argument("--num_inference_steps", type=int, default=50)
    cli.add_argument("--model_cpu_offload", action="store_true")
    cli.add_argument("--run_compile", action="store_true")
    args = cli.parse_args()

    # The benchmark class reads the adapter pair from the namespace.
    args.ip_adapter_id = IP_ADAPTER_CKPTS[args.ckpt]
    runner = IPAdapterTextToImageBenchmark(args)
    # Tag the checkpoint id so the reported row is distinguishable from the
    # plain text-to-image run of the same base model.
    args.ckpt = f"{args.ckpt} (IP-Adapter)"
    runner.benchmark(args)
diffusers-0.27.0/benchmarks/benchmark_sd_img.py
0 → 100755
View file @
e178cada
import argparse
import sys

sys.path.append(".")
from base_classes import ImageToImageBenchmark, TurboImageToImageBenchmark  # noqa: E402


if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--ckpt",
        type=str,
        default="runwayml/stable-diffusion-v1-5",
        choices=[
            "runwayml/stable-diffusion-v1-5",
            "stabilityai/stable-diffusion-2-1",
            "stabilityai/stable-diffusion-xl-refiner-1.0",
            "stabilityai/sdxl-turbo",
        ],
    )
    cli.add_argument("--batch_size", type=int, default=1)
    cli.add_argument("--num_inference_steps", type=int, default=50)
    cli.add_argument("--model_cpu_offload", action="store_true")
    cli.add_argument("--run_compile", action="store_true")
    args = cli.parse_args()

    # Turbo checkpoints need the dedicated benchmark wrapper.
    if "turbo" in args.ckpt:
        runner = TurboImageToImageBenchmark(args)
    else:
        runner = ImageToImageBenchmark(args)
    runner.benchmark(args)
diffusers-0.27.0/benchmarks/benchmark_sd_inpainting.py
0 → 100755
View file @
e178cada
import argparse
import sys

sys.path.append(".")
from base_classes import InpaintingBenchmark  # noqa: E402


if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--ckpt",
        type=str,
        default="runwayml/stable-diffusion-v1-5",
        choices=[
            "runwayml/stable-diffusion-v1-5",
            "stabilityai/stable-diffusion-2-1",
            "stabilityai/stable-diffusion-xl-base-1.0",
        ],
    )
    cli.add_argument("--batch_size", type=int, default=1)
    cli.add_argument("--num_inference_steps", type=int, default=50)
    cli.add_argument("--model_cpu_offload", action="store_true")
    cli.add_argument("--run_compile", action="store_true")
    args = cli.parse_args()

    runner = InpaintingBenchmark(args)
    runner.benchmark(args)
diffusers-0.27.0/benchmarks/benchmark_t2i_adapter.py
0 → 100755
View file @
e178cada
import argparse
import sys

sys.path.append(".")
from base_classes import T2IAdapterBenchmark, T2IAdapterSDXLBenchmark  # noqa: E402


if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--ckpt",
        type=str,
        default="TencentARC/t2iadapter_canny_sd14v1",
        choices=["TencentARC/t2iadapter_canny_sd14v1", "TencentARC/t2i-adapter-canny-sdxl-1.0"],
    )
    cli.add_argument("--batch_size", type=int, default=1)
    cli.add_argument("--num_inference_steps", type=int, default=50)
    cli.add_argument("--model_cpu_offload", action="store_true")
    cli.add_argument("--run_compile", action="store_true")
    args = cli.parse_args()

    # The SD 1.4 adapter uses the base benchmark; anything else is SDXL.
    if args.ckpt == "TencentARC/t2iadapter_canny_sd14v1":
        runner = T2IAdapterBenchmark(args)
    else:
        runner = T2IAdapterSDXLBenchmark(args)
    runner.benchmark(args)
diffusers-0.27.0/benchmarks/benchmark_t2i_lcm_lora.py
0 → 100755
View file @
e178cada
import argparse
import sys

sys.path.append(".")
from base_classes import LCMLoRATextToImageBenchmark  # noqa: E402


if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--ckpt",
        type=str,
        default="stabilityai/stable-diffusion-xl-base-1.0",
    )
    cli.add_argument("--batch_size", type=int, default=1)
    # LCM-LoRA targets few-step inference, hence the low default.
    cli.add_argument("--num_inference_steps", type=int, default=4)
    cli.add_argument("--model_cpu_offload", action="store_true")
    cli.add_argument("--run_compile", action="store_true")
    args = cli.parse_args()

    runner = LCMLoRATextToImageBenchmark(args)
    runner.benchmark(args)
diffusers-0.27.0/benchmarks/benchmark_text_to_image.py
0 → 100755
View file @
e178cada
import argparse
import sys

sys.path.append(".")
from base_classes import TextToImageBenchmark, TurboTextToImageBenchmark  # noqa: E402


# Every text-to-image checkpoint exercised by run_all.py (which imports this).
ALL_T2I_CKPTS = [
    "runwayml/stable-diffusion-v1-5",
    "segmind/SSD-1B",
    "stabilityai/stable-diffusion-xl-base-1.0",
    "kandinsky-community/kandinsky-2-2-decoder",
    "warp-ai/wuerstchen",
    "stabilityai/sdxl-turbo",
]


if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--ckpt",
        type=str,
        default="runwayml/stable-diffusion-v1-5",
        choices=ALL_T2I_CKPTS,
    )
    cli.add_argument("--batch_size", type=int, default=1)
    cli.add_argument("--num_inference_steps", type=int, default=50)
    cli.add_argument("--model_cpu_offload", action="store_true")
    cli.add_argument("--run_compile", action="store_true")
    args = cli.parse_args()

    # Turbo checkpoints get their own benchmark wrapper.
    runner_cls = TurboTextToImageBenchmark if "turbo" in args.ckpt else TextToImageBenchmark
    runner_cls(args).benchmark(args)
diffusers-0.27.0/benchmarks/push_results.py
0 → 100755
View file @
e178cada
import
glob
import
sys
import
pandas
as
pd
from
huggingface_hub
import
hf_hub_download
,
upload_file
from
huggingface_hub.utils._errors
import
EntryNotFoundError
sys
.
path
.
append
(
"."
)
from
utils
import
BASE_PATH
,
FINAL_CSV_FILE
,
GITHUB_SHA
,
REPO_ID
,
collate_csv
# noqa: E402
def has_previous_benchmark() -> str:
    """Return the local path of the previously published results CSV.

    Downloads ``FINAL_CSV_FILE`` from the ``REPO_ID`` dataset repo on the Hub.
    Returns None when the repo does not contain that file yet (first run).
    """
    try:
        return hf_hub_download(repo_id=REPO_ID, repo_type="dataset", filename=FINAL_CSV_FILE)
    except EntryNotFoundError:
        return None
def filter_float(value):
    """Recover the numeric part of a previously formatted results cell.

    Cells written by an earlier run look like ``"1.234 (+5.00%)"``; only the
    leading token is the measurement. Non-string values pass through as-is.
    """
    if not isinstance(value, str):
        return value
    return float(value.split()[0])
def push_to_hf_dataset():
    """Collate per-run CSVs, annotate deltas vs. the last published run, and upload.

    Side effects: overwrites FINAL_CSV_FILE locally and pushes it to the
    REPO_ID dataset repo on the Hub.
    """
    # Merge every per-benchmark CSV under BASE_PATH into one file.
    all_csvs = sorted(glob.glob(f"{BASE_PATH}/*.csv"))
    collate_csv(all_csvs, FINAL_CSV_FILE)

    # If there's an existing benchmark file, we should report the changes.
    csv_path = has_previous_benchmark()
    if csv_path is not None:
        current_results = pd.read_csv(FINAL_CSV_FILE)
        previous_results = pd.read_csv(csv_path)

        # Only measurement columns are compared; configuration columns
        # (batch size, steps, total GPU memory) are excluded.
        numeric_columns = current_results.select_dtypes(include=["float64", "int64"]).columns
        numeric_columns = [
            c for c in numeric_columns if c not in ["batch_size", "num_inference_steps", "actual_gpu_memory (gbs)"]
        ]

        for column in numeric_columns:
            # Previous values may already carry a " (+x.xx%)" suffix from an
            # earlier run — strip it back to a plain float first.
            previous_results[column] = previous_results[column].map(lambda x: filter_float(x))

            # Calculate the percentage change
            current_results[column] = current_results[column].astype(float)
            previous_results[column] = previous_results[column].astype(float)
            percent_change = ((current_results[column] - previous_results[column]) / previous_results[column]) * 100

            # Format the values with '+' or '-' sign and append to original values
            # (negative values already render their own '-' sign).
            current_results[column] = current_results[column].map(str) + percent_change.map(
                lambda x: f" ({'+' if x > 0 else ''}{x:.2f}%)"
            )
            # There might be newly added rows. So, filter out the NaNs.
            current_results[column] = current_results[column].map(lambda x: x.replace(" (nan%)", ""))

        # Overwrite the current result file.
        current_results.to_csv(FINAL_CSV_FILE, index=False)

    # GITHUB_SHA is only set in CI; fall back to a generic message locally.
    commit_message = f"upload from sha: {GITHUB_SHA}" if GITHUB_SHA is not None else "upload benchmark results"
    upload_file(
        repo_id=REPO_ID,
        path_in_repo=FINAL_CSV_FILE,
        path_or_fileobj=FINAL_CSV_FILE,
        repo_type="dataset",
        commit_message=commit_message,
    )


if __name__ == "__main__":
    push_to_hf_dataset()
diffusers-0.27.0/benchmarks/run_all.py
0 → 100755
View file @
e178cada
import
glob
import
subprocess
import
sys
from
typing
import
List
sys
.
path
.
append
(
"."
)
from
benchmark_text_to_image
import
ALL_T2I_CKPTS
# noqa: E402
# Glob pattern matching every benchmark entry-point script in this directory.
PATTERN = "benchmark_*.py"
class SubprocessCallException(Exception):
    """Raised by run_command() when a benchmark subprocess exits non-zero."""

    pass
# Taken from `test_examples_utils.py`
def run_command(command: List[str], return_stdout=False):
    """Execute *command*, capturing combined stdout/stderr.

    When ``return_stdout`` is True the captured output is returned (decoded to
    str when possible); otherwise the function returns None. A non-zero exit
    is re-raised as SubprocessCallException carrying the captured output.
    """
    try:
        captured = subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as error:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{error.output.decode()}"
        ) from error
    if not return_stdout:
        return None
    return captured.decode("utf-8") if hasattr(captured, "decode") else captured
def main():
    """Run every benchmark script, first with defaults, then per-checkpoint variants."""
    python_files = glob.glob(PATTERN)

    # Pass 1: each script with its default checkpoint (text-to-image is
    # covered per-checkpoint in pass 2, so it is skipped here).
    for file in python_files:
        print(f"****** Running file: {file} ******")

        # Run with canonical settings.
        if file != "benchmark_text_to_image.py":
            command = f"python {file}"
            run_command(command.split())
            # Repeat the same run with torch.compile enabled.
            command += " --run_compile"
            run_command(command.split())

    # Run variants.
    for file in python_files:
        if file == "benchmark_text_to_image.py":
            # Every known text-to-image checkpoint, eager then compiled.
            for ckpt in ALL_T2I_CKPTS:
                command = f"python {file} --ckpt {ckpt}"
                # Turbo models are benchmarked at their intended step count.
                if "turbo" in ckpt:
                    command += " --num_inference_steps 1"
                run_command(command.split())
                command += " --run_compile"
                run_command(command.split())
        elif file == "benchmark_sd_img.py":
            for ckpt in ["stabilityai/stable-diffusion-xl-refiner-1.0", "stabilityai/sdxl-turbo"]:
                command = f"python {file} --ckpt {ckpt}"
                if ckpt == "stabilityai/sdxl-turbo":
                    command += " --num_inference_steps 2"
                run_command(command.split())
                command += " --run_compile"
                run_command(command.split())
        elif file in ["benchmark_sd_inpainting.py", "benchmark_ip_adapters.py"]:
            # These were already run with the SD 1.5 default above; add SDXL.
            sdxl_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
            command = f"python {file} --ckpt {sdxl_ckpt}"
            run_command(command.split())
            command += " --run_compile"
            run_command(command.split())
        elif file in ["benchmark_controlnet.py", "benchmark_t2i_adapter.py"]:
            # NOTE(review): benchmark_controlnet.py is matched by PATTERN but
            # is not part of this commit — presumably added separately.
            sdxl_ckpt = (
                "diffusers/controlnet-canny-sdxl-1.0"
                if "controlnet" in file
                else "TencentARC/t2i-adapter-canny-sdxl-1.0"
            )
            command = f"python {file} --ckpt {sdxl_ckpt}"
            run_command(command.split())
            command += " --run_compile"
            run_command(command.split())


if __name__ == "__main__":
    main()
diffusers-0.27.0/benchmarks/utils.py
0 → 100755
View file @
e178cada
import
argparse
import
csv
import
gc
import
os
from
dataclasses
import
dataclass
from
typing
import
Dict
,
List
,
Union
import
torch
import
torch.utils.benchmark
as
benchmark
# Commit SHA injected by GitHub Actions; None for local runs.
GITHUB_SHA = os.getenv("GITHUB_SHA", None)
# Column order shared by write_to_csv() and collate_csv().
BENCHMARK_FIELDS = [
    "pipeline_cls",
    "ckpt_id",
    "batch_size",
    "num_inference_steps",
    "model_cpu_offload",
    "run_compile",
    "time (secs)",
    "memory (gbs)",
    "actual_gpu_memory (gbs)",
    "github_sha",
]

# Fixed prompt so runs are comparable across checkpoints.
PROMPT = "ghibli style, a fantasy landscape with castles"
# Directory scanned for per-run CSVs by push_results.py.
BASE_PATH = os.getenv("BASE_PATH", ".")
# Total memory of GPU 0 in GiB; overridable via env. NOTE(review): the CUDA
# query runs at import time, so importing this module requires a visible GPU
# unless TOTAL_GPU_MEMORY is set.
TOTAL_GPU_MEMORY = float(os.getenv("TOTAL_GPU_MEMORY", torch.cuda.get_device_properties(0).total_memory / (1024**3)))

# Hub dataset repo that stores the collated benchmark results.
REPO_ID = "diffusers/benchmarks"
FINAL_CSV_FILE = "collated_results.csv"
@dataclass
class BenchmarkInfo:
    """Measured outcome of a single benchmark run."""

    # Run latency; serialized as "time (secs)" by generate_csv_dict().
    time: float
    # Peak memory; serialized as "memory (gbs)" by generate_csv_dict().
    memory: float
def flush():
    """Collect Python garbage, drop CUDA caches, and reset peak-memory stats."""
    gc.collect()
    for reset in (
        torch.cuda.empty_cache,
        torch.cuda.reset_max_memory_allocated,
        torch.cuda.reset_peak_memory_stats,
    ):
        reset()
def bytes_to_giga_bytes(bytes):
    """Format a byte count as gigabytes (GiB) with three decimal places."""
    # Dividing by an exact power of two, so this matches repeated /1024 exactly.
    gigabytes = bytes / (1024**3)
    return f"{gigabytes:.3f}"
def benchmark_fn(f, *args, **kwargs):
    """Time ``f(*args, **kwargs)`` and return the mean latency in seconds,
    formatted as a string with three decimal places.

    Uses torch.utils.benchmark.Timer, which handles warm-up and CUDA
    synchronization internally.
    """
    t0 = benchmark.Timer(
        stmt="f(*args, **kwargs)",
        globals={"args": args, "kwargs": kwargs, "f": f},
        num_threads=torch.get_num_threads(),
    )
    # blocked_autorange() repeats the statement until timings stabilize.
    return f"{(t0.blocked_autorange().mean):.3f}"
def generate_csv_dict(
    pipeline_cls: str, ckpt: str, args: argparse.Namespace, benchmark_info: BenchmarkInfo
) -> Dict[str, Union[str, bool, float]]:
    """Packs benchmarking data into a dictionary for later serialization.

    Keys match BENCHMARK_FIELDS so the result can go straight to write_to_csv().
    ``args`` must carry batch_size, num_inference_steps, model_cpu_offload and
    run_compile (set by the benchmark scripts' argument parsers).
    """
    data_dict = {
        "pipeline_cls": pipeline_cls,
        "ckpt_id": ckpt,
        "batch_size": args.batch_size,
        "num_inference_steps": args.num_inference_steps,
        "model_cpu_offload": args.model_cpu_offload,
        "run_compile": args.run_compile,
        "time (secs)": benchmark_info.time,
        "memory (gbs)": benchmark_info.memory,
        # Device capacity (module-level constant), recorded for context.
        "actual_gpu_memory (gbs)": f"{(TOTAL_GPU_MEMORY):.3f}",
        "github_sha": GITHUB_SHA,
    }
    return data_dict
def write_to_csv(file_name: str, data_dict: Dict[str, Union[str, bool, float]]):
    """Write one benchmark record to *file_name* as a header-plus-one-row CSV."""
    with open(file_name, mode="w", newline="") as handle:
        sink = csv.DictWriter(handle, fieldnames=BENCHMARK_FIELDS)
        sink.writeheader()
        sink.writerow(data_dict)
def collate_csv(input_files: List[str], output_file: str):
    """Merge identically structured CSVs into *output_file* with one header."""
    with open(output_file, mode="w", newline="") as sink:
        merged = csv.DictWriter(sink, fieldnames=BENCHMARK_FIELDS)
        merged.writeheader()
        for path in input_files:
            with open(path, mode="r") as source:
                for record in csv.DictReader(source):
                    merged.writerow(record)
diffusers-0.27.0/docker/diffusers-flax-cpu/Dockerfile
0 → 100755
View file @
e178cada
# CPU-only image for running diffusers' Flax/JAX workloads.
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

# Suppress interactive prompts from apt during the build.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3 -m uv pip install --upgrade --no-cache-dir \
    clu \
    "jax[cpu]>=0.2.16,!=0.3.2" \
    "flax>=0.4.1" \
    "jaxlib>=0.1.65" && \
    python3 -m uv pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers

CMD ["/bin/bash"]
\ No newline at end of file
diffusers-0.27.0/docker/diffusers-flax-tpu/Dockerfile
0 → 100755
View file @
e178cada
# TPU image for diffusers' Flax/JAX workloads (jax[tpu] from Google's wheel index).
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

# Suppress interactive prompts from apt during the build.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3 -m pip install --no-cache-dir \
    "jax[tpu]>=0.2.16,!=0.3.2" \
    -f https://storage.googleapis.com/jax-releases/libtpu_releases.html && \
    python3 -m uv pip install --upgrade --no-cache-dir \
    clu \
    "flax>=0.4.1" \
    "jaxlib>=0.1.65" && \
    python3 -m uv pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers

CMD ["/bin/bash"]
\ No newline at end of file
diffusers-0.27.0/docker/diffusers-onnxruntime-cpu/Dockerfile
0 → 100755
View file @
e178cada
# CPU image with pinned PyTorch + onnxruntime for diffusers' ONNX pipelines.
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

# Suppress interactive prompts from apt during the build.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3 -m uv pip install --no-cache-dir \
    torch==2.1.2 \
    torchvision==0.16.2 \
    torchaudio==2.1.2 \
    onnxruntime \
    --extra-index-url https://download.pytorch.org/whl/cpu && \
    python3 -m uv pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers

CMD ["/bin/bash"]
\ No newline at end of file
diffusers-0.27.0/docker/diffusers-onnxruntime-cuda/Dockerfile
0 → 100755
View file @
e178cada
# CUDA image with onnxruntime-gpu for diffusers' ONNX pipelines.
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

# Suppress interactive prompts from apt during the build.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3 -m uv pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio \
    "onnxruntime-gpu>=1.13.1" \
    --extra-index-url https://download.pytorch.org/whl/cu117 && \
    python3 -m uv pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers

CMD ["/bin/bash"]
\ No newline at end of file
diffusers-0.27.0/docker/diffusers-pytorch-compile-cuda/Dockerfile
0 → 100755
View file @
e178cada
# CUDA image on Python 3.9 for torch.compile test runs.
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

# Suppress interactive prompts from apt during the build.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    libgl1 \
    python3.9 \
    python3.9-dev \
    python3-pip \
    python3.9-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3.9 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3.9 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3.9 -m uv pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio \
    invisible_watermark && \
    python3.9 -m pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers

CMD ["/bin/bash"]
diffusers-0.27.0/docker/diffusers-pytorch-cpu/Dockerfile
0 → 100755
View file @
e178cada
# CPU-only PyTorch image for diffusers.
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

# Suppress interactive prompts from apt during the build.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    python3.8 \
    python3-pip \
    libgl1 \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3 -m uv pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio \
    invisible_watermark \
    --extra-index-url https://download.pytorch.org/whl/cpu && \
    python3 -m uv pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers matplotlib

CMD ["/bin/bash"]
diffusers-0.27.0/docker/diffusers-pytorch-cuda/Dockerfile
0 → 100755
View file @
e178cada
# Primary CUDA PyTorch image for diffusers.
FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"

# Suppress interactive prompts from apt during the build.
ENV DEBIAN_FRONTEND=noninteractive

RUN apt update && \
    apt install -y bash \
    build-essential \
    git \
    git-lfs \
    curl \
    ca-certificates \
    libsndfile1-dev \
    libgl1 \
    python3.8 \
    python3-pip \
    python3.8-venv && \
    rm -rf /var/lib/apt/lists

# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
    python3 -m uv pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio \
    invisible_watermark && \
    python3 -m pip install --no-cache-dir \
    accelerate \
    datasets \
    hf-doc-builder \
    huggingface-hub \
    Jinja2 \
    librosa \
    numpy \
    scipy \
    tensorboard \
    transformers \
    pytorch-lightning

CMD ["/bin/bash"]
diffusers-0.27.0/docker/diffusers-pytorch-xformers-cuda/Dockerfile
0 → 100755
View file @
e178cada
This diff is collapsed.
Click to expand it.
diffusers-0.27.0/docs/README.md
0 → 100755
View file @
e178cada
This diff is collapsed.
Click to expand it.
diffusers-0.27.0/docs/TRANSLATING.md
0 → 100755
View file @
e178cada
This diff is collapsed.
Click to expand it.
diffusers-0.27.0/docs/source/_config.py
0 → 100755
View file @
e178cada
# docstyle-ignore
# Shell-style install snippet injected as the first cell of every generated
# doc notebook so the examples run in a fresh environment.
INSTALL_CONTENT = """
# Diffusers installation
! pip install diffusers transformers datasets accelerate
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/diffusers.git
"""

# Consumed by the doc builder when converting docs to notebooks.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
Prev
1
2
3
4
5
6
7
…
29
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment