Commit f0ab1d9a authored by wangwf

init

parent 1c4d8c24
import argparse
import os
import os.path as osp
import shutil
from diffusers import AutoencoderKL, UNet2DConditionModel
from transformers import CLIPTextModel
import onnx
import torch
def export_text_encoder(pipeline_dir):
model_name = "text_encoder"
save_path = osp.join(pipeline_dir, model_name, "model.onnx")
model = CLIPTextModel.from_pretrained(osp.join(pipeline_dir, model_name))
input_names = ["input_ids"]
output_names = ["last_hidden_state", "pooler_output"]
dynamic_axes = {
'input_ids': {
0: 'batch_size',
1: 'sequence_length',
},
'last_hidden_state': {
0: 'batch_size',
1: 'sequence_length',
},
'pooler_output': {
0: 'batch_size',
}
}
torch.onnx.export(
model,
(torch.zeros(1, model.config.max_position_embeddings, dtype=torch.int32), ),
save_path,
export_params=True,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes
)
if osp.isfile(save_path):
print(f"Successfully exported ${model_name} to ONNX: {save_path}")
else:
raise RuntimeError(f"Failed to export ${model_name} to ONNX.")
return save_path
def export_unet(pipeline_dir):
model_name = "unet"
save_path = osp.join(pipeline_dir, model_name, "model.onnx")
tmp_dir = "./temp"
os.makedirs(tmp_dir, exist_ok=True)
tmp_path = osp.join(tmp_dir, "model.onnx")
model = UNet2DConditionModel.from_pretrained(pipeline_dir, subfolder=model_name)
input_names = ["sample", "timestep", "encoder_hidden_states"]
output_names = ["out_sample"]
dynamic_axes = {
'sample': {
0: 'batch_size',
1: 'num_channels',
2: 'height',
3: 'width'
},
'timestep': {
0: 'steps',
},
'encoder_hidden_states': {
0: 'batch_size',
1: 'sequence_length',
},
'out_sample': {
0: 'batch_size',
1: 'num_channels',
2: 'height',
3: 'width'
}
}
dummy_input = (
torch.randn(2, model.config["in_channels"], 64, 64),
torch.tensor([1], dtype=torch.int64),
torch.randn(2, 77, 1024)
)
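# ONNX protobuf files are capped at 2 GB, so the UNet is first exported to a
# temporary file and then re-saved below with its weights moved into a single
# external .data file stored next to the final model.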
torch.onnx.export(
model,
dummy_input,
tmp_path,
export_params=True,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes
)
onnx_model = onnx.load(tmp_path)
# onnx writes the external weight file next to the saved model, so remove any
# stale copy from that directory before exporting
external_data_name = osp.basename(save_path) + '.data'
external_data_path = osp.join(osp.dirname(save_path), external_data_name)
if osp.isfile(external_data_path):
os.remove(external_data_path)
onnx.save(onnx_model,
save_path,
save_as_external_data=True,
all_tensors_to_one_file=True,
location=external_data_name,
size_threshold=1024,
convert_attribute=False)
shutil.rmtree(tmp_dir)
if osp.isfile(save_path):
print(f"Successfully exported {model_name} to ONNX: {save_path}")
else:
raise RuntimeError(f"Failed to export {model_name} to ONNX.")
return save_path
def export_vae_decoder(pipeline_dir):
model_name = "vae_decoder"
sub_model_dir = osp.join(pipeline_dir, model_name)
os.makedirs(sub_model_dir, exist_ok=True)
shutil.copy(osp.join(pipeline_dir, 'vae/config.json'),
osp.join(sub_model_dir, "config.json"))
save_path = osp.join(sub_model_dir, "model.onnx")
vae = AutoencoderKL.from_pretrained(pipeline_dir, subfolder="vae")
input_names = ["latent_sample"]
output_names = ["sample"]
dynamic_axes = {
'latent_sample': {
0: 'batch_size',
2: 'latent_height',
3: 'latent_width'
},
'sample': {
0: 'batch_size',
2: 'image_height',
3: 'image_width'
}
}
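# torch.onnx.export traces the module's forward(); pointing forward at decode()
# exports only the decoder path of the autoencoder.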
vae.forward = vae.decode
torch.onnx.export(
vae,
(torch.randn(1, vae.config["latent_channels"], 64, 64), ),
save_path,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes
)
if osp.isfile(save_path):
print(f"Successfully exported ${model_name} to ONNX: {save_path}")
else:
raise RuntimeError(f"Failed to export ${model_name} to ONNX.")
return save_path
def main():
parser = argparse.ArgumentParser("Export vae decoder to ONNX")
parser.add_argument("--pipeline-dir",
type=str,
required=True,
help="The path to the sdxl pipeline directory.")
args = parser.parse_args()
export_text_encoder(args.pipeline_dir)
export_unet(args.pipeline_dir)
export_vae_decoder(args.pipeline_dir)
if __name__ == "__main__":
main()
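# Usage sketch (the script and pipeline path names are illustrative, not fixed):
#   python export_onnx.py --pipeline-dir ./stable-diffusion-2-1
# On success this writes text_encoder/model.onnx, unet/model.onnx (plus its external
# model.onnx.data file) and vae_decoder/model.onnx inside the pipeline directory.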
from collections import namedtuple
import csv
import json
import os
import os.path as osp
from diffusers import DiffusionPipeline
import migraphx_diffusers
import torch
def parse_args():
from argparse import ArgumentParser
parser = ArgumentParser(description="SDXL inference with migraphx backend")
#=========================== model load and compile ========================
parser.add_argument(
"-m",
"--model-dir",
type=str,
required=True,
help="Path to local model directory.",
)
parser.add_argument(
"--force-compile",
action="store_true",
default=False,
help="Ignore existing .mxr files and override them",
)
parser.add_argument(
"--img-size",
type=int,
default=None,
help="output image size",
)
parser.add_argument(
"--num-images-per-prompt",
type=int,
default=1,
help="The number of images to generate per prompt."
)
# --------------------------------------------------------------------------
# =============================== generation ===============================
parser.add_argument(
"-p",
"--parti-prompts-file",
type=str,
required=True,
help="Number of iteration steps",
)
parser.add_argument(
"-t",
"--num-inference-steps",
type=int,
default=50,
help="Number of iteration steps",
)
parser.add_argument(
"--save-dir",
type=str,
required=True,
help="Path to save images",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=42,
help="Random seed",
)
parser.add_argument(
"--resume",
action="store_true",
help="resume image generation",
)
# --------------------------------------------------------------------------
args = parser.parse_args()
return args
def get_name_and_migraphx_config(model_dir):
model_index_json = osp.join(model_dir, "model_index.json")
with open(model_index_json, "r") as f:
pipe_cfg = json.load(f)
if pipe_cfg["_class_name"] == "StableDiffusionXLPipeline":
return 'sdxl', migraphx_diffusers.DEFAULT_ARGS['sdxl']
elif pipe_cfg["_class_name"] == "StableDiffusionPipeline":
return 'sd2.1', migraphx_diffusers.DEFAULT_ARGS['sd2.1']
else:
raise NotImplementedError(
f"{pipe_cfg['_class_name']} has not been adapted yet")
def parse_prompts(parti_prompts_file):
Prompt = namedtuple("Prompt",
["prompt_text", "category", "challenge", "note"])
prompt_list = []
with open(parti_prompts_file, "r") as f:
csv_reader = csv.reader(f, delimiter="\t")
for i, row in enumerate(csv_reader):
if i == 0:
continue
prompt_list.append(Prompt(*row))
return prompt_list
def main():
args = parse_args()
name, migraphx_config = get_name_and_migraphx_config(args.model_dir)
if args.img_size is not None:
migraphx_config['common_args']['img_size'] = args.img_size
migraphx_config['common_args'].update(dict(
batch=args.num_images_per_prompt,
force_compile=args.force_compile,
))
pipe = DiffusionPipeline.from_pretrained(
args.model_dir,
torch_dtype=torch.float16,
migraphx_config=migraphx_config
)
pipe.to("cuda")
os.makedirs(args.save_dir, exist_ok=True)
generator = torch.Generator("cuda").manual_seed(args.seed)
print("Generating image...")
for i, prompt in enumerate(parse_prompts(args.parti_prompts_file)):
sub_dir = osp.join(args.save_dir,
prompt.category.replace(" ", "").replace("&", "_"),
f"prompt_{i:0>4d}")
prompt_json = osp.join(sub_dir, "prompt_info.json")
# =========================== resume =========================
if args.resume:
check_file_list = [osp.join(sub_dir, f"image_{j:0>2d}.png")
for j in range(args.num_images_per_prompt)]
check_file_list.append(prompt_json)
if all([osp.exists(f) for f in check_file_list]):
print(f"Skipping prompt {i}: \"{prompt.prompt_text}\"")
continue
# =========================== generate image =========================
print(f"Processing prompt {i}: \"{prompt.prompt_text}\"")
if not osp.isdir(sub_dir):
os.makedirs(sub_dir, exist_ok=True)
with open(prompt_json, "w") as f:
json.dump(prompt._asdict(), f)
images = pipe(
prompt=prompt.prompt_text,
num_inference_steps=args.num_inference_steps,
generator=generator
).images
for j, image in enumerate(images):
save_path = osp.join(sub_dir, f"{j:0>2d}.png")
image.save(save_path)
print(f"Generated image: {save_path}")
if __name__ == "__main__":
main()
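# Usage sketch (script and path names are illustrative):
#   python generate_parti_images.py -m ./sdxl-pipeline -p PartiPrompts.tsv \
#       --save-dir ./parti_images --num-images-per-prompt 4 --resume
# Each prompt gets its own sub-directory containing prompt_info.json and one PNG per image.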
if [[ $1 = "--help" ]] || [[ $1 = "-h" ]]
then
echo "Optimize stable diffusion ONNX models by using ONNXRuntime."
echo "Usage: ./onnx_optimize.sh <original_onnx_path> <converted_onnx_path>"
exit 0
fi
if [ $# -ne 2 ]
then
echo "Error: Incorrect number of arguments"
echo "Usage: ./onnx_optimize.sh <original_onnx_path> <converted_onnx_path>"
exit 1
fi
set -e
original_onnx_path=$1
converted_onnx_path=$2
tmp_save_path="./temp"
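# The pipeline is optimized in two passes (inferred from the flags below): the first
# pass keeps attention fusion enabled and its output is used only for the unet, while
# the second pass adds --disable_attention for the text encoder(s) and vae_decoder.
# The optimized unet from the first pass then replaces the one in the final directory.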
# for unet
python -m onnxruntime.transformers.models.stable_diffusion.optimize_pipeline \
-i ${original_onnx_path} \
-o ${tmp_save_path} \
--disable_bias_gelu \
--disable_bias_add \
--disable_bias_splitgelu \
--disable_nhwc_conv \
--use_group_norm_channels_first \
--use_multi_head_attention \
--float16
# for text_encoders and vae_decoder
python -m onnxruntime.transformers.models.stable_diffusion.optimize_pipeline \
-i ${original_onnx_path} \
-o ${converted_onnx_path} \
--disable_bias_gelu \
--disable_bias_add \
--disable_bias_splitgelu \
--disable_nhwc_conv \
--disable_attention \
--use_group_norm_channels_first \
--use_multi_head_attention \
--float16
rm -r ${converted_onnx_path}/unet
mv ${tmp_save_path}/unet ${converted_onnx_path}/
rm -r ${tmp_save_path}
echo "Finish to optimize ONNX models!"
echo "optimized ONNX models are saved in ${converted_onnx_path}"
import json
import os
import os.path as osp
from diffusers import DiffusionPipeline
import migraphx_diffusers
import torch
def parse_args():
from argparse import ArgumentParser
parser = ArgumentParser(description="SDXL inference with migraphx backend")
#=========================== model load and compile ========================
parser.add_argument(
"-m",
"--model-dir",
type=str,
required=True,
help="Path to local model directory.",
)
parser.add_argument(
"--force-compile",
action="store_true",
default=False,
help="Ignore existing .mxr files and override them",
)
parser.add_argument(
"--num-images-per-prompt",
type=int,
default=1,
help="The number of images to generate per prompt."
)
parser.add_argument(
"--img-size",
type=int,
default=None,
help="output image size",
)
# --------------------------------------------------------------------------
# =============================== generation ===============================
parser.add_argument(
"-t",
"--num-inference-steps",
type=int,
default=50,
help="Number of iteration steps",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=42,
help="Random seed",
)
# --------------------------------------------------------------------------
parser.add_argument(
"--examples-json",
type=str,
default="./examples/prompts_and_negative_prompts.json",
help="Prompts and negative prompts data path",
)
parser.add_argument(
"--output-dir",
type=str,
default=None,
help="Path to save images",
)
args = parser.parse_args()
return args
def get_name_and_migraphx_config(model_dir):
model_index_json = osp.join(model_dir, "model_index.json")
with open(model_index_json, "r") as f:
pipe_cfg = json.load(f)
if pipe_cfg["_class_name"] == "StableDiffusionXLPipeline":
return 'sdxl', migraphx_diffusers.DEFAULT_ARGS['sdxl']
elif pipe_cfg["_class_name"] == "StableDiffusionPipeline":
return 'sd2.1', migraphx_diffusers.DEFAULT_ARGS['sd2.1']
else:
raise NotImplementedError(
f"{pipe_cfg['_class_name']} has not been adapted yet")
def parse_prompts(examples_json):
with open(examples_json, 'r') as f:
prompt_data = json.load(f)
return prompt_data
def main():
args = parse_args()
name, migraphx_config = get_name_and_migraphx_config(args.model_dir)
if args.output_dir is None:
args.output_dir = f"./examples/{name}-images-{args.img_size}"
if args.img_size is not None:
migraphx_config['common_args']['img_size'] = args.img_size
migraphx_config['common_args'].update(dict(
batch=args.num_images_per_prompt,
force_compile=args.force_compile,
))
pipe = DiffusionPipeline.from_pretrained(
args.model_dir,
torch_dtype=torch.float16,
migraphx_config=migraphx_config
)
pipe.to("cuda")
prompt_data = parse_prompts(args.examples_json)
cnt = 0
for i, d in enumerate(prompt_data):
theme = d["theme"]
pairs = d["examples"]
sub_dir = osp.join(args.output_dir,
f"{i}-{theme.title().replace(' ', '')}")
os.makedirs(sub_dir, exist_ok=True)
for j, pair in enumerate(pairs):
print(f"Generating image {cnt}...")
prompt = pair["prompt"]
negative_prompt = pair["negative_prompt"]
print(f"Prompt: {prompt}")
print(f"negative Prompt: {negative_prompt}")
images = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=args.num_inference_steps,
generator=torch.Generator("cuda").manual_seed(args.seed)
).images
for k, image in enumerate(images):
save_path = osp.join(
sub_dir, f"theme_{i}_example_{j}_image_{k}.png")
image.save(save_path)
print(f"Image saved: {save_path}")
cnt += 1
print(f"Total {cnt} images Generated!")
if __name__ == "__main__":
main()
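# Expected layout of --examples-json, with field names taken from the parsing code
# above (the prompt text itself is purely illustrative):
# [
#   {
#     "theme": "animals",
#     "examples": [
#       {"prompt": "a corgi wearing sunglasses", "negative_prompt": "blurry, low quality"}
#     ]
#   }
# ]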
import json
import os.path as osp
from diffusers import DiffusionPipeline
import migraphx_diffusers
import torch
def parse_args():
from argparse import ArgumentParser
parser = ArgumentParser(description="SDXL inference with migraphx backend")
#=========================== model load and compile ========================
parser.add_argument(
"-m",
"--model-dir",
type=str,
required=True,
help="Path to local model directory.",
)
parser.add_argument(
"--force-compile",
action="store_true",
default=False,
help="Ignore existing .mxr files and override them",
)
parser.add_argument(
"--img-size",
type=int,
default=None,
help="output image size",
)
parser.add_argument(
"--num-images-per-prompt",
type=int,
default=1,
help="The number of images to generate per prompt."
)
# --------------------------------------------------------------------------
# =============================== generation ===============================
parser.add_argument(
"-p",
"--prompt",
type=str,
required=True,
help="Prompt for describe image content, style and so on."
)
parser.add_argument(
"-n",
"--negative-prompt",
type=str,
default=None,
help="Negative prompt",
)
parser.add_argument(
"-t",
"--num-inference-steps",
type=int,
default=50,
help="Number of iteration steps",
)
parser.add_argument(
"--save-prefix",
type=str,
default=None,
help="Prefix of path for saving results",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=42,
help="Random seed",
)
# --------------------------------------------------------------------------
args = parser.parse_args()
return args
def get_name_and_migraphx_config(model_dir):
model_index_json = osp.join(model_dir, "model_index.json")
with open(model_index_json, "r") as f:
pipe_cfg = json.load(f)
if pipe_cfg["_class_name"] == "StableDiffusionXLPipeline":
return 'sdxl', migraphx_diffusers.DEFAULT_ARGS['sdxl']
elif pipe_cfg["_class_name"] == "StableDiffusionPipeline":
return 'sd2.1', migraphx_diffusers.DEFAULT_ARGS['sd2.1']
else:
raise NotImplementedError(
f"{pipe_cfg['_class_name']} has not been adapted yet")
def main():
args = parse_args()
name, migraphx_config = get_name_and_migraphx_config(args.model_dir)
if args.save_prefix is None:
args.save_prefix = f"./{name}_output"
if args.img_size is not None:
migraphx_config['common_args']['img_size'] = args.img_size
migraphx_config['common_args'].update(dict(
batch=args.num_images_per_prompt,
force_compile=args.force_compile,
))
pipe = DiffusionPipeline.from_pretrained(
args.model_dir,
torch_dtype=torch.float16,
migraphx_config=migraphx_config
)
pipe.to("cuda")
print("Generating image...")
images = pipe(
prompt=args.prompt,
negative_prompt=args.negative_prompt,
num_inference_steps=args.num_inference_steps,
generator=torch.Generator("cuda").manual_seed(args.seed)
).images
for i, image in enumerate(images):
save_path = f"{args.save_prefix}_{i}.png"
image.save(save_path)
print(f"Generated image: {save_path}")
if __name__ == "__main__":
main()
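# Usage sketch (the script name and model path are illustrative):
#   python txt2img.py -m ./sdxl-pipeline -p "a lighthouse at dawn" -n "blurry" -t 50
# Images are written to <save-prefix>_<i>.png; the prefix defaults to ./sdxl_output
# or ./sd2.1_output depending on the detected pipeline.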
from diffusers import DiffusionPipeline
import argparse
import os
import torch
import time
import migraphx_diffusers
parser = argparse.ArgumentParser("test sd2.1")
parser.add_argument('model_dir', type=str, help="path to sd2.1 models")
parser.add_argument('--result-dir', type=str, default="./results", help="path to save the generated images")
args = parser.parse_args()
os.makedirs(args.result_dir, exist_ok=True)
# Base prompt
prompt = "An astronaut riding a green horse"
# Configuration sweep parameters
widths = [512]
heights = [512]
steps_list = [20]
batch_sizes = [1, 2, 4, 8] # [8]
mgx_config = migraphx_diffusers.DEFAULT_ARGS['sd2.1']
# Iterate over the configuration combinations
for width, height in zip(widths, heights):
assert width == height, "Only square images are supported!"
mgx_config["common_args"]["img_size"] = width
for batch_size in batch_sizes:
mgx_config["common_args"]["batch"] = batch_size
# Initialize the pipeline
pipe = DiffusionPipeline.from_pretrained(
args.model_dir,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
migraphx_config=mgx_config,
)
pipe.to("cuda")
# Warm up
for i in range(1):
pipe(
prompt=prompt,
width=width,
height=height,
num_inference_steps=1,
num_images_per_prompt=batch_size
)
for num_inference_steps in steps_list:
print(f"\n生成配置: {width}x{height}, steps={num_inference_steps}, batch={batch_size}")
time_list = []
for i in range(1):
torch.cuda.synchronize()
time_start = time.time()
result = pipe(
prompt=prompt,
width=width,
height=height,
num_inference_steps=num_inference_steps,
num_images_per_prompt=batch_size,
generator=torch.Generator("cuda").manual_seed(42)
)
torch.cuda.synchronize()
time_end = time.time()
time_list.append((time_end - time_start)*1000)
print(f"time cost: {time_list}, avg: {sum(time_list)/len(time_list)}")
# Save the images generated for this configuration
print(len(result.images))
for i, image in enumerate(result.images):
filename = os.path.join(args.result_dir, f"output_{width}x{height}_steps{num_inference_steps}_batch{batch_size}_{i}.png")
image.save(filename)
print(f"保存图片: {filename}")
print("所有配置组合生成完成!")
import json
import os.path as osp
import time
from diffusers import DiffusionPipeline
import migraphx_diffusers
from migraphx_diffusers import AutoTimer
import torch
def parse_args():
date_str = time.strftime("%Y%m%d-%H%M%S", time.localtime())
from argparse import ArgumentParser
parser = ArgumentParser(description="SDXL inference with migraphx backend")
#=========================== model load and compile ========================
parser.add_argument(
"-m",
"--model-dir",
type=str,
required=True,
help="Path to local model directory.",
)
parser.add_argument(
"--force-compile",
action="store_true",
default=False,
help="Ignore existing .mxr files and override them",
)
parser.add_argument(
"--img-size",
type=int,
default=None,
help="output image size",
)
parser.add_argument(
"--num-images-per-prompt",
type=int,
default=1,
help="The number of images to generate per prompt."
)
# --------------------------------------------------------------------------
# =============================== generation ===============================
parser.add_argument(
"-t",
"--num-inference-steps",
type=int,
default=50,
help="Number of iteration steps",
)
parser.add_argument(
"--out-csv-file",
type=str,
default=f"./perf-{date_str}.csv",
help="Prefix of path for saving results",
)
# --------------------------------------------------------------------------
# =============================== time count ===============================
parser.add_argument(
"--num-warmup-loops",
type=int,
default=1,
help="warmup loops",
)
parser.add_argument(
"--num-count-loops",
type=int,
default=100,
help="time count loops",
)
# --------------------------------------------------------------------------
args = parser.parse_args()
return args
def get_name_and_migraphx_config(model_dir):
model_index_json = osp.join(model_dir, "model_index.json")
with open(model_index_json, "r") as f:
pipe_cfg = json.load(f)
if pipe_cfg["_class_name"] == "StableDiffusionXLPipeline":
return 'sdxl', migraphx_diffusers.DEFAULT_ARGS['sdxl']
elif pipe_cfg["_class_name"] == "StableDiffusionPipeline":
return 'sd2.1', migraphx_diffusers.DEFAULT_ARGS['sd2.1']
else:
raise NotImplementedError(
f"{pipe_cfg['_class_name']} has not been adapted yet")
def main():
args = parse_args()
pipe_name, migraphx_config = get_name_and_migraphx_config(args.model_dir)
assert pipe_name in ['sdxl', 'sd2.1'], "Only SDXL and SD2.1 are supported!"
if args.img_size is not None:
migraphx_config['common_args']['img_size'] = args.img_size
migraphx_config['common_args'].update(dict(
batch=args.num_images_per_prompt,
force_compile=args.force_compile,
))
pipe = DiffusionPipeline.from_pretrained(
args.model_dir,
torch_dtype=torch.float16,
migraphx_config=migraphx_config
)
pipe.to("cuda")
t = AutoTimer()
t.add_targets([
(pipe, "end2end"),
(pipe.text_encoder, "text_encoder"),
(pipe.unet, "unet"),
(pipe.vae.decode, "vae_decoder")
])
if hasattr(pipe, "text_encoder_2"):
t.add_target(pipe.text_encoder_2, key="text_encoder_2")
for i in range(args.num_warmup_loops + args.num_count_loops):
if i == args.num_warmup_loops:
t.start_work()
pipe(prompt="the ocean in dream",
negative_prompt=None,
num_inference_steps=args.num_inference_steps)
table = t.summary(batchsize=migraphx_config['common_args']['batch'])
t.clear()
with open(args.out_csv_file, 'w') as f:
f.write(table.get_csv_string())
if __name__ == "__main__":
main()
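# Usage sketch (the script name is illustrative):
#   python perf_test.py -m ./sdxl-pipeline -t 50 --num-warmup-loops 1 --num-count-loops 100
# Per-stage timings (end2end, text_encoder, unet, vae_decoder and, for SDXL,
# text_encoder_2) are written to the CSV given by --out-csv-file.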