Commit aad7b6c7 authored by chenzk's avatar chenzk
Browse files

v1.0

parents
Pipeline #2416 canceled with stages
<!DOCTYPE html>
<html>
<head>
    <!-- Import the component -->
    <script src="https://cdn.jsdelivr.net/npm/@google/model-viewer@3.1.1/dist/model-viewer.min.js" type="module"></script>
    <script>
        document.addEventListener('DOMContentLoaded', () => {
            const modelViewers = document.querySelectorAll('model-viewer');
            modelViewers.forEach(modelViewer => {
                modelViewer.addEventListener('load', (event) => {
                    // Restyle the first material of the loaded model.
                    const [material] = modelViewer.model.materials;
                    // Dark gray base color (#2B2C2E with full alpha), normalized to 0..1.
                    let color = [43, 44, 46, 255];
                    color = color.map(x => x / 255);
                    material.pbrMetallicRoughness.setMetallicFactor(0.1); // mostly dielectric
                    material.pbrMetallicRoughness.setRoughnessFactor(0.7); // fairly rough
                    material.pbrMetallicRoughness.setBaseColorFactor(color); // dark gray RGBA
                });
            });
        });
    </script>
    <style>
        body {
            margin: 0;
            font-family: Arial, sans-serif;
        }
        .centered-container {
            display: flex;
            justify-content: center;
            align-items: center;
            border-radius: 8px;
            border-color: #e5e7eb;
            border-style: solid;
            border-width: 1px;
        }
    </style>
</head>
<body>
<div class="centered-container">
    <!-- Placeholder: build_model_viewer_html() in gradio_app replaces the literal
         string "<model-viewer>" with a fully configured viewer element.
         Do not "fix" the unclosed tag — the replace() match depends on it. -->
    <model-viewer>
</div>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
    <!-- Import the component -->
    <script src="https://cdn.jsdelivr.net/npm/@google/model-viewer@3.1.1/dist/model-viewer.min.js"
            type="module"></script>
    <style>
        body {
            margin: 0;
            font-family: Arial, sans-serif;
        }
        .centered-container {
            display: flex;
            justify-content: center;
            align-items: center;
            border-radius: 8px;
            border-color: #e5e7eb;
            border-style: solid;
            border-width: 1px;
        }
    </style>
</head>
<body>
<div class="centered-container">
    <!-- Placeholder: build_model_viewer_html() in gradio_app replaces the literal
         string "<model-viewer>" with a fully configured viewer element.
         Do not "fix" the unclosed tag — the replace() match depends on it. -->
    <model-viewer>
</div>
<script>
    document.addEventListener('DOMContentLoaded', () => {
        const modelViewers = document.querySelectorAll('model-viewer');
        modelViewers.forEach(modelViewer => {
            modelViewer.addEventListener('load', (event) => {
                // White-mesh variant: only tweak the PBR factors, keep base color.
                const [material] = modelViewer.model.materials;
                material.pbrMetallicRoughness.setMetallicFactor(0.1);
                material.pbrMetallicRoughness.setRoughnessFactor(0.5);
            });
        });
    });
</script>
</body>
</html>
\ No newline at end of file
# Blender add-on metadata (shown in Preferences > Add-ons).
bl_info = {
    "name": "Hunyuan3D-2 Generator",
    "author": "Tencent Hunyuan3D",
    "version": (1, 0),
    "blender": (3, 0, 0),  # minimum supported Blender version
    "location": "View3D > Sidebar > Hunyuan3D-2 3D Generator",
    "description": "Generate/Texturing 3D models from text descriptions or images",
    "category": "3D View",
}
import base64
import os
import tempfile
import threading
import bpy
import requests
from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty
class Hunyuan3DProperties(bpy.types.PropertyGroup):
    """Scene-level settings for the Hunyuan3D-2 generator panel.

    Attached to the Scene as ``gen_3d_props`` in register().
    """
    prompt: StringProperty(
        name="Text Prompt",
        description="Describe what you want to generate",
        default=""
    )
    api_url: StringProperty(
        name="API URL",
        description="URL of the Text-to-3D API service",
        default="http://localhost:8080"
    )
    # True while a generation request is in flight (disables the button).
    is_processing: BoolProperty(
        name="Processing",
        default=False
    )
    job_id: StringProperty(
        name="Job ID",
        default=""
    )
    status_message: StringProperty(
        name="Status Message",
        default=""
    )
    # Image path; FILE_PATH subtype gives the file-browser widget and may
    # hold Blender-relative '//' paths (resolved in the operator).
    image_path: StringProperty(
        name="Image",
        description="Select an image to upload",
        subtype='FILE_PATH'
    )
    # Octree resolution for shape generation.
    octree_resolution: IntProperty(
        name="Octree Resolution",
        description="Octree resolution for the 3D generation",
        default=256,
        min=128,
        max=512,
    )
    num_inference_steps: IntProperty(
        name="Number of Inference Steps",
        description="Number of inference steps for the 3D generation",
        default=20,
        min=20,
        max=50
    )
    guidance_scale: FloatProperty(
        name="Guidance Scale",
        description="Guidance scale for the 3D generation",
        default=5.5,
        min=1.0,
        max=10.0
    )
    # Whether the server should also synthesize a texture.
    texture: BoolProperty(
        name="Generate Texture",
        description="Whether to generate texture for the 3D model",
        default=False
    )
class Hunyuan3DOperator(bpy.types.Operator):
    """Modal operator that calls the Hunyuan3D-2 HTTP API to generate (or
    texture) a 3D model from a text prompt, an image, or the currently
    selected mesh, then imports the returned GLB into the scene.
    """
    bl_idname = "object.generate_3d"
    bl_label = "Generate 3D Model"
    bl_description = "Generate a 3D model from text description, an image or a selected mesh"

    # Snapshot of the scene properties, copied in invoke() so the worker
    # thread never reads bpy data off the main thread.
    job_id = ''
    prompt = ""
    api_url = ""
    image_path = ""
    octree_resolution = 256
    num_inference_steps = 20
    guidance_scale = 5.5
    texture = False
    selected_mesh_base64 = ""  # base64-encoded GLB export of the selected mesh
    selected_mesh = None       # reference to the selected mesh object (for texturing/placement)
    thread = None
    task_finished = False

    def modal(self, context, event):
        """Watch for cancellation and for worker-thread completion."""
        props = context.scene.gen_3d_props
        if event.type in {'RIGHTMOUSE', 'ESC'}:
            # Bug fix: re-enable the UI on cancel (is_processing used to stay
            # True forever). NOTE(review): the worker thread itself is not
            # interrupted; its eventual result is simply ignored.
            props.is_processing = False
            return {'CANCELLED'}
        if self.task_finished:
            print("Threaded task completed")
            self.task_finished = False
            props.is_processing = False
            # Bug fix: end the modal loop once the background task is done;
            # the original fell through to PASS_THROUGH indefinitely, so the
            # modal handler never terminated.
            return {'FINISHED'}
        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        """Collect settings, export the selected mesh (if any), and start the
        background request thread."""
        props = context.scene.gen_3d_props
        self.prompt = props.prompt
        self.api_url = props.api_url
        self.image_path = props.image_path
        self.octree_resolution = props.octree_resolution
        self.num_inference_steps = props.num_inference_steps
        self.guidance_scale = props.guidance_scale
        self.texture = props.texture
        if self.prompt == "" and self.image_path == "":
            self.report({'WARNING'}, "Please enter some text or select an image first.")
            return {'FINISHED'}
        # Remember the first selected mesh (used for texturing and placement).
        for obj in context.selected_objects:
            if obj.type == 'MESH':
                self.selected_mesh = obj
                break
        if self.selected_mesh:
            # Export the selection to a temporary GLB and keep it as base64.
            temp_glb_file = tempfile.NamedTemporaryFile(delete=False, suffix=".glb")
            temp_glb_file.close()
            bpy.ops.export_scene.gltf(filepath=temp_glb_file.name, use_selection=True)
            with open(temp_glb_file.name, "rb") as file:
                mesh_data = file.read()
            mesh_b64_str = base64.b64encode(mesh_data).decode()
            os.unlink(temp_glb_file.name)
            self.selected_mesh_base64 = mesh_b64_str
        props.is_processing = True
        # Resolve Blender-relative ('//') image paths against the .blend file dir.
        blend_file_dir = os.path.dirname(bpy.data.filepath)
        self.report({'INFO'}, f"blend_file_dir {blend_file_dir}")
        self.report({'INFO'}, f"image_path {self.image_path}")
        if self.image_path.startswith('//'):
            self.image_path = self.image_path[2:]
            self.image_path = os.path.join(blend_file_dir, self.image_path)
        if self.selected_mesh and self.texture:
            props.status_message = "Texturing Selected Mesh...\n" \
                                   "This may take several minutes depending \n on your GPU power."
        else:
            mesh_type = 'Textured Mesh' if self.texture else 'White Mesh'
            prompt_type = 'Text Prompt' if self.prompt else 'Image'
            props.status_message = f"Generating {mesh_type} with {prompt_type}...\n" \
                                   "This may take several minutes depending \n on your GPU power."
        self.thread = threading.Thread(target=self.generate_model)
        self.thread.start()
        wm = context.window_manager
        wm.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def generate_model(self):
        """Worker-thread body: POST the request to the API, then schedule the
        GLB import on the main thread via bpy.app.timers.

        NOTE(review): self.report() is called from this background thread;
        Blender only guarantees operator reports on the main thread — confirm
        this is acceptable or route the messages through the timer instead.
        """
        self.report({'INFO'}, "Generation Start")
        base_url = self.api_url.rstrip('/')
        try:
            if self.selected_mesh_base64 and self.texture:
                # Texture the selected mesh, guided by an image if one exists.
                if self.image_path and os.path.exists(self.image_path):
                    self.report({'INFO'}, "Post Texturing with Image")
                    with open(self.image_path, "rb") as file:
                        image_data = file.read()
                    img_b64_str = base64.b64encode(image_data).decode()
                    response = requests.post(
                        f"{base_url}/generate",
                        json={
                            "mesh": self.selected_mesh_base64,
                            "image": img_b64_str,
                            "octree_resolution": self.octree_resolution,
                            "num_inference_steps": self.num_inference_steps,
                            "guidance_scale": self.guidance_scale,
                            "texture": self.texture
                        },
                    )
                else:
                    self.report({'INFO'}, "Post Texturing with Text")
                    response = requests.post(
                        f"{base_url}/generate",
                        json={
                            "mesh": self.selected_mesh_base64,
                            "text": self.prompt,
                            "octree_resolution": self.octree_resolution,
                            "num_inference_steps": self.num_inference_steps,
                            "guidance_scale": self.guidance_scale,
                            "texture": self.texture
                        },
                    )
            else:
                if self.image_path:
                    if not os.path.exists(self.image_path):
                        self.report({'ERROR'}, f"Image path does not exist {self.image_path}")
                        raise Exception(f'Image path does not exist {self.image_path}')
                    self.report({'INFO'}, "Post Start Image to 3D")
                    with open(self.image_path, "rb") as file:
                        image_data = file.read()
                    img_b64_str = base64.b64encode(image_data).decode()
                    response = requests.post(
                        f"{base_url}/generate",
                        json={
                            "image": img_b64_str,
                            "octree_resolution": self.octree_resolution,
                            "num_inference_steps": self.num_inference_steps,
                            "guidance_scale": self.guidance_scale,
                            "texture": self.texture
                        },
                    )
                else:
                    self.report({'INFO'}, "Post Start Text to 3D")
                    response = requests.post(
                        f"{base_url}/generate",
                        json={
                            "text": self.prompt,
                            "octree_resolution": self.octree_resolution,
                            "num_inference_steps": self.num_inference_steps,
                            "guidance_scale": self.guidance_scale,
                            "texture": self.texture
                        },
                    )
            self.report({'INFO'}, "Post Done")
            if response.status_code != 200:
                self.report({'ERROR'}, f"Generation failed: {response.text}")
                return
            # Save the returned GLB bytes to a temporary file for import.
            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".glb")
            temp_file.write(response.content)
            temp_file.close()

            def import_handler():
                # Runs on the main thread (bpy.app.timers), where bpy.ops is safe.
                bpy.ops.import_scene.gltf(filepath=temp_file.name)
                os.unlink(temp_file.name)
                new_obj = bpy.context.selected_objects[0] if bpy.context.selected_objects else None
                if new_obj and self.selected_mesh and self.texture:
                    # Copy the source mesh transform onto the import, then hide
                    # the original in viewport and renders.
                    new_obj.location = self.selected_mesh.location
                    new_obj.rotation_euler = self.selected_mesh.rotation_euler
                    new_obj.scale = self.selected_mesh.scale
                    self.selected_mesh.hide_set(True)
                    self.selected_mesh.hide_render = True
                return None  # returning None unregisters the timer

            bpy.app.timers.register(import_handler)
        except Exception as e:
            self.report({'ERROR'}, f"Error: {str(e)}")
        finally:
            self.task_finished = True
            self.selected_mesh_base64 = ""
class Hunyuan3DPanel(bpy.types.Panel):
    """Sidebar panel (View3D > Sidebar > Hunyuan3D-2) exposing the generator UI."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Hunyuan3D-2'
    bl_label = 'Hunyuan3D-2 3D Generator'

    def draw(self, context):
        """Draw inputs, generation parameters, the run button and status lines."""
        layout = self.layout
        props = context.scene.gen_3d_props
        layout.prop(props, "api_url")
        layout.prop(props, "prompt")
        # Image picker (alternative to the text prompt).
        layout.prop(props, "image_path")
        # Generation parameters.
        layout.prop(props, "octree_resolution")
        layout.prop(props, "num_inference_steps")
        layout.prop(props, "guidance_scale")
        layout.prop(props, "texture")
        row = layout.row()
        # Disable the button while a request is running.
        row.enabled = not props.is_processing
        row.operator("object.generate_3d")
        if props.is_processing:
            if props.status_message:
                for line in props.status_message.split("\n"):
                    layout.label(text=line)
            else:
                # Bug fix: UILayout.label() requires the keyword argument
                # `text=` in Blender 2.8+; the positional form raises
                # TypeError at draw time.
                layout.label(text="Processing...")
# All add-on classes, registered/unregistered as a unit.
classes = (
    Hunyuan3DProperties,
    Hunyuan3DOperator,
    Hunyuan3DPanel,
)


def register():
    """Register the add-on classes and attach the settings to every Scene."""
    for cls in classes:
        bpy.utils.register_class(cls)
    bpy.types.Scene.gen_3d_props = bpy.props.PointerProperty(type=Hunyuan3DProperties)


def unregister():
    """Unregister classes in reverse order and drop the Scene property."""
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
    del bpy.types.Scene.gen_3d_props


# Allow running the file directly from Blender's text editor.
if __name__ == "__main__":
    register()
# DCU (Hygon) PyTorch base image: PyTorch 2.3.0, Python 3.10, DTK 24.04.3, Ubuntu 20.04.
FROM image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.3.0-py3.10-dtk24.04.3-ubuntu20.04
ENV DEBIAN_FRONTEND=noninteractive
# RUN yum update && yum install -y git cmake wget build-essential
# RUN source /opt/dtk-24.04.3/env.sh
# Install Python dependencies via the Aliyun PyPI mirror.
COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
ninja
pybind11
diffusers
einops
opencv-python
numpy
torch
transformers
torchvision
#taming-transformers-rom1504
#ConfigArgParse
#ipdb
omegaconf
scikit-image
rembg
onnxruntime
numba==0.58.0
numpy==1.25.0
transformers==4.49.0
#sentencepiece
tqdm
# Mesh Processing
trimesh
pymeshlab
pygltflib
xatlas
#kornia
#facexlib
# Training
accelerate
#pytorch_lightning
#scikit-learn
#scikit-image
# Demo only
gradio
fastapi
uvicorn
rembg
onnxruntime
#gevent
#geventhttpclient
# Start an interactive container from the built image (b272aae8ec72) with the
# Hunyuan3D-2 sources and data volumes mounted; /dev/kfd + /dev/dri passthrough
# and the video group are required for DCU/ROCm GPU access.
docker run -it --shm-size=64G -v $PWD/Hunyuan3D-2:/home/Hunyuan3D-2 -v /public/DL_DATA/AI:/home/AI -v /opt/hyhal:/opt/hyhal:ro --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video --name hy2 b272aae8ec72 bash
# python -m torch.utils.collect_env
import os
import shutil
import time
from glob import glob
from pathlib import Path
import gradio as gr
import torch
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
def get_example_img_list():
    """Return the example image paths (./assets/example_images/*.png), sorted."""
    print('Loading example img list ...')
    image_paths = glob('./assets/example_images/*.png')
    return sorted(image_paths)
def get_example_txt_list():
    """Return the example prompts from ./assets/example_prompts.txt (UTF-8),
    one entry per line, stripped of surrounding whitespace."""
    print('Loading example txt list ...')
    # Bug fix: the original opened the file without a context manager and
    # never closed it; use `with` for deterministic cleanup.
    with open('./assets/example_prompts.txt', encoding='utf-8') as f:
        return [line.strip() for line in f]
def gen_save_folder(max_size=60):
    """Pick (or recycle) a numbered sub-folder of SAVE_DIR and return its path.

    At most `max_size` numbered slots exist; the lowest free slot is used, and
    the slot after it is purged so generation keeps rotating through slots.
    """
    os.makedirs(SAVE_DIR, exist_ok=True)
    used_ids = {int(name) for name in os.listdir(SAVE_DIR) if name.isdigit()}
    if len(used_ids) < max_size:
        cur_id = min(set(range(max_size)) - used_ids)
    else:
        cur_id = -1
    next_slot = f"{SAVE_DIR}/{(cur_id + 1) % max_size}"
    if os.path.exists(next_slot):
        shutil.rmtree(next_slot)
        print(f"remove {next_slot} success !!!")
    save_folder = f"{SAVE_DIR}/{max(0, cur_id)}"
    os.makedirs(save_folder, exist_ok=True)
    print(f"mkdir {save_folder} success !!!")
    return save_folder
def export_mesh(mesh, save_folder, textured=False):
    """Write `mesh` as a GLB file into `save_folder` and return the file path.

    Normals are only exported for textured meshes.
    """
    filename = 'textured_mesh.glb' if textured else 'white_mesh.glb'
    path = os.path.join(save_folder, filename)
    mesh.export(path, include_normals=textured)
    return path
def build_model_viewer_html(save_folder, height=660, width=790, textured=False):
    """Write a model-viewer HTML page for the mesh in `save_folder` and return
    an iframe snippet embedding it via the /static mount.

    The template's literal `<model-viewer>` placeholder is substituted with a
    configured viewer pointing at the GLB file next to the HTML page.
    """
    if textured:
        related_path = "./textured_mesh.glb"
        template_name = './assets/modelviewer-textured-template.html'
        output_html_path = os.path.join(save_folder, 'textured_mesh.html')
    else:
        related_path = "./white_mesh.glb"
        template_name = './assets/modelviewer-template.html'
        output_html_path = os.path.join(save_folder, 'white_mesh.html')
    with open(os.path.join(CURRENT_DIR, template_name), 'r', encoding='utf-8') as f:
        template_html = f.read()
    # Bug fix: the src URL previously ended with a stray '/'
    # (src="./white_mesh.glb/"), which does not resolve to the GLB file on
    # the static file server.
    obj_html = f"""
        <div class="column is-mobile is-centered">
            <model-viewer style="height: {height - 10}px; width: {width}px;" rotation-per-second="10deg" id="modelViewer"
                src="{related_path}" disable-tap
                environment-image="neutral" auto-rotate camera-target="0m 0m 0m" orientation="0deg 0deg 170deg" shadow-intensity=".9"
                ar auto-rotate camera-controls>
            </model-viewer>
        </div>
    """
    with open(output_html_path, 'w', encoding='utf-8') as f:
        f.write(template_html.replace('<model-viewer>', obj_html))
    rel_path = os.path.relpath(output_html_path, SAVE_DIR)
    iframe_tag = f'<iframe src="/static/{rel_path}" height="{height}" width="100%" frameborder="0"></iframe>'
    print(
        f'Find html file {output_html_path}, {os.path.exists(output_html_path)}, relative HTML path is /static/{rel_path}')
    # Bug fix: the wrapper height previously lacked units ('height: 660;'),
    # which CSS ignores; use px like the iframe's height attribute.
    return f"""
        <div style='height: {height}px; width: 100%;'>
            {iframe_tag}
        </div>
    """
def _gen_shape(
        caption,
        image,
        steps=50,
        guidance_scale=7.5,
        seed=1234,
        octree_resolution=256,
        check_box_rembg=False,
        max_facenum=40000,
):
    """Shared shape-generation pipeline.

    Synthesizes an image from `caption` when `image` is None (requires the
    optional t2i_worker), optionally removes the background, then runs the
    image-to-3D pipeline and cleans the resulting mesh.

    Returns:
        (mesh, image, save_folder) — the cleaned mesh, the (possibly
        rembg-processed) input image, and the folder artifacts were saved to.
    """
    if caption:
        print('prompt is', caption)
    save_folder = gen_save_folder()
    stats = {}  # NOTE(review): collected but never returned — confirm intent.
    time_meta = {}
    start_time_0 = time.time()
    if image is None:
        # No image supplied: synthesize one from the text prompt.
        start_time = time.time()
        try:
            image = t2i_worker(caption)
        except Exception as e:
            # t2i_worker only exists when started with --enable_t23d; chain
            # the original error so the real cause is preserved for debugging.
            raise gr.Error(
                f"Text to 3D is disabled. Please enable it by restarted the app with `python gradio_app.py --enable_t23d`.") from e
        time_meta['text2image'] = time.time() - start_time
    image.save(os.path.join(save_folder, 'input.png'))
    print(image.mode)
    if check_box_rembg or image.mode == "RGB":
        start_time = time.time()
        image = rmbg_worker(image.convert('RGB'))
        time_meta['rembg'] = time.time() - start_time
    image.save(os.path.join(save_folder, 'rembg.png'))
    # Image to white (untextured) model.
    start_time = time.time()
    generator = torch.Generator()
    generator = generator.manual_seed(int(seed))
    mesh = i23d_worker(
        image=image,
        num_inference_steps=steps,
        guidance_scale=guidance_scale,
        generator=generator,
        octree_resolution=octree_resolution
    )[0]
    # Clean the raw output: drop floaters and degenerate faces, then cap the
    # face count at max_facenum.
    mesh = FloaterRemover()(mesh)
    mesh = DegenerateFaceRemover()(mesh)
    mesh = FaceReducer()(mesh, max_facenum=max_facenum)
    stats['number_of_faces'] = mesh.faces.shape[0]
    stats['number_of_vertices'] = mesh.vertices.shape[0]
    time_meta['image_to_textured_3d'] = {'total': time.time() - start_time}
    time_meta['total'] = time.time() - start_time_0
    stats['time'] = time_meta
    return mesh, image, save_folder
def generation_all(
        caption,
        image,
        steps=50,
        guidance_scale=7.5,
        seed=1234,
        octree_resolution=256,
        check_box_rembg=False,
        max_facenum=40000
):
    """Generate a shape and texture it.

    Returns gr.update values for both download files plus the viewer HTML for
    the white and the textured mesh.
    """
    mesh, image, save_folder = _gen_shape(
        caption,
        image,
        steps=steps,
        guidance_scale=guidance_scale,
        seed=seed,
        octree_resolution=octree_resolution,
        check_box_rembg=check_box_rembg,
        max_facenum=max_facenum
    )
    white_path = export_mesh(mesh, save_folder, textured=False)
    white_html = build_model_viewer_html(save_folder, height=596, width=700)
    # Texture pass on top of the white mesh.
    textured_mesh = texgen_worker(mesh, image)
    textured_path = export_mesh(textured_mesh, save_folder, textured=True)
    textured_html = build_model_viewer_html(save_folder, height=596, width=700, textured=True)
    return (
        gr.update(value=white_path, visible=True),
        gr.update(value=textured_path, visible=True),
        white_html,
        textured_html,
    )
def shape_generation(
        caption,
        image,
        steps=50,
        guidance_scale=7.5,
        seed=1234,
        octree_resolution=256,
        check_box_rembg=False,
        max_facenum=40000
):
    """Generate an untextured (white) shape only.

    Returns a gr.update for the download file and the viewer HTML snippet.
    """
    mesh, image, save_folder = _gen_shape(
        caption,
        image,
        steps=steps,
        guidance_scale=guidance_scale,
        seed=seed,
        octree_resolution=octree_resolution,
        check_box_rembg=check_box_rembg,
        max_facenum=max_facenum
    )
    white_path = export_mesh(mesh, save_folder, textured=False)
    white_html = build_model_viewer_html(save_folder, height=596, width=700)
    return (
        gr.update(value=white_path, visible=True),
        white_html,
    )
def build_app():
    """Build and return the Gradio Blocks UI.

    Relies on module-level globals assigned in the __main__ section
    (HAS_T2I, HAS_TEXTUREGEN, args, example_is, example_ts,
    HTML_OUTPUT_PLACEHOLDER) — must be called after they are set.
    """
    # Static page header: title, team credit and external links.
    title_html = """
<div style="font-size: 2em; font-weight: bold; text-align: center; margin-bottom: 5px">
Hunyuan3D-2: Scaling Diffusion Models for High Resolution Textured 3D Assets Generation
</div>
<div align="center">
Tencent Hunyuan3D Team
</div>
<div align="center">
<a href="https://github.com/tencent/Hunyuan3D-2">Github Page</a> &ensp;
<a href="http://3d-models.hunyuan.tencent.com">Homepage</a> &ensp;
<a href="#">Technical Report</a> &ensp;
<a href="https://huggingface.co/Tencent/Hunyuan3D-2"> Models</a> &ensp;
</div>
"""
    with gr.Blocks(theme=gr.themes.Base(), title='Hunyuan-3D-2.0') as demo:
        gr.HTML(title_html)
        with gr.Row():
            # Left column: prompt inputs and generation controls.
            with gr.Column(scale=2):
                with gr.Tabs() as tabs_prompt:
                    with gr.Tab('Image Prompt', id='tab_img_prompt') as tab_ip:
                        image = gr.Image(label='Image', type='pil', image_mode='RGBA', height=290)
                        with gr.Row():
                            check_box_rembg = gr.Checkbox(value=True, label='Remove Background')
                    # Text tab hidden unless text-to-image support was loaded.
                    with gr.Tab('Text Prompt', id='tab_txt_prompt', visible=HAS_T2I) as tab_tp:
                        caption = gr.Textbox(label='Text Prompt',
                                             placeholder='HunyuanDiT will be used to generate image.',
                                             info='Example: A 3D model of a cute cat, white background')
                with gr.Accordion('Advanced Options', open=False):
                    num_steps = gr.Slider(maximum=100, minimum=1, value=30, step=1, label='Inference Steps')
                    octree_resolution = gr.Dropdown([256, 384, 512, 768, 1024], value=256, label='Octree Resolution')
                    cfg_scale = gr.Number(value=5.5, label='Guidance Scale')
                    max_facenum_slider = gr.Slider(maximum=200000, minimum=20000, value=40000, step=1000, label='Number of Faces')
                    seed = gr.Slider(maximum=1e7, minimum=0, value=1234, label='Seed')
                with gr.Group():
                    btn = gr.Button(value='Generate Shape Only', variant='primary')
                    # Texture button hidden when the texture pipeline failed to load.
                    btn_all = gr.Button(value='Generate Shape and Texture', variant='primary', visible=HAS_TEXTUREGEN)
                with gr.Group():
                    # Download targets; revealed after generation succeeds.
                    file_out = gr.File(label="File", visible=False)
                    file_out2 = gr.File(label="File", visible=False)
            # Middle column: embedded model-viewer iframes.
            with gr.Column(scale=5):
                with gr.Tabs():
                    with gr.Tab('Generated Mesh') as mesh1:
                        html_output1 = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
                    with gr.Tab('Generated Textured Mesh') as mesh2:
                        html_output2 = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
            # Right column: clickable example galleries.
            with gr.Column(scale=2):
                with gr.Tabs() as gallery:
                    with gr.Tab('Image to 3D Gallery', id='tab_img_gallery') as tab_gi:
                        with gr.Row():
                            gr.Examples(examples=example_is, inputs=[image],
                                        label="Image Prompts", examples_per_page=18)
                    with gr.Tab('Text to 3D Gallery', id='tab_txt_gallery', visible=HAS_T2I) as tab_gt:
                        with gr.Row():
                            gr.Examples(examples=example_ts, inputs=[caption],
                                        label="Text Prompts", examples_per_page=18)
        # Inline warnings when optional capabilities are unavailable.
        if not HAS_TEXTUREGEN:
            gr.HTML("""
<div style="margin-top: 20px;">
<b>Warning: </b>
Texture synthesis is disabled due to missing requirements,
please refer to the README.md and install the missing requirements to activate it.
</div>
""")
        if not args.enable_t23d:
            gr.HTML("""
<div style="margin-top: 20px;">
<b>Warning: </b>
Text to 3D is disabled. Please enable it by restarted the app with `python gradio_app.py --enable_t23d`.
</div>
""")
        # Selecting a gallery tab switches the prompt tabs to the matching mode.
        tab_gi.select(fn=lambda: gr.update(selected='tab_img_prompt'), outputs=tabs_prompt)
        if HAS_T2I:
            tab_gt.select(fn=lambda: gr.update(selected='tab_txt_prompt'), outputs=tabs_prompt)
        # Shape-only pipeline, then reveal the download widget.
        btn.click(
            shape_generation,
            inputs=[
                caption,
                image,
                num_steps,
                cfg_scale,
                seed,
                octree_resolution,
                check_box_rembg,
                max_facenum_slider
            ],
            outputs=[file_out, html_output1]
        ).then(
            lambda: gr.update(visible=True),
            outputs=[file_out],
        )
        # Shape + texture pipeline, then reveal both download widgets.
        btn_all.click(
            generation_all,
            inputs=[
                caption,
                image,
                num_steps,
                cfg_scale,
                seed,
                octree_resolution,
                check_box_rembg,
                max_facenum_slider
            ],
            outputs=[file_out, file_out2, html_output1, html_output2]
        ).then(
            lambda: (gr.update(visible=True), gr.update(visible=True)),
            outputs=[file_out, file_out2],
        )
    return demo
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=8080)
    parser.add_argument('--host', type=str, default='0.0.0.0')
    parser.add_argument('--cache-path', type=str, default='gradio_cache')
    parser.add_argument('--enable_t23d', action='store_true')
    args = parser.parse_args()
    # All generated artifacts are written below this directory; it is also
    # mounted as /static on the FastAPI app further down.
    SAVE_DIR = args.cache_path
    os.makedirs(SAVE_DIR, exist_ok=True)
    CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
    # NOTE(review): 'order-style' in the inline styles below looks like a typo
    # for 'border-style' — confirm before changing the runtime markup.
    HTML_OUTPUT_PLACEHOLDER = """
<div style='height: 596px; width: 100%; border-radius: 8px; border-color: #e5e7eb; order-style: solid; border-width: 1px;'></div>
"""
    INPUT_MESH_HTML = """
<div style='height: 490px; width: 100%; border-radius: 8px;
border-color: #e5e7eb; order-style: solid; border-width: 1px;'>
</div>
"""
    example_is = get_example_img_list()
    example_ts = get_example_txt_list()
    # Texture generation is optional: any failure (missing requirements,
    # missing weights) downgrades the app to shape-only mode.
    try:
        from hy3dgen.texgen import Hunyuan3DPaintPipeline

        texgen_worker = Hunyuan3DPaintPipeline.from_pretrained('tencent/Hunyuan3D-2')
        HAS_TEXTUREGEN = True
    except Exception as e:
        print(e)
        print("Failed to load texture generator.")
        print('Please refer to the README.md and install the missing requirements to activate it.')
        HAS_TEXTUREGEN = False
    # Text-to-image is opt-in (--enable_t23d) since it loads a large model.
    HAS_T2I = False
    if args.enable_t23d:
        from hy3dgen.text2image import HunyuanDiTPipeline

        t2i_worker = HunyuanDiTPipeline('Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled')
        HAS_T2I = True
    from hy3dgen.shapegen import FaceReducer, FloaterRemover, DegenerateFaceRemover, \
        Hunyuan3DDiTFlowMatchingPipeline
    from hy3dgen.rembg import BackgroundRemover

    rmbg_worker = BackgroundRemover()
    i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2')
    floater_remove_worker = FloaterRemover()
    degenerate_face_remove_worker = DegenerateFaceRemover()
    face_reduce_worker = FaceReducer()
    # https://discuss.huggingface.co/t/how-to-serve-an-html-file/33921/2
    # create a FastAPI app
    app = FastAPI()
    # create a static directory to store the static files
    static_dir = Path(SAVE_DIR).absolute()
    static_dir.mkdir(parents=True, exist_ok=True)
    app.mount("/static", StaticFiles(directory=static_dir, html=True), name="static")
    demo = build_app()
    # Mount the Gradio app at '/' on the FastAPI app so /static is served alongside.
    app = gr.mount_gradio_app(app, demo, path="/")
    uvicorn.run(app, host=args.host, port=args.port)
# Open Source Model Licensed under the Apache License Version 2.0
# and Other Licenses of the Third-Party Components therein:
# The below Model in this distribution may have been modified by THL A29 Limited
# ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
# The below software and/or models in this distribution may have been
# modified by THL A29 Limited ("Tencent Modifications").
# All Tencent Modifications are Copyright (C) THL A29 Limited.
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the repsective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment