Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
HunyuanDiT_pytorch
Commits
f91d2ea3
Commit
f91d2ea3
authored
May 28, 2024
by
mashun1
Browse files
hunyuandit
parents
Changes
82
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
283 additions
and
0 deletions
+283
-0
trt/install.sh
trt/install.sh
+80
-0
utils/collect_env.py
utils/collect_env.py
+203
-0
No files found.
trt/install.sh
0 → 100644
View file @
f91d2ea3
#!/bin/bash
# ==============================================================================
# Description: Install TensorRT and prepare the environment for TensorRT.
#
# Usage: sh trt/install.sh [model_trt_dir]
#   model_trt_dir  Directory containing the downloaded TensorRT tarball
#                  (defaults to ckpts/t2i/model_trt).
#
# NOTE: writing to /etc/ld.so.conf.d and running ldconfig requires root.
# ==============================================================================

# ----------------------------------------
# Check the system, tools and arguments
# ----------------------------------------
# Check system. Only support TensorRT on Linux (MacOS is not supported.)
if [ "$(uname)" != "Linux" ]; then
    echo "Only support TensorRT on Linux"
    exit 1
fi

# Resolve the model_trt path ($1 if provided, otherwise the default).
# BUGFIX: if `cd` fails, `$(cd dir; pwd)` silently yields the *current*
# directory, which always exists and defeats the -d check below. Suppress the
# cd error and leave the variable empty instead, so the check reports properly.
if [ -z "$1" ]; then
    MODEL_TRT_DIR=$(cd ckpts/t2i/model_trt 2>/dev/null && pwd)
else
    MODEL_TRT_DIR=$(cd "$1" 2>/dev/null && pwd)
fi

# Check if the model_trt path exists.
if [ ! -d "${MODEL_TRT_DIR}" ]; then
    echo "The model_trt directory (${MODEL_TRT_DIR}) does not exist. Please specify the path by:"
    echo "    sh trt/install.sh <model_trt_dir>"
    exit 1
fi

# Check if ldconfig exists.
if [ ! -x "$(command -v ldconfig)" ]; then
    echo "ldconfig is not installed. Please install it first."
    exit 1
fi

export TENSORRT_VERSION='9.2.0.5'
TENSORRT_PACKAGE="${MODEL_TRT_DIR}/TensorRT-${TENSORRT_VERSION}.tar.gz"

# Check if the TensorRT package is downloaded.
if [ ! -f "${TENSORRT_PACKAGE}" ]; then
    echo "The TensorRT package (${TENSORRT_PACKAGE}) does not exist. Please download it first with following steps:"
    echo "1. cd HunyuanDiT"
    echo "2. huggingface-cli download Tencent-Hunyuan/HunyuanDiT-TensorRT --local-dir ./ckpts/t2i/model_trt"
    exit 1
else
    echo "Found TensorRT package: ${TENSORRT_PACKAGE}"
fi

# ----------------------------------------
# Start to install TensorRT
# ----------------------------------------
# Extract the TensorRT package.
echo "Extracting the TensorRT package..."
tar xf "${TENSORRT_PACKAGE}" -C "${MODEL_TRT_DIR}"
TENSORRT_DIR="${MODEL_TRT_DIR}/TensorRT-${TENSORRT_VERSION}"
echo "Extracting the TensorRT package finished"

# Add the TensorRT library path to the system library path.
# BUGFIX: the shared libraries live inside the extracted tree
# (${TENSORRT_DIR}/lib/), not directly under ${MODEL_TRT_DIR}/lib/ —
# register the former, consistent with the path exported in activate.sh below.
echo "${TENSORRT_DIR}/lib/" >> /etc/ld.so.conf.d/nvidia.conf && ldconfig

# Install the TensorRT Python wheel.
echo "Installing the TensorRT Python wheel..."
# Get python version, e.g., cp38 for Python 3.8; cp310 for Python 3.10
PYTHON_VERSION=$(python -c 'import sys; print(f"cp{sys.version_info.major}{sys.version_info.minor}")')
# The pattern is intentionally unquoted so the shell expands it to the wheel
# built for the current interpreter, e.g. tensorrt-*-cp310-*.whl.
python -m pip install --no-cache-dir ${TENSORRT_DIR}/python/tensorrt*-${PYTHON_VERSION}*
echo "Installing the TensorRT Python wheel finished"

# Prepare activate.sh and deactivate.sh next to this script, so the TensorRT
# environment can be toggled with `source activate.sh` / `source deactivate.sh`.
SCRIPT_DIR=$(dirname "$0")
{
    echo "TENSORRT_DIR=${TENSORRT_DIR}"
    echo 'export LD_LIBRARY_PATH=${TENSORRT_DIR}/lib/:$LD_LIBRARY_PATH'
    echo 'export LIBRARY_PATH=${TENSORRT_DIR}/lib/:$LIBRARY_PATH'
    echo 'export PATH=${TENSORRT_DIR}/bin/:$PATH'
} > "${SCRIPT_DIR}/activate.sh"
{
    echo "TENSORRT_DIR=${TENSORRT_DIR}"
    echo 'export LD_LIBRARY_PATH=${LD_LIBRARY_PATH/${TENSORRT_DIR}\/lib\/:}'
    echo 'export LIBRARY_PATH=${LIBRARY_PATH/${TENSORRT_DIR}\/lib\/:}'
    echo 'export PATH=${PATH/${TENSORRT_DIR}\/bin\/:}'
} > "${SCRIPT_DIR}/deactivate.sh"
utils/collect_env.py
0 → 100644
View file @
f91d2ea3
# Copyright (c) OpenMMLab. All rights reserved.
"""This file holding some environment constant for sharing by other files."""
import
os
import
os.path
as
osp
import
subprocess
import
sys
from
collections
import
OrderedDict
,
defaultdict
import
numpy
as
np
import
torch
def is_rocm_pytorch() -> bool:
    """Check whether the PyTorch is compiled on ROCm.

    Returns:
        bool: True only for a ROCm build of regular PyTorch, i.e. when
        ``torch.version.hip`` is set and ``ROCM_HOME`` resolves. Always
        False on the Parrots fork, or when ``torch.utils.cpp_extension``
        cannot be imported.
    """
    is_rocm = False
    if TORCH_VERSION != 'parrots':
        try:
            from torch.utils.cpp_extension import ROCM_HOME
            # Idiomatic boolean expression instead of `True if ... else False`.
            is_rocm = (torch.version.hip is not None) and (ROCM_HOME is not None)
        except ImportError:
            # cpp_extension unavailable: conservatively report non-ROCm.
            pass
    return is_rocm
# Version string of the installed torch distribution. The literal 'parrots'
# identifies the Parrots fork of PyTorch, which helpers in this module
# special-case (see is_rocm_pytorch / get_build_config / _get_cuda_home).
TORCH_VERSION = torch.__version__
def get_build_config():
    """Return the build-time configuration of the installed framework.

    For regular PyTorch this is ``torch.__config__.show()``; on the Parrots
    fork the equivalent information comes from ``parrots.config``.
    """
    if TORCH_VERSION != 'parrots':
        # Regular PyTorch exposes its compile-time options directly.
        return torch.__config__.show()
    # Parrots fork: build info lives in a dedicated config module.
    from parrots.config import get_build_info
    return get_build_info()
# Probe for the optional torch_musa backend (Moore Threads GPUs). Importing
# the package registers the backend as a side effect; the broad `except` is
# deliberate so that *any* import-time failure simply marks MUSA unavailable
# instead of crashing environment collection.
try:
    import torch_musa  # noqa: F401
    IS_MUSA_AVAILABLE = True
except Exception:
    IS_MUSA_AVAILABLE = False
def is_musa_available() -> bool:
    """Report whether the ``torch_musa`` backend was imported successfully
    at module load time (see ``IS_MUSA_AVAILABLE`` above)."""
    return IS_MUSA_AVAILABLE
def is_cuda_available() -> bool:
    """Returns True if cuda devices exist."""
    # Delegate directly to torch's runtime probe.
    cuda_ok = torch.cuda.is_available()
    return cuda_ok
def _get_cuda_home():
    """Resolve the CUDA toolkit root for the current framework build.

    Dispatches on the build flavor: Parrots exposes ``CUDA_HOME`` through its
    own build_extension module, ROCm builds of PyTorch expose ``ROCM_HOME``,
    and plain CUDA builds expose ``CUDA_HOME``. May be ``None`` when the
    toolkit is not installed.
    """
    if TORCH_VERSION == 'parrots':
        from parrots.utils.build_extension import CUDA_HOME
        return CUDA_HOME
    if is_rocm_pytorch():
        # On ROCm builds the toolkit root is published as ROCM_HOME.
        from torch.utils.cpp_extension import ROCM_HOME
        return ROCM_HOME
    from torch.utils.cpp_extension import CUDA_HOME
    return CUDA_HOME
def _get_musa_home():
    """Return the MUSA toolkit root from the ``MUSA_HOME`` environment
    variable, or ``None`` when it is not set."""
    return os.getenv('MUSA_HOME')
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: The environment information. The following fields are contained.

            - sys.platform: The variable of ``sys.platform``.
            - Python: Python version.
            - CUDA available: Bool, indicating if CUDA is available.
            - GPU devices: Device type of each GPU.
            - CUDA_HOME (optional): The env var ``CUDA_HOME``.
            - NVCC (optional): NVCC version.
            - GCC: GCC version, "n/a" if GCC is not installed.
            - MSVC: Microsoft Virtual C++ Compiler version, Windows only.
            - PyTorch: PyTorch version.
            - PyTorch compiling details: The output of \
                ``torch.__config__.show()``.
            - TorchVision (optional): TorchVision version.
            - OpenCV (optional): OpenCV version.
    """
    # Local import: distutils is only needed while building this report.
    from distutils import errors
    env_info = OrderedDict()
    env_info['sys.platform'] = sys.platform
    # sys.version can span multiple lines; flatten it onto one.
    env_info['Python'] = sys.version.replace('\n', '')
    cuda_available = is_cuda_available()
    musa_available = is_musa_available()
    env_info['CUDA available'] = cuda_available
    env_info['MUSA available'] = musa_available
    # First element of numpy's global RNG state vector, recorded so runs can
    # be correlated with the seed in effect.
    env_info['numpy_random_seed'] = np.random.get_state()[1][0]
    if cuda_available:
        # Group device indices by device name so identical GPUs collapse
        # into a single report line, e.g. "GPU 0,1: <name>".
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for name, device_ids in devices.items():
            env_info['GPU ' + ','.join(device_ids)] = name
        CUDA_HOME = _get_cuda_home()
        env_info['CUDA_HOME'] = CUDA_HOME
        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
            if CUDA_HOME == '/opt/rocm':
                # ROCm toolchain: query hipcc instead of nvcc.
                try:
                    nvcc = osp.join(CUDA_HOME, 'hip/bin/hipcc')
                    nvcc = subprocess.check_output(
                        f'"{nvcc}" --version', shell=True)
                    nvcc = nvcc.decode('utf-8').strip()
                    release = nvcc.rfind('HIP version:')
                    # NOTE(review): rfind('') returns len(nvcc), so the slice
                    # below runs to the end of the output — presumably
                    # intentional, but worth confirming.
                    build = nvcc.rfind('')
                    nvcc = nvcc[release:build].strip()
                except subprocess.SubprocessError:
                    nvcc = 'Not Available'
            else:
                try:
                    nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                    nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True)
                    nvcc = nvcc.decode('utf-8').strip()
                    # Keep only the text between the release banner and the
                    # trailing build identifier of `nvcc -V` output.
                    release = nvcc.rfind('Cuda compilation tools')
                    build = nvcc.rfind('Build ')
                    nvcc = nvcc[release:build].strip()
                except subprocess.SubprocessError:
                    nvcc = 'Not Available'
            env_info['NVCC'] = nvcc
    elif musa_available:
        # Same grouping as the CUDA branch, via the torch.musa namespace.
        devices = defaultdict(list)
        for k in range(torch.musa.device_count()):
            devices[torch.musa.get_device_name(k)].append(str(k))
        for name, device_ids in devices.items():
            env_info['GPU ' + ','.join(device_ids)] = name
        MUSA_HOME = _get_musa_home()
        env_info['MUSA_HOME'] = MUSA_HOME
        if MUSA_HOME is not None and osp.isdir(MUSA_HOME):
            try:
                mcc = osp.join(MUSA_HOME, 'bin/mcc')
                # NOTE(review): the command output is discarded, so on success
                # 'mcc' below reports the compiler *path*, not its version —
                # verify this is intended.
                subprocess.check_output(f'"{mcc}" -v', shell=True)
            except subprocess.SubprocessError:
                mcc = 'Not Available'
            env_info['mcc'] = mcc
    try:
        # Check C++ Compiler.
        # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...',
        # indicating the compiler used, we use this to get the compiler name
        import io
        import sysconfig
        cc = sysconfig.get_config_var('CC')
        if cc:
            cc = osp.basename(cc.split()[0])
            cc_info = subprocess.check_output(f'{cc} --version', shell=True)
            # First line of `cc --version` carries the human-readable version.
            env_info['GCC'] = cc_info.decode('utf-8').partition('\n')[0].strip()
        else:
            # on Windows, cl.exe is not in PATH. We need to find the path.
            # distutils.ccompiler.new_compiler() returns a msvccompiler
            # object and after initialization, path to cl.exe is found.
            import locale
            import os
            from distutils.ccompiler import new_compiler
            ccompiler = new_compiler()
            ccompiler.initialize()
            # Running cl.exe with no arguments prints its version banner.
            cc = subprocess.check_output(
                f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)
            encoding = os.device_encoding(
                sys.stdout.fileno()) or locale.getpreferredencoding()
            env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip()
            env_info['GCC'] = 'n/a'
    except (subprocess.CalledProcessError, errors.DistutilsPlatformError):
        env_info['GCC'] = 'n/a'
    except io.UnsupportedOperation as e:
        # JupyterLab on Windows changes sys.stdout, which has no `fileno` attr
        # Refer to: https://github.com/open-mmlab/mmengine/issues/931
        # TODO: find a solution to get compiler info in Windows JupyterLab,
        # while preserving backward-compatibility in other systems.
        env_info['MSVC'] = f'n/a, reason: {str(e)}'
    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = get_build_config()
    # Optional packages: record versions only when importable.
    try:
        import torchvision
        env_info['TorchVision'] = torchvision.__version__
    except ModuleNotFoundError:
        pass
    try:
        import cv2
        env_info['OpenCV'] = cv2.__version__
    except ImportError:
        pass
    return env_info
if __name__ == '__main__':
    # Print the environment report as one "key: value" line per field.
    env = collect_env()
    for name, val in env.items():
        print(f'{name}: {val}')
\ No newline at end of file
Prev
1
2
3
4
5
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment