"git@developer.sourcefind.cn:yangql/googletest.git" did not exist on "0599a7b8410dc5cfdb477900b280475ae775d7f9"
Commit 4353fa59 authored by limm's avatar limm
Browse files

add part code

parents
Pipeline #2807 canceled with stages
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
title: "OpenMMLab's Model deployment toolbox"
authors:
- name: "MMDeploy Contributors"
date-released: 2021-12-27
url: "https://github.com/open-mmlab/mmdeploy"
license: Apache-2.0
# Copyright (c) OpenMMLab. All rights reserved.
if (NOT DEFINED CMAKE_INSTALL_PREFIX)
set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/install" CACHE PATH "installation directory")
endif ()
message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING "choose 'Release' as default build type" FORCE)
endif ()
cmake_minimum_required(VERSION 3.14)
project(MMDeploy VERSION 1.3.1)
set(CMAKE_CXX_STANDARD 17)
set(MMDEPLOY_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
set(MMDEPLOY_VERSION_MINOR ${PROJECT_VERSION_MINOR})
set(MMDEPLOY_VERSION_PATCH ${PROJECT_VERSION_PATCH})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
if (MSVC)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
else ()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
endif ()
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
# options
option(MMDEPLOY_SHARED_LIBS "build shared libs" OFF)
option(MMDEPLOY_BUILD_SDK "build MMDeploy SDK" OFF)
option(MMDEPLOY_DYNAMIC_BACKEND "dynamic load backend" OFF)
option(MMDEPLOY_BUILD_SDK_MONOLITHIC "build single lib for SDK API" ON)
option(MMDEPLOY_BUILD_TEST "build unittests" OFF)
option(MMDEPLOY_BUILD_SDK_PYTHON_API "build SDK Python API" OFF)
option(MMDEPLOY_BUILD_SDK_CSHARP_API "build SDK C# API support" OFF)
option(MMDEPLOY_BUILD_SDK_JAVA_API "build SDK JAVA API" OFF)
option(MMDEPLOY_BUILD_EXAMPLES "build examples" OFF)
option(MMDEPLOY_SPDLOG_EXTERNAL "use external spdlog" OFF)
option(MMDEPLOY_ZIP_MODEL "support SDK model in zip format" OFF)
option(MMDEPLOY_COVERAGE "build SDK for coverage" OFF)
option(MMDEPLOY_USE_MSCV_STATIC "statically linked CRT" OFF)
option(MMDEPLOY_ELENA_FUSION "use elena to fuse preprocess" OFF)
set(MMDEPLOY_TARGET_DEVICES "cpu" CACHE STRING "target devices to support")
set(MMDEPLOY_TARGET_BACKENDS "" CACHE STRING "target inference engines to support")
set(MMDEPLOY_CODEBASES "all" CACHE STRING "select OpenMMLab codebases")
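# Example configuration (a hypothetical invocation; pick the devices/backends
# that match your target, e.g. TensorRT on CUDA):
#   cmake .. -DMMDEPLOY_BUILD_SDK=ON \
#            -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
#            -DMMDEPLOY_TARGET_BACKENDS="trt" \
#            -DMMDEPLOY_CODEBASES=all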
if ((NOT MMDEPLOY_BUILD_SDK_MONOLITHIC) AND MMDEPLOY_DYNAMIC_BACKEND)
set(MMDEPLOY_DYNAMIC_BACKEND OFF)
endif ()
if (MMDEPLOY_SHARED_LIBS)
set(MMDEPLOY_LIB_TYPE SHARED)
else ()
set(MMDEPLOY_LIB_TYPE STATIC)
endif ()
set(MMDEPLOY_TASKS "" CACHE INTERNAL "")
if (MMDEPLOY_COVERAGE)
add_compile_options(-coverage -fprofile-arcs -ftest-coverage)
add_link_options(-coverage -lgcov)
endif ()
# when CUDA devices are enabled, the environment variable ASAN_OPTIONS=protect_shadow_gap=0
# must be set at runtime
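# e.g. ASAN_OPTIONS=protect_shadow_gap=0 ./bin/mmdeploy_tests (binary name is illustrative)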
if (MMDEPLOY_ASAN_ENABLE)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-fsanitize=address>)
add_link_options(-fsanitize=address)
endif ()
# notice that ubsan has linker issues for ubuntu < 18.04, see
# https://stackoverflow.com/questions/50024731/ld-unrecognized-option-push-state-no-as-needed
if (MMDEPLOY_UBSAN_ENABLE)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-fsanitize=undefined>)
add_link_options(-fsanitize=undefined)
endif ()
if (MSVC)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/diagnostics:classic>)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/wd4251>)
if (MMDEPLOY_USE_MSCV_STATIC)
foreach(lang C CXX)
string(REPLACE /MD /MT CMAKE_${lang}_FLAGS_DEBUG "${CMAKE_${lang}_FLAGS_DEBUG}")
string(REPLACE /MD /MT CMAKE_${lang}_FLAGS_RELEASE "${CMAKE_${lang}_FLAGS_RELEASE}")
endforeach()
endif ()
endif ()
if(APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fobjc-arc")
endif()
add_library(MMDeployStaticModules INTERFACE)
add_library(MMDeployDynamicModules INTERFACE)
add_library(MMDeployLibs INTERFACE)
if ((cuda IN_LIST MMDEPLOY_TARGET_DEVICES) OR (trt IN_LIST MMDEPLOY_TARGET_BACKENDS))
include(cmake/cuda.cmake NO_POLICY_SCOPE)
endif ()
# this must come after including cuda.cmake because policies in function scope are captured
# at function definition
include(cmake/MMDeploy.cmake)
add_subdirectory(csrc/mmdeploy)
if (MMDEPLOY_BUILD_SDK)
if (NOT MMDEPLOY_BUILD_SDK_MONOLITHIC)
install(TARGETS MMDeployStaticModules
MMDeployDynamicModules
MMDeployLibs
EXPORT MMDeployTargets)
endif ()
if (MMDEPLOY_BUILD_TEST)
add_subdirectory(tests/test_csrc)
endif ()
if (MMDEPLOY_BUILD_EXAMPLES)
include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake)
add_subdirectory(demo/csrc)
endif ()
# export MMDeploy package
install(EXPORT MMDeployTargets
FILE MMDeployTargets.cmake
DESTINATION lib/cmake/MMDeploy)
if (MMDEPLOY_SPDLOG_EXTERNAL)
set(SPDLOG_DEPENDENCY "find_package(spdlog QUIET)")
endif ()
# append backend deps
mmdeploy_add_deps(trt BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS TENSORRT CUDNN)
mmdeploy_add_deps(ort BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS ONNXRUNTIME)
mmdeploy_add_deps(ncnn BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS ncnn)
mmdeploy_add_deps(openvino BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS InferenceEngine)
if (NOT MMDEPLOY_SHARED_LIBS)
mmdeploy_add_deps(pplnn BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS pplnn)
endif ()
mmdeploy_add_deps(snpe BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS snpe)
mmdeploy_add_deps(rknn BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS rknn)
include(CMakePackageConfigHelpers)
# generate the config file that includes the exports
configure_package_config_file(${CMAKE_SOURCE_DIR}/cmake/MMDeployConfig.cmake.in
"${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfig.cmake"
INSTALL_DESTINATION "lib/cmake"
NO_SET_AND_CHECK_MACRO
NO_CHECK_REQUIRED_COMPONENTS_MACRO
)
write_basic_package_version_file(
"${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfigVersion.cmake"
VERSION "${MMDeploy_VERSION_MAJOR}.${MMDeploy_VERSION_MINOR}"
COMPATIBILITY AnyNewerVersion
)
install(FILES
${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfig.cmake
${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfigVersion.cmake
${CMAKE_CURRENT_SOURCE_DIR}/cmake/MMDeploy.cmake
DESTINATION lib/cmake/MMDeploy
)
if (MSVC)
install(FILES
${CMAKE_CURRENT_SOURCE_DIR}/cmake/loader.cpp.in
DESTINATION lib/cmake/MMDeploy
)
endif ()
install(DIRECTORY
${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules
DESTINATION lib/cmake/MMDeploy
)
if (${CMAKE_VERSION} VERSION_LESS "3.17.0")
install(SCRIPT cmake/post-install.cmake)
endif ()
endif ()
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
include requirements/*.txt
include mmdeploy/backend/ncnn/*.so
include mmdeploy/backend/ncnn/*.dll
include mmdeploy/backend/ncnn/*.pyd
include mmdeploy/backend/ncnn/mmdeploy_onnx2ncnn*
include mmdeploy/lib/*.so
include mmdeploy/lib/*.so*
include mmdeploy/lib/*.dll
include mmdeploy/lib/*.pyd
include mmdeploy/backend/torchscript/*.so
include mmdeploy/backend/torchscript/*.dll
include mmdeploy/backend/torchscript/*.pyd
<div align="center">
<img src="resources/mmdeploy-logo.png" width="450"/>
<div>&nbsp;</div>
<div align="center">
<b><font size="5">OpenMMLab website</font></b>
<sup>
<a href="https://openmmlab.com">
<i><font size="4">HOT</font></i>
</a>
</sup>
&nbsp;&nbsp;&nbsp;&nbsp;
<b><font size="5">OpenMMLab platform</font></b>
<sup>
<a href="https://platform.openmmlab.com">
<i><font size="4">TRY IT OUT</font></i>
</a>
</sup>
</div>
<div>&nbsp;</div>
[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/en/latest/)
[![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions)
[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/main/LICENSE)
[![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
[![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
English | [简体中文](README_zh-CN.md)
</div>
<div align="center">
<a href="https://openmmlab.medium.com/" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218352562-cdded397-b0f3-4ca1-b8dd-a60df8dca75b.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://discord.gg/raweFPmdzG" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://twitter.com/OpenMMLab" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png" width="3%" alt="" /></a>
<img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
<a href="https://www.youtube.com/openmmlab" style="text-decoration:none;">
<img src="https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png" width="3%" alt="" /></a>
</div>
## Highlights
MMDeploy 1.x has been released and is adapted to the upstream OpenMMLab 2.0 codebases. Please **align the versions** when using it.
The default branch has been switched from `master` to `main`. MMDeploy 0.x (`master`) will be deprecated, and new features will only be added to MMDeploy 1.x (`main`) in the future.
| mmdeploy | mmengine | mmcv | mmdet | others |
| :------: | :------: | :------: | :------: | :----: |
| 0.x.y | - | \<=1.x.y | \<=2.x.y | 0.x.y |
| 1.x.y | 0.x.y | 2.x.y | 3.x.y | 1.x.y |
[Deploee](https://platform.openmmlab.com/deploee/) offers over 2,300 AI models in ONNX, NCNN, TRT and OpenVINO formats. Featuring a built-in list of real hardware devices, Deploee enables users to convert Torch models into any target inference format for profiling purposes.
## Introduction
MMDeploy is an open-source deep learning model deployment toolset. It is a part of the [OpenMMLab](https://openmmlab.com/) project.
<div align="center">
<img src="resources/introduction.png">
</div>
## Main features
### Fully support OpenMMLab models
The currently supported codebases and models are as follows; more will be included in the future:
- [mmpretrain](docs/en/04-supported-codebases/mmpretrain.md)
- [mmdet](docs/en/04-supported-codebases/mmdet.md)
- [mmseg](docs/en/04-supported-codebases/mmseg.md)
- [mmagic](docs/en/04-supported-codebases/mmagic.md)
- [mmocr](docs/en/04-supported-codebases/mmocr.md)
- [mmpose](docs/en/04-supported-codebases/mmpose.md)
- [mmdet3d](docs/en/04-supported-codebases/mmdet3d.md)
- [mmrotate](docs/en/04-supported-codebases/mmrotate.md)
- [mmaction2](docs/en/04-supported-codebases/mmaction2.md)
### Multiple inference backends are available
The supported Device / Platform / Inference Backend matrix is presented below, and more will be supported over time.
Benchmarks can be found [here](docs/en/03-benchmark/benchmark.md).
<div style="width: fit-content; margin: auto;">
<table>
<tr>
<th>Device / <br> Platform</th>
<th>Linux</th>
<th>Windows</th>
<th>macOS</th>
<th>Android</th>
</tr>
<tr>
<th>x86_64 <br> CPU</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ort.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ort.yml"></a></sub> <sub>onnxruntime</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml"></a></sub> <sub>pplnn</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml"></a></sub> <sub>ncnn</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-torchscript.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-torchscript.yml"></a></sub> <sub>LibTorch</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>OpenVINO</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>TVM</sub> <br>
</td>
<td>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>onnxruntime</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>OpenVINO</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>ncnn</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>ARM <br> CPU</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/cross_build_aarch64"></a></sub> <sub>ncnn</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml"></a></sub> <sub>ncnn</sub> <br>
</td>
</tr>
<tr>
<th>RISC-V</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/linux-riscv64-gcc.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/linux-riscv64-gcc.yml"></a></sub> <sub>ncnn</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>NVIDIA <br> GPU</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_linux"></a></sub> <sub>onnxruntime</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_linux"></a></sub> <sub>TensorRT</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>LibTorch</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml"></a></sub> <sub>pplnn</sub> <br>
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_windows"></a></sub> <sub>onnxruntime</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_windows"></a></sub> <sub>TensorRT</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>NVIDIA <br> Jetson</th>
<td>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>TensorRT</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Huawei <br> ascend310</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ascend.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ascend.yml"></a></sub> <sub>CANN</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Rockchip</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml"></a></sub> <sub>RKNN</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Apple M1</th>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-coreml.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-coreml.yml"></a></sub> <sub>CoreML</sub> <br>
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Adreno <br> GPU</th>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml"></a></sub> <sub>SNPE</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml"></a></sub> <sub>ncnn</sub> <br>
</td>
</tr>
<tr>
<th>Hexagon <br> DSP</th>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml"></a></sub> <sub>SNPE</sub> <br>
</td>
</tr>
</table>
</div>
### Efficient and scalable C/C++ SDK Framework
All kinds of modules in the SDK can be extended, such as `Transform` for image processing, `Net` for neural network inference, `Module` for postprocessing, and so on.
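Below is a minimal sketch of the SDK in action through its Python binding, `mmdeploy_runtime`. It assumes the SDK was built with `MMDEPLOY_BUILD_SDK_PYTHON_API=ON`; the model directory and image path are placeholders.
```python
# Minimal SDK inference sketch (Python binding). The model directory must be
# an SDK model produced by the MMDeploy converter; paths here are placeholders.
import cv2
from mmdeploy_runtime import Detector

img = cv2.imread('demo.jpg')
detector = Detector(model_path='mmdeploy_models/faster-rcnn',
                    device_name='cpu', device_id=0)
bboxes, labels, _ = detector(img)  # boxes are [x1, y1, x2, y2, score]
for bbox, label in zip(bboxes, labels):
    if bbox[4] > 0.3:  # filter low-confidence detections
        print(f'label={label}, bbox={bbox[:4]}, score={bbox[4]:.2f}')
```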
## [Documentation](https://mmdeploy.readthedocs.io/en/latest/)
Please read [getting_started](docs/en/get_started.md) for the basic usage of MMDeploy; a short model-conversion sketch follows the list below. We also provide tutorials about:
- [Build](docs/en/01-how-to-build/build_from_source.md)
- [Build from Docker](docs/en/01-how-to-build/build_from_docker.md)
- [Build from Script](docs/en/01-how-to-build/build_from_script.md)
- [Build for Linux](docs/en/01-how-to-build/linux-x86_64.md)
- [Build for macOS](docs/en/01-how-to-build/macos-arm64.md)
- [Build for Win10](docs/en/01-how-to-build/windows.md)
- [Build for Android](docs/en/01-how-to-build/android.md)
- [Build for Jetson](docs/en/01-how-to-build/jetsons.md)
- [Build for SNPE](docs/en/01-how-to-build/snpe.md)
- [Cross Build for aarch64](docs/en/01-how-to-build/cross_build_ncnn_aarch64.md)
- User Guide
- [How to convert model](docs/en/02-how-to-run/convert_model.md)
- [How to write config](docs/en/02-how-to-run/write_config.md)
- [How to profile model](docs/en/02-how-to-run/profile_model.md)
- [How to quantize model](docs/en/02-how-to-run/quantize_model.md)
- [Useful tools](docs/en/02-how-to-run/useful_tools.md)
- Developer Guide
- [Architecture](docs/en/07-developer-guide/architecture.md)
- [How to support new models](docs/en/07-developer-guide/support_new_model.md)
- [How to support new backends](docs/en/07-developer-guide/support_new_backend.md)
- [How to partition model](docs/en/07-developer-guide/partition_model.md)
- [How to test rewritten model](docs/en/07-developer-guide/test_rewritten_models.md)
- [How to test backend ops](docs/en/07-developer-guide/add_backend_ops_unittest.md)
- [How to do regression test](docs/en/07-developer-guide/regression_test.md)
- Custom Backend Ops
- [ncnn](docs/en/06-custom-ops/ncnn.md)
- [ONNXRuntime](docs/en/06-custom-ops/onnxruntime.md)
- [tensorrt](docs/en/06-custom-ops/tensorrt.md)
- [FAQ](docs/en/faq.md)
- [Contributing](.github/CONTRIBUTING.md)
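To give a concrete taste of the conversion workflow covered in the user guide above, here is a hedged sketch using the Python API. The deploy config ships with MMDeploy, while the model config and checkpoint paths are placeholders that would come from the corresponding codebase repo.
```python
# A sketch of PyTorch-to-ONNX conversion with mmdeploy.apis.torch2onnx.
# The model config and checkpoint are placeholders from the target codebase.
from mmdeploy.apis import torch2onnx

torch2onnx(
    img='demo.jpg',               # sample input used for tracing
    work_dir='work_dir',          # where conversion artifacts are written
    save_file='end2end.onnx',     # name of the exported ONNX file
    deploy_cfg='configs/mmdet/detection/detection_onnxruntime_dynamic.py',
    model_cfg='path/to/model_config.py',
    model_checkpoint='path/to/checkpoint.pth',
    device='cpu')
```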
## Benchmark and Model zoo
You can find the supported models from [here](docs/en/03-benchmark/supported_models.md) and their performance in the [benchmark](docs/en/03-benchmark/benchmark.md).
## Contributing
We appreciate all contributions to MMDeploy. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
## Acknowledgement
We would like to sincerely thank the following teams for their contributions to [MMDeploy](https://github.com/open-mmlab/mmdeploy):
- [OpenPPL](https://github.com/openppl-public)
- [OpenVINO](https://github.com/openvinotoolkit/openvino)
- [ncnn](https://github.com/Tencent/ncnn)
## Citation
If you find this project useful in your research, please consider citing:
```BibTeX
@misc{=mmdeploy,
title={OpenMMLab's Model Deployment Toolbox.},
author={MMDeploy Contributors},
howpublished = {\url{https://github.com/open-mmlab/mmdeploy}},
year={2021}
}
```
## License
This project is released under the [Apache 2.0 license](LICENSE).
## Projects in OpenMMLab
- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
- [MMPretrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox and benchmark.
- [MMagic](https://github.com/open-mmlab/mmagic): Open**MM**Lab **A**dvanced, **G**enerative and **I**ntelligent **C**reation toolbox.
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark.
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
- [Playground](https://github.com/open-mmlab/playground): A central hub for gathering and showcasing amazing projects built upon OpenMMLab.
<div align="center">
<img src="resources/mmdeploy-logo.png" width="450"/>
<div>&nbsp;</div>
<div align="center">
<b><font size="5">OpenMMLab 官网</font></b>
<sup>
<a href="https://openmmlab.com">
<i><font size="4">HOT</font></i>
</a>
</sup>
&nbsp;&nbsp;&nbsp;&nbsp;
<b><font size="5">OpenMMLab 开放平台</font></b>
<sup>
<a href="https://platform.openmmlab.com">
<i><font size="4">TRY IT OUT</font></i>
</a>
</sup>
</div>
<div>&nbsp;</div>
</div>
[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdeploy.readthedocs.io/zh_CN/latest/)
[![badge](https://github.com/open-mmlab/mmdeploy/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdeploy/actions)
[![codecov](https://codecov.io/gh/open-mmlab/mmdeploy/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdeploy)
[![license](https://img.shields.io/github/license/open-mmlab/mmdeploy.svg)](https://github.com/open-mmlab/mmdeploy/tree/main/LICENSE)
[![issue resolution](https://img.shields.io/github/issues-closed-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
[![open issues](https://img.shields.io/github/issues-raw/open-mmlab/mmdeploy)](https://github.com/open-mmlab/mmdeploy/issues)
[English](README.md) | 简体中文
## MMDeploy 1.x
The brand-new MMDeploy 1.x has been released. It is adapted to the OpenMMLab 2.0 ecosystem, so make sure to **align the versions** when using it.
The default branch of the MMDeploy codebase has been switched from `master` to `main`. MMDeploy 0.x (`master`) will be gradually deprecated, and new features will only be added to MMDeploy 1.x (`main`).
| mmdeploy | mmengine | mmcv | mmdet | mmpretrain and others |
| :------: | :------: | :------: | :------: | :-------------------: |
| 0.x.y | - | \<=1.x.y | \<=2.x.y | 0.x.y |
| 1.x.y | 0.x.y | 2.x.y | 3.x.y | 1.x.y |
[Deploee](https://platform.openmmlab.com/deploee/) has used MMDeploy 1.x to convert 2,300 onnx/ncnn/trt/openvino models that are free to search and download. The platform also hosts real server-side and embedded hardware, so users can finish model conversion and speed tests online.
## Introduction
MMDeploy is the [OpenMMLab](https://openmmlab.com/) model deployment toolbox that **provides a unified deployment experience for all OpenMMLab codebases**. With MMDeploy, developers can easily generate the SDK required by specified hardware from a training repo, saving plenty of adaptation time.
## Architecture overview
<div align="center">
<img src="resources/introduction.png"/>
</div>
## Main features
### Support for numerous OpenMMLab codebases
- [mmpretrain](docs/zh_cn/04-supported-codebases/mmpretrain.md)
- [mmdet](docs/zh_cn/04-supported-codebases/mmdet.md)
- [mmseg](docs/zh_cn/04-supported-codebases/mmseg.md)
- [mmagic](docs/zh_cn/04-supported-codebases/mmagic.md)
- [mmocr](docs/zh_cn/04-supported-codebases/mmocr.md)
- [mmpose](docs/zh_cn/04-supported-codebases/mmpose.md)
- [mmdet3d](docs/zh_cn/04-supported-codebases/mmdet3d.md)
- [mmrotate](docs/zh_cn/04-supported-codebases/mmrotate.md)
- [mmaction2](docs/zh_cn/04-supported-codebases/mmaction2.md)
### Multiple inference backends
The supported device platforms and inference engines are shown in the table below. Benchmarks are available [here](docs/zh_cn/03-benchmark/benchmark.md).
<div style="width: fit-content; margin: auto;">
<table>
<tr>
<th>Device / <br> Platform</th>
<th>Linux</th>
<th>Windows</th>
<th>macOS</th>
<th>Android</th>
</tr>
<tr>
<th>x86_64 <br> CPU</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ort.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ort.yml"></a></sub> <sub>onnxruntime</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml"></a></sub> <sub>pplnn</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml"></a></sub> <sub>ncnn</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-torchscript.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-torchscript.yml"></a></sub> <sub>LibTorch</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>OpenVINO</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>TVM</sub> <br>
</td>
<td>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>onnxruntime</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>OpenVINO</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>ncnn</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>ARM <br> CPU</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/cross_build_aarch64"></a></sub> <sub>ncnn</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml"></a></sub> <sub>ncnn</sub> <br>
</td>
</tr>
<tr>
<th>RISC-V</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/linux-riscv64-gcc.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/linux-riscv64-gcc.yml"></a></sub> <sub>ncnn</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>NVIDIA <br> GPU</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_linux"></a></sub> <sub>onnxruntime</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_linux"></a></sub> <sub>TensorRT</sub> <br>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>LibTorch</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-pplnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-pplnn.yml"></a></sub> <sub>pplnn</sub> <br>
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_windows"></a></sub> <sub>onnxruntime</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/build.yml"><img src="https://byob.yarr.is/open-mmlab/mmdeploy/build_cuda113_windows"></a></sub> <sub>TensorRT</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>NVIDIA <br> Jetson</th>
<td>
<sub><img src="https://img.shields.io/badge/build-no%20status-lightgrey"></sub> <sub>TensorRT</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Huawei <br> ascend310</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ascend.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ascend.yml"></a></sub> <sub>CANN</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Rockchip</th>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-rknn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-rknn.yml"></a></sub> <sub>RKNN</sub> <br>
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Apple M1</th>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-coreml.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-coreml.yml"></a></sub> <sub>CoreML</sub> <br>
</td>
<td align="center">
-
</td>
</tr>
<tr>
<th>Adreno <br> GPU</th>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml"></a></sub> <sub>SNPE</sub> <br>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-ncnn.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-ncnn.yml"></a></sub> <sub>ncnn</sub> <br>
</td>
</tr>
<tr>
<th>Hexagon <br> DSP</th>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td align="center">
-
</td>
<td>
<sub><a href="https://github.com/open-mmlab/mmdeploy/actions/workflows/backend-snpe.yml"><img src="https://img.shields.io/github/actions/workflow/status/open-mmlab/mmdeploy/backend-snpe.yml"></a></sub> <sub>SNPE</sub> <br>
</td>
</tr>
</table>
</div>
### Highly customizable SDK
- `Transform` for data preprocessing
- `Net` for inference
- `Module` for postprocessing
## [Documentation (Chinese)](https://mmdeploy.readthedocs.io/zh_CN/latest/)
- [Get started](docs/zh_cn/get_started.md)
- [Build from source](docs/zh_cn/01-how-to-build/build_from_source.md)
- [Build from script](docs/zh_cn/01-how-to-build/build_from_script.md)
- [Build from Docker](docs/zh_cn/01-how-to-build/build_from_docker.md)
- [Build for Linux](docs/zh_cn/01-how-to-build/linux-x86_64.md)
- [Build for macOS](docs/zh_cn/01-how-to-build/macos-arm64.md)
- [Build for Win10](docs/zh_cn/01-how-to-build/windows.md)
- [Build for Android](docs/zh_cn/01-how-to-build/android.md)
- [Build for Jetson](docs/zh_cn/01-how-to-build/jetsons.md)
- [Build for SNPE](docs/zh_cn/01-how-to-build/snpe.md)
- [Cross Build for aarch64](docs/zh_cn/01-how-to-build/cross_build_ncnn_aarch64.md)
- User Guide
- [How to convert model](docs/zh_cn/02-how-to-run/convert_model.md)
- [How to write config](docs/zh_cn/02-how-to-run/write_config.md)
- [How to quantize model](docs/zh_cn/02-how-to-run/quantize_model.md)
- [How to profile model](docs/zh_cn/02-how-to-run/profile_model.md)
- [Useful tools](docs/zh_cn/02-how-to-run/useful_tools.md)
- Developer Guide
- [Architecture](docs/zh_cn/07-developer-guide/architecture.md)
- [How to support new models](docs/zh_cn/07-developer-guide/support_new_model.md)
- [How to support new backends](docs/zh_cn/07-developer-guide/support_new_backend.md)
- [How to partition model](docs/zh_cn/07-developer-guide/partition_model.md)
- [How to test rewritten models](docs/zh_cn/07-developer-guide/test_rewritten_models.md)
- [How to test backend ops](docs/zh_cn/07-developer-guide/add_backend_ops_unittest.md)
- [How to do regression test](docs/zh_cn/07-developer-guide/regression_test.md)
- Custom ops for each backend
- [ncnn](docs/zh_cn/06-custom-ops/ncnn.md)
- [onnxruntime](docs/zh_cn/06-custom-ops/onnxruntime.md)
- [tensorrt](docs/zh_cn/06-custom-ops/tensorrt.md)
- [FAQ](docs/zh_cn/faq.md)
- [Contributing](.github/CONTRIBUTING.md)
## Tutorials for beginners
- [01 Terminology and loading your first model](docs/zh_cn/tutorial/01_introduction_to_model_deployment.md)
- [02 Common challenges in deployment](docs/zh_cn/tutorial/02_challenges.md)
- [03 Converting torch models to onnx](docs/zh_cn/tutorial/03_pytorch2onnx.md)
- [04 Supporting more onnx operators in torch](docs/zh_cn/tutorial/04_onnx_custom_op.md)
- [05 Debugging onnx models](docs/zh_cn/tutorial/05_onnx_model_editing.md)
## Benchmark and model zoo
Benchmarks and the list of supported models are available in the [benchmark](https://mmdeploy.readthedocs.io/zh_CN/latest/03-benchmark/benchmark.html) and the [model list](https://mmdeploy.readthedocs.io/en/latest/03-benchmark/supported_models.html).
## Contributing
We appreciate all the contributors for their efforts to improve and enhance MMDeploy. Please refer to the [contributing guide](.github/CONTRIBUTING.md) to learn how to participate in the project.
## Acknowledgement
- [OpenPPL](https://github.com/openppl-public/ppl.nn): underlying library of a high-performance inference framework
- [OpenVINO](https://github.com/openvinotoolkit/openvino): a framework for optimizing and deploying AI inference
- [ncnn](https://github.com/tencent/ncnn): a high-performance neural network inference framework heavily optimized for mobile platforms
## Citation
If you use the code or performance benchmarks of this project in your research, please cite MMDeploy with the following BibTeX entry:
```BibTeX
@misc{=mmdeploy,
title={OpenMMLab's Model Deployment Toolbox.},
author={MMDeploy Contributors},
howpublished = {\url{https://github.com/open-mmlab/mmdeploy}},
year={2021}
}
```
## License
This project is released under the [Apache 2.0 license](LICENSE).
## Other projects in OpenMMLab
- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
- [MMPretrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox for deep learning.
- [MMagic](https://github.com/open-mmlab/mmagic): OpenMMLab next-generation toolbox for AI-generated content (AIGC).
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox.
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark.
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab unified video perception platform.
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox.
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab full-pipeline text detection, recognition, and understanding toolkit.
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox.
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab few-shot learning toolbox and benchmark.
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation video understanding toolbox.
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
- [MIM](https://github.com/open-mmlab/mim): the unified entry for OpenMMLab projects, algorithms, and models.
- [Playground](https://github.com/open-mmlab/playground): a hub that collects and showcases cutting-edge, interesting community projects built on OpenMMLab.
## Join the OpenMMLab community
Scan the QR code below to follow the OpenMMLab team's [official Zhihu account](https://www.zhihu.com/people/openmmlab), and scan the WeChat QR code to add our assistant, who will invite you into the MMDeploy WeChat community. (Friend-request format: research direction + region + school/company + name)
<div align="center">
<img src="https://user-images.githubusercontent.com/25839884/205870927-39f4946d-8751-4219-a4c0-740117558fd7.jpg" height="400" />
<img src="https://github.com/open-mmlab/mmdeploy/assets/62195058/a8f116ca-b567-42ce-b70e-38526a81c9a3" height="400" />
</div>
In the OpenMMLab community, we will
- 📢 share the cutting-edge core technologies of AI frameworks
- 💻 walk through the source code of commonly used PyTorch modules
- 📰 release OpenMMLab-related news
- 🚀 introduce cutting-edge algorithms developed by OpenMMLab
- 🏃 provide more efficient Q&A and feedback channels
- 🔥 offer a platform for in-depth exchanges with developers from all walks of life
Packed with useful content 📘 and waiting for you 💗, the OpenMMLab community looks forward to your joining 👬
ARG CUDA_INT=118
FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 AS cuda-118
FROM nvidia/cuda:11.3.0-cudnn8-devel-ubuntu20.04 AS cuda-113
FROM cuda-${CUDA_INT} AS final
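# The final stage's base image is selected via CUDA_INT, e.g. a hypothetical build:
#   docker build --build-arg CUDA_INT=113 --build-arg TENSORRT_URL=<tensorrt-tar-url> .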
ARG TORCH_VERSION=2.0.0
ARG TORCHVISION_VERSION=0.15.1
# important dependencies
ARG OPENCV_VERSION=4.5.4.60
ARG PPLCV_VERSION=0.7.0
# backends
ARG ONNXRUNTIME_VERSION=1.15.1
ARG PPLNN_VERSION=0.8.1
ARG NCNN_VERSION=20230816
ARG TENSORRT_VERSION=8.6.1.6
ARG OPENVINO_VERSION=2022.3.0
# tensorrt tar file url
ARG TENSORRT_URL
USER root
WORKDIR /root/workspace
ENV DEBIAN_FRONTEND=noninteractive
ENV FORCE_CUDA="1"
RUN apt-get update && apt-get install -y --no-install-recommends \
apt-utils \
ca-certificates \
gcc-7 \
g++-7 \
git \
vim \
wget \
libopencv-dev \
libprotobuf-dev protobuf-compiler \
unzip \
python3-dev python3-pip \
&& rm -rf /var/lib/apt/lists/*
ENV CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
# install jdk, onnxruntime, openvino, and other python packages
RUN wget https://download.java.net/java/GA/jdk18/43f95e8614114aeaa8e8a5fcf20a682d/36/GPL/openjdk-18_linux-x64_bin.tar.gz &&\
tar xvf openjdk-18_linux-x64_bin.tar.gz && rm -rf openjdk-18_linux-x64_bin.tar.gz && \
wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\
wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz && rm onnxruntime-*.tgz &&\
wget https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-linux-x86_64.tar.gz &&\
tar -xzvf cmake-3.25.2-linux-x86_64.tar.gz && rm cmake-*.tar.gz && mv cmake-* cmake &&\
export CUDA_INT=$(echo $CUDA_VERSION | awk '{split($0, a, "."); print a[1]a[2]}') &&\
python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel &&\
python3 -m pip install --no-cache-dir onnxruntime-gpu==${ONNXRUNTIME_VERSION} &&\
python3 -m pip install --no-cache-dir openvino openvino-dev[onnx]==${OPENVINO_VERSION} &&\
python3 -m pip install --no-cache-dir opencv-python==${OPENCV_VERSION} opencv-python-headless==${OPENCV_VERSION} opencv-contrib-python==${OPENCV_VERSION} &&\
python3 -m pip install --no-cache-dir torch==${TORCH_VERSION}+cu${CUDA_INT} torchvision==${TORCHVISION_VERSION}+cu${CUDA_INT} -f https://download.pytorch.org/whl/torch_stable.html
# create env
ENV JAVA_HOME=/root/workspace/jdk-18
ENV PATH=$JAVA_HOME/bin:/root/workspace/cmake/bin:$PATH
ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
ENV ONNXRUNTIME_GPU_DIR=/root/workspace/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}
ENV LD_LIBRARY_PATH=${ONNXRUNTIME_DIR}/lib:$LD_LIBRARY_PATH
### install ppl.nn
RUN git clone --depth 1 --branch v${PPLNN_VERSION} --recursive https://github.com/openppl-public/ppl.nn.git &&\
cd ppl.nn &&\
export PYTHON_INCLUDE_DIR=$(python3 -c 'import sysconfig;print(sysconfig.get_paths()["include"])') &&\
./build.sh -DPPLNN_USE_X86_64=ON -DPPLNN_USE_CUDA=ON -DPPLNN_ENABLE_PYTHON_API=ON -DPYTHON3_INCLUDE_DIRS=${PYTHON_INCLUDE_DIR} &&\
cd ./python/package && \
./build.sh && \
cd - && mv pplnn-build/install ./ && rm -rf pplnn-build &&\
cd /tmp/pyppl-package/dist && \
python3 -m pip install pyppl*.whl && rm *.whl
ENV pplnn_DIR=/root/workspace/ppl.nn/install/lib/cmake/ppl
ENV PYTHONPATH=/root/workspace/ppl.nn/install/lib:$PYTHONPATH
### build ncnn
RUN git clone --depth 1 --branch ${NCNN_VERSION} --recursive https://github.com/Tencent/ncnn.git &&\
python3 -m pip install pybind11 &&\
mkdir ncnn/build && cd ncnn/build &&\
cmake -DNCNN_VULKAN=OFF -DNCNN_PYTHON=ON -DNCNN_BUILD_TOOLS=OFF -DCMAKE_INSTALL_PREFIX=../install .. &&\
make -j $(nproc) && make install &&\
cd .. && python3 -m pip install . && rm -rf ./build
ENV ncnn_DIR=/root/workspace/ncnn/install/lib/cmake/ncnn
### install ppl.cv
RUN git clone --depth 1 --branch v${PPLCV_VERSION} https://github.com/openppl-public/ppl.cv.git &&\
cd ppl.cv &&\
./build.sh cuda && mv cuda-build/install ./ && rm -rf cuda-build
ENV pplcv_DIR=/root/workspace/ppl.cv/install/lib/cmake/ppl
### install tensorrt
RUN wget -c $TENSORRT_URL && \
tar -zxvf /root/workspace/TensorRT-${TENSORRT_VERSION}*.tar.gz -C /root/workspace &&\
rm -rf TensorRT-${TENSORRT_VERSION}*.tar.gz &&\
ln -sf /root/workspace/TensorRT-${TENSORRT_VERSION} /root/workspace/TensorRT &&\
cd /root/workspace/TensorRT && rm -rf data doc samples uff &&\
export PY_VERSION=$(python3 -V | awk '{print $2}' | awk '{split($0, a, "."); print a[1]a[2]}') &&\
python3 -m pip install ./python/tensorrt-*-cp${PY_VERSION}-none-linux_x86_64.whl
ENV TENSORRT_DIR=/root/workspace/TensorRT
ENV LD_LIBRARY_PATH=$TENSORRT_DIR/lib:$LD_LIBRARY_PATH
ENV PATH=$TENSORRT_DIR/bin:$PATH
# openvino
RUN wget https://storage.openvinotoolkit.org/repositories/openvino/packages/2022.3/linux/l_openvino_toolkit_ubuntu20_2022.3.0.9052.9752fafe8eb_x86_64.tgz &&\
tar -zxvf ./l_openvino_toolkit*.tgz &&\
rm ./l_openvino_toolkit*.tgz &&\
mv ./l_openvino* ./openvino_toolkit &&\
bash ./openvino_toolkit/install_dependencies/install_openvino_dependencies.sh
ENV OPENVINO_DIR=/root/workspace/openvino_toolkit
ENV InferenceEngine_DIR=$OPENVINO_DIR/runtime/cmake
FROM openvino/ubuntu20_dev:2022.3.0
ARG PYTHON_VERSION=3.8
ARG TORCH_VERSION=1.10.0
ARG TORCHVISION_VERSION=0.11.0
ARG ONNXRUNTIME_VERSION=1.8.1
ARG MMCV_VERSION=">=2.0.0rc2"
ARG MMENGINE_VERSION=">=0.3.0"
USER root
### change the system source for installing libs
ARG USE_SRC_INSIDE=false
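# e.g. pass --build-arg USE_SRC_INSIDE=true to switch apt to the aliyun mirror (illustrative usage)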
RUN if [ ${USE_SRC_INSIDE} = true ] ; \
then \
sed -i s/archive.ubuntu.com/mirrors.aliyun.com/g /etc/apt/sources.list ; \
sed -i s/security.ubuntu.com/mirrors.aliyun.com/g /etc/apt/sources.list ; \
echo "Use aliyun source for installing libs" ; \
else \
echo "Keep the download source unchanged" ; \
fi
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
libopencv-dev libspdlog-dev \
gnupg \
libssl-dev \
libprotobuf-dev protobuf-compiler \
build-essential \
libjpeg-dev \
libpng-dev \
ccache \
cmake \
gcc \
g++ \
git \
vim \
wget \
curl \
&& rm -rf /var/lib/apt/lists/*
RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
chmod +x ~/miniconda.sh && \
bash ~/miniconda.sh -b -p /opt/conda && \
rm ~/miniconda.sh && \
/opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \
/opt/conda/bin/conda clean -ya
### change the pip source for installing packages
RUN if [ ${USE_SRC_INSIDE} = true ] ; \
then \
/opt/conda/bin/pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple; \
echo "pip using tsinghua source" ; \
else \
echo "Keep pip the download source unchanged" ; \
fi
### pytorch mmcv onnxruntime and openvino
RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHVISION_VERSION}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html \
&& /opt/conda/bin/pip install --no-cache-dir openmim
RUN /opt/conda/bin/mim install --no-cache-dir "mmcv${MMCV_VERSION}" onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev==2022.3.0 "mmengine${MMENGINE_VERSION}"
ENV PATH /opt/conda/bin:$PATH
WORKDIR /root/workspace
### get onnxruntime
RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \
&& tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz
ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
### build ncnn
RUN git clone https://github.com/Tencent/ncnn.git &&\
cd ncnn &&\
export NCNN_DIR=$(pwd) &&\
git submodule update --init &&\
mkdir -p build && cd build &&\
cmake -DNCNN_VULKAN=OFF -DNCNN_SYSTEM_GLSLANG=ON -DNCNN_BUILD_EXAMPLES=ON -DNCNN_PYTHON=ON -DNCNN_BUILD_TOOLS=ON -DNCNN_BUILD_BENCHMARK=ON -DNCNN_BUILD_TESTS=ON .. &&\
make -j$(nproc) &&\
make install &&\
cd /root/workspace/ncnn/python &&\
/opt/conda/bin/mim install -e .
ENV PATH="/root/workspace/ncnn/build/tools/quantize/:${PATH}"
### install mmdeploy
WORKDIR /root/workspace
ARG VERSION
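# e.g. pass --build-arg VERSION=1.3.1 to build a tagged release; omit it to build main (illustrative)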
RUN git clone -b main https://github.com/open-mmlab/mmdeploy.git &&\
cd mmdeploy &&\
if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on main" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
git submodule update --init --recursive &&\
rm -rf build &&\
mkdir build &&\
cd build &&\
cmake -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn .. &&\
make -j$(nproc) &&\
cmake -DMMDEPLOY_TARGET_BACKENDS=ort .. &&\
make -j$(nproc) &&\
cd .. &&\
/opt/conda/bin/mim install -e .
### build SDK
ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \
-DMMDEPLOY_BUILD_SDK=ON \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-DCMAKE_CXX_COMPILER=g++-9 \
-DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \
-Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn \
-DInferenceEngine_DIR=/opt/intel/openvino/runtime/cmake \
-DMMDEPLOY_TARGET_DEVICES=cpu \
-DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
-DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \
-DMMDEPLOY_CODEBASES=all &&\
cmake --build . -- -j$(nproc) && cmake --install . &&\
export SPDLOG_LEVEL=warn &&\
if [ -z ${VERSION} ] ; then echo "Built MMDeploy main for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" ; fi
FROM nvcr.io/nvidia/tensorrt:22.04-py3
ARG CUDA=11.3
ARG PYTHON_VERSION=3.8
ARG TORCH_VERSION=1.10.0
ARG TORCHVISION_VERSION=0.11.0
ARG ONNXRUNTIME_VERSION=1.8.1
ARG PPLCV_VERSION=0.7.0
ENV FORCE_CUDA="1"
ARG MMCV_VERSION=">=2.0.0rc2"
ARG MMENGINE_VERSION=">=0.3.0"
ENV DEBIAN_FRONTEND=noninteractive
### change the system source for installing libs
ARG USE_SRC_INSIDE=false
RUN if [ ${USE_SRC_INSIDE} = true ] ; \
then \
sed -i s/archive.ubuntu.com/mirrors.aliyun.com/g /etc/apt/sources.list ; \
sed -i s/security.ubuntu.com/mirrors.aliyun.com/g /etc/apt/sources.list ; \
echo "Use aliyun source for installing libs" ; \
else \
echo "Keep the download source unchanged" ; \
fi
### update apt and install libs
RUN apt-get update &&\
apt-get install -y vim libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget libssl-dev libopencv-dev libspdlog-dev --no-install-recommends &&\
rm -rf /var/lib/apt/lists/*
RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
chmod +x ~/miniconda.sh && \
bash ~/miniconda.sh -b -p /opt/conda && \
rm ~/miniconda.sh && \
/opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \
/opt/conda/bin/conda clean -ya
### change the pip source for installing packages
RUN if [ "${USE_SRC_INSIDE}" = "true" ] ; \
then \
/opt/conda/bin/pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple; \
echo "pip using tsinghua source" ; \
else \
echo "Keep pip the download source unchanged" ; \
fi
### install pytorch and openmim
RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA} -c pytorch -c conda-forge -y \
&& /opt/conda/bin/pip install --no-cache-dir openmim
### install mmcv, onnxruntime-gpu and mmengine
RUN /opt/conda/bin/mim install --no-cache-dir "mmcv"${MMCV_VERSION} onnxruntime-gpu==${ONNXRUNTIME_VERSION} mmengine${MMENGINE_VERSION}
ENV PATH /opt/conda/bin:$PATH
WORKDIR /root/workspace
### get onnxruntime
RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \
&& tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz
### copy the TensorRT python packages from the system dist-packages into the conda environment
RUN cp -r /usr/local/lib/python${PYTHON_VERSION}/dist-packages/tensorrt* /opt/conda/lib/python${PYTHON_VERSION}/site-packages/
### install mmdeploy
ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
ENV TENSORRT_DIR=/workspace/tensorrt
ARG VERSION
RUN git clone -b main https://github.com/open-mmlab/mmdeploy &&\
cd mmdeploy &&\
if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on main" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
git submodule update --init --recursive &&\
mkdir -p build &&\
cd build &&\
cmake -DMMDEPLOY_TARGET_BACKENDS="ort;trt" .. &&\
make -j$(nproc) &&\
cd .. &&\
/opt/conda/bin/mim install -e .
### build sdk
RUN git clone https://github.com/openppl-public/ppl.cv.git &&\
cd ppl.cv &&\
git checkout tags/v${PPLCV_VERSION} -b v${PPLCV_VERSION} &&\
./build.sh cuda
ENV BACKUP_LD_LIBRARY_PATH=$LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=/usr/local/cuda/compat/lib.real/:$LD_LIBRARY_PATH
RUN cd /root/workspace/mmdeploy &&\
rm -rf build/CM* build/cmake-install.cmake build/Makefile build/csrc &&\
mkdir -p build && cd build &&\
cmake .. \
-DMMDEPLOY_BUILD_SDK=ON \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-DCMAKE_CXX_COMPILER=g++ \
-Dpplcv_DIR=/root/workspace/ppl.cv/cuda-build/install/lib/cmake/ppl \
-DTENSORRT_DIR=${TENSORRT_DIR} \
-DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \
-DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
-DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
-DMMDEPLOY_TARGET_BACKENDS="ort;trt" \
-DMMDEPLOY_CODEBASES=all &&\
make -j$(nproc) && make install &&\
export SPDLOG_LEVEL=warn &&\
if [ -z ${VERSION} ] ; then echo "Built MMDeploy for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" ; fi
ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${BACKUP_LD_LIBRARY_PATH}"
FROM openmmlab/mmdeploy:ubuntu20.04-cuda11.8
ARG MMDEPLOY_VERSION=main
ENV BACKUP_LD_LIBRARY_PATH=$LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=/usr/local/cuda/compat:$LD_LIBRARY_PATH
# build mmdeploy
RUN git clone --recursive -b $MMDEPLOY_VERSION --depth 1 https://github.com/open-mmlab/mmdeploy.git &&\
export Torch_DIR=$(python3 -c "import torch;print(torch.utils.cmake_prefix_path + '/Torch')") &&\
bash mmdeploy/.github/scripts/linux/build.sh "cpu;cuda" "ort;trt;ncnn;torchscript;openvino" \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-Dpplcv_DIR=${pplcv_DIR} \
-DTENSORRT_DIR=${TENSORRT_DIR} \
-DInferenceEngine_DIR=${InferenceEngine_DIR} \
-DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \
-Dncnn_DIR=${ncnn_DIR} \
-DTorch_DIR=${Torch_DIR} &&\
cd mmdeploy &&\
python3 -m pip install -U openmim pycuda &&\
python3 -m mim install "mmcv>=2.0.0" &&\
python3 -m pip install -r requirements.txt &&\
python3 -m pip install -e . --user
ENV MMDeploy_DIR="/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy"
ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${BACKUP_LD_LIBRARY_PATH}"
ENV PATH="/root/workspace/mmdeploy/build/bin:${PATH}"
ENV PYTHONPATH="/root/workspace/mmdeploy/build/lib:${PYTHONPATH}"
FROM quay.io/pypa/manylinux2014_x86_64
# package urls
ARG CUDA_URL
ARG CUDNN_URL
ARG TENSORRT_URL
ARG CUDA_VERSION=11.8
# important dependencies
ARG OPENCV_VERSION=4.5.5
ARG PPLCV_VERSION=0.7.0
# backends
ARG ONNXRUNTIME_VERSION=1.15.1
ARG TENSORRT_VERSION=8.6.1.6
# torch
ARG TORCH_VERSION=2.0.0
ARG TORCHVISION_VERSION=0.15.0
ARG TOOLSET_VERSION=7
USER root
WORKDIR /root/workspace
ENV FORCE_CUDA="1"
# install toolset
# enable the SCL repo first so that the devtoolset packages can be resolved
RUN yum install -y centos-release-scl &&\
    yum install -y devtoolset-${TOOLSET_VERSION}-gcc*
ENV TOOLSET_DIR=/opt/rh/devtoolset-${TOOLSET_VERSION}/root/usr
ENV PATH=$TOOLSET_DIR/bin:$PATH
ENV LD_LIBRARY_PATH=$TOOLSET_DIR/lib:$TOOLSET_DIR/lib64:/usr/local/lib64
# install cuda cudnn
RUN curl -fsSL -v -o ./cuda_install.run -O $CUDA_URL &&\
chmod +x ./cuda_install.run &&\
./cuda_install.run --silent --toolkit &&\
rm -f ./cuda_install.run &&\
curl -fsSL -v -o ./cudnn.tgz -O $CUDNN_URL &&\
tar -xzvf ./cudnn.tgz &&\
rm -f ./cudnn.tgz &&\
mv cu* /opt/cudnn
# install ort, trt
RUN curl -fsSL -v -o ./onnxruntime.tgz -O https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime.tgz &&\
rm onnxruntime.tgz &&\
mv onnxruntime* /opt/onnxruntime &&\
curl -fsSL -v -o ./onnxruntime.tgz -O https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime.tgz &&\
rm onnxruntime.tgz &&\
mv onnxruntime* /opt/onnxruntime-gpu &&\
curl -fsSL -v -o ./tensorrt.tgz -O $TENSORRT_URL &&\
tar -xzvf ./tensorrt.tgz &&\
rm -f ./tensorrt.tgz &&\
mv ./TensorRT* /opt/TensorRT &&\
cd /opt/TensorRT &&\
rm -rf data doc samples uff
ENV CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda
ENV CUDNN_DIR=/opt/cudnn
ENV ONNXRUNTIME_DIR=/opt/onnxruntime
ENV ONNXRUNTIME_GPU_DIR=/opt/onnxruntime-gpu
ENV TENSORRT_DIR=/opt/TensorRT
ENV LD_LIBRARY_PATH=$CUDA_TOOLKIT_ROOT_DIR/lib64:$CUDNN_DIR/lib64:$LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=${ONNXRUNTIME_GPU_DIR}/lib:$TENSORRT_DIR/lib:$LD_LIBRARY_PATH
ENV PATH=$TENSORRT_DIR/bin:$PATH
### install ppl.cv
RUN git clone --depth 1 --branch v${PPLCV_VERSION} https://github.com/openppl-public/ppl.cv.git &&\
cd ppl.cv &&\
./build.sh cuda &&\
mv cuda-build/install ./ &&\
rm -rf cuda-build
ENV pplcv_DIR=/root/workspace/ppl.cv/install/lib/cmake/ppl
# build opencv as static lib
RUN curl -fsSL -v -o ./opencv.tgz -O https://github.com/opencv/opencv/archive/refs/tags/${OPENCV_VERSION}.tar.gz &&\
tar -xzvf ./opencv.tgz &&\
rm -f ./opencv.tgz &&\
cd opencv-${OPENCV_VERSION} &&\
mkdir build && cd build &&\
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr/local \
-DOPENCV_FORCE_3RDPARTY_BUILD=ON \
-DBUILD_TESTS=OFF \
-DBUILD_PERF_TESTS=OFF \
-DBUILD_SHARED_LIBS=OFF &&\
make -j$(nproc) && make install
ENV OpenCV_DIR=/usr/local/lib64/cmake/opencv4
# install conda env
RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh &&\
chmod +x ~/miniconda.sh &&\
bash ~/miniconda.sh -b -p /opt/conda &&\
rm ~/miniconda.sh &&\
/opt/conda/bin/conda create -n mmdeploy-3.6 python=3.6 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.7 python=3.7 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.8 python=3.8 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.9 python=3.9 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.10 python=3.10 -y &&\
/opt/conda/bin/conda create -n mmdeploy-3.11 python=3.11 -y &&\
export CUDA_INT=$(echo $CUDA_VERSION | awk '{split($0, a, "."); print a[1]a[2]}') &&\
/opt/conda/bin/conda create -n torch${TORCH_VERSION} python=3.8 -y &&\
/opt/conda/envs/mmdeploy-3.6/bin/pip install --no-cache-dir setuptools wheel pyyaml packaging &&\
/opt/conda/envs/torch${TORCH_VERSION}/bin/pip install --no-cache-dir onnxruntime-gpu==${ONNXRUNTIME_VERSION} &&\
/opt/conda/envs/torch${TORCH_VERSION}/bin/pip install ${TENSORRT_DIR}/python/tensorrt-*-cp38-none-linux_x86_64.whl &&\
/opt/conda/envs/torch${TORCH_VERSION}/bin/pip install --no-cache-dir torch==${TORCH_VERSION}+cu${CUDA_INT} \
torchvision==${TORCHVISION_VERSION}+cu${CUDA_INT} -f https://download.pytorch.org/whl/torch_stable.html &&\
/opt/conda/bin/conda init bash &&\
/opt/conda/bin/conda clean -ya
ENV CONDA=/opt/conda
ENV PATH=$CONDA/bin:$PATH
FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu16.04
ARG CUDA_URL
ARG CUDNN_URL
ARG TENSORRT_URL
ARG CUDA_VERSION=11.8
ARG OPENCV_VERSION=4.5.5
ARG PPLCV_VERSION=0.7.0
ARG TENSORRT_VERSION=8.6.1.6
ARG ONNXRUNTIME_VERSION=1.15.1
USER root
ENV WORK_DIR=/root/workspace
WORKDIR $WORK_DIR
RUN apt-get update && apt-get install software-properties-common -y &&\
add-apt-repository ppa:git-core/ppa -y &&\
add-apt-repository ppa:ubuntu-toolchain-r/test -y &&\
apt-get update && apt-get install git wget curl g++-7 -y &&\
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 10 &&\
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 10 &&\
apt-get clean
RUN wget https://github.com/Kitware/CMake/releases/download/v3.25.3/cmake-3.25.3-linux-x86_64.sh &&\
bash cmake-3.25.3-linux-x86_64.sh --skip-license --prefix=/usr &&\
rm -rf cmake-3.25.3-linux-x86_64.sh
# install cuda cudnn
RUN curl -fsSL -v -o ./cuda_install.run -O $CUDA_URL &&\
chmod +x ./cuda_install.run &&\
./cuda_install.run --silent --toolkit &&\
rm -f ./cuda_install.run &&\
curl -fsSL -v -o ./cudnn.tgz -O $CUDNN_URL &&\
tar -xvf ./cudnn.tgz &&\
rm -f ./cudnn.tgz &&\
mv cu* /opt/cudnn
# install ort, trt
RUN curl -fsSL -v -o ./onnxruntime.tgz -O https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime.tgz &&\
rm onnxruntime.tgz &&\
mv onnxruntime* /opt/onnxruntime &&\
curl -fsSL -v -o ./onnxruntime.tgz -O https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz &&\
tar -xzvf onnxruntime.tgz &&\
rm onnxruntime.tgz &&\
mv onnxruntime* /opt/onnxruntime-gpu &&\
curl -fsSL -v -o ./tensorrt.tgz -O $TENSORRT_URL &&\
tar -xzvf ./tensorrt.tgz &&\
rm -f ./tensorrt.tgz &&\
mv ./TensorRT* /opt/TensorRT &&\
cd /opt/TensorRT &&\
rm -rf data doc samples uff
ENV CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-${CUDA_VERSION}
ENV CUDNN_DIR=/opt/cudnn
ENV ONNXRUNTIME_DIR=/opt/onnxruntime
ENV ONNXRUNTIME_GPU_DIR=/opt/onnxruntime-gpu
ENV TENSORRT_DIR=/opt/TensorRT
ENV LD_LIBRARY_PATH=$CUDA_TOOLKIT_ROOT_DIR/lib64:$CUDNN_DIR/lib64:$LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=${ONNXRUNTIME_GPU_DIR}/lib:$TENSORRT_DIR/lib:$LD_LIBRARY_PATH
ENV PATH=$TENSORRT_DIR/bin:$PATH
RUN wget https://github.com/opencv/opencv/archive/refs/tags/${OPENCV_VERSION}.tar.gz -O ./opencv.tgz &&\
tar -xzvf ./opencv.tgz &&\
rm -f ./opencv.tgz &&\
cd opencv-${OPENCV_VERSION} &&\
mkdir build && cd build &&\
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=../install \
-DOPENCV_FORCE_3RDPARTY_BUILD=ON \
-DBUILD_TESTS=OFF \
-DBUILD_PERF_TESTS=OFF \
-DBUILD_opencv_python2=OFF \
-DBUILD_opencv_python3=OFF \
-DBUILD_SHARED_LIBS=OFF &&\
make -j$(nproc) && make install &&\
cd ../ && rm -rf build
ENV OpenCV_DIR=/root/workspace/opencv-${OPENCV_VERSION}/install/lib/cmake/opencv4
RUN git clone --depth 1 --branch v${PPLCV_VERSION} https://github.com/openppl-public/ppl.cv.git &&\
cd ppl.cv &&\
./build.sh cuda &&\
mv cuda-build/install ./ &&\
rm -rf cuda-build
ENV pplcv_DIR=/root/workspace/ppl.cv/install/lib/cmake/ppl
RUN wget https://repo.anaconda.com/miniconda/Miniconda3-py38_23.1.0-1-Linux-x86_64.sh -O ~/miniconda.sh &&\
chmod +x ~/miniconda.sh &&\
bash ~/miniconda.sh -b -p /opt/conda &&\
rm ~/miniconda.sh
ENV PATH=/opt/conda/bin:$PATH
RUN pip install packaging pyyaml
ENTRYPOINT ["/bin/bash"]
# Build for Android
- [Build for Android](#build-for-android)
- [Build From Source](#build-from-source)
- [Install Toolchains](#install-toolchains)
- [Install Dependencies](#install-dependencies)
- [Install Dependencies for SDK](#install-dependencies-for-sdk)
- [Build MMDeploy](#build-mmdeploy)
- [Build SDK and Demos](#build-sdk-and-demos)
______________________________________________________________________
MMDeploy supports cross-compilation for the Android platform. The model converter runs on a Linux host, while the SDK runs on the Android device, so the Android build takes two steps:
1. Build the model converter on Linux; please refer to [How to build linux](linux-x86_64.md).
2. Build the SDK with the Android toolchain on Linux.
This document only covers the second step: building the SDK with the Android toolchain on Linux.
## Build From Source
### Install Toolchains
- cmake
**Make sure your cmake version is >= 3.14.0**. If it is not, you can follow the instructions below to install cmake 3.20.0. For other versions of cmake, please refer to the [cmake website](https://cmake.org/install).
```bash
wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.tar.gz
tar -xzvf cmake-3.20.0-linux-x86_64.tar.gz
sudo ln -sf $(pwd)/cmake-3.20.0-linux-x86_64/bin/* /usr/bin/
```
- ANDROID NDK 19+
**Make sure your android ndk version is >= 19.0**. If it is not, you can follow the instructions below to install android ndk r23c. For other versions of the android ndk, please refer to the [android ndk website](https://developer.android.com/ndk/downloads).
```bash
wget https://dl.google.com/android/repository/android-ndk-r23c-linux.zip
unzip android-ndk-r23c-linux.zip
cd android-ndk-r23c
export NDK_PATH=${PWD}
```
### Install Dependencies
#### Install Dependencies for SDK
You can skip this chapter if you are only interested in the model converter.
<table>
<thead>
<tr>
<th>NAME </th>
<th>INSTALLATION </th>
</tr>
</thead>
<tbody>
<tr>
<td>OpenCV<br>(>=3.0) </td>
<td>
<pre><code>
export OPENCV_VERSION=4.6.0
wget https://github.com/opencv/opencv/releases/download/${OPENCV_VERSION}/opencv-${OPENCV_VERSION}-android-sdk.zip
unzip opencv-${OPENCV_VERSION}-android-sdk.zip
export OPENCV_ANDROID_SDK_DIR=${PWD}/OpenCV-android-sdk
</code></pre>
</td>
</tr>
<tr>
<td>ncnn </td>
<td>A high-performance neural network inference framework that supports Android.<br>
<b> Currently, MMDeploy supports ncnn version 20220721, which has to be downloaded via <code>git clone</code>. For the supported Android ABIs, see <a href='https://github.com/Tencent/ncnn/releases'> here </a>. </b><br>
<pre><code>
git clone -b 20220721 https://github.com/Tencent/ncnn.git
cd ncnn
git submodule update --init
export NCNN_DIR=${PWD}
export ANDROID_ABI=arm64-v8a
mkdir -p build_${ANDROID_ABI}
cd build_${ANDROID_ABI}
cmake -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake -DANDROID_ABI="${ANDROID_ABI}" -DANDROID_PLATFORM=android-30 -DNCNN_VULKAN=ON -DNCNN_DISABLE_EXCEPTION=OFF -DNCNN_DISABLE_RTTI=OFF -DANDROID_USE_LEGACY_TOOLCHAIN_FILE=False ..
make -j$(nproc) install
</code></pre>
</td>
</tr>
<tr>
<td>OpenJDK </td>
<td>It is necessary for building the Java API.<br>
See <a href='https://github.com/open-mmlab/mmdeploy/tree/main/csrc/mmdeploy/apis/java/README.md'> Java API build </a> for the build tutorial.
</td>
</tr>
</tbody>
</table>
### Build MMDeploy
#### Build SDK and Demos
MMDeploy provides the following recipe for building the SDK for Android, with ncnn as the inference engine.
- cpu + ncnn
```Bash
export ANDROID_ABI=arm64-v8a
cd ${MMDEPLOY_DIR}
mkdir -p build_${ANDROID_ABI} && cd build_${ANDROID_ABI}
cmake .. \
-DMMDEPLOY_BUILD_SDK=ON \
-DMMDEPLOY_BUILD_SDK_JAVA_API=ON \
-DMMDEPLOY_BUILD_EXAMPLES=ON \
-DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-${ANDROID_ABI} \
-Dncnn_DIR=${NCNN_DIR}/build_${ANDROID_ABI}/install/lib/cmake/ncnn \
-DMMDEPLOY_TARGET_BACKENDS=ncnn \
-DMMDEPLOY_SHARED_LIBS=OFF \
-DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake \
-DANDROID_USE_LEGACY_TOOLCHAIN_FILE=False \
-DANDROID_ABI=${ANDROID_ABI} \
-DANDROID_PLATFORM=android-30 \
-DANDROID_CPP_FEATURES="rtti exceptions"
make -j$(nproc) && make install
```
Please check the [cmake build option spec](cmake_option.md) for more details.
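After `make install`, you can give the SDK a quick smoke test on a device. The following is a minimal sketch rather than an official recipe: it assumes a device is connected via `adb`, and the demo binary name (`image_classification`) and the model/image paths are illustrative placeholders.

```bash
cd ${MMDEPLOY_DIR}/build_${ANDROID_ABI}
# copy the installed SDK (binaries + libraries) and test inputs to the device
adb push install /data/local/tmp/mmdeploy
adb push /path/to/sdk_model /data/local/tmp/sdk_model
adb push /path/to/test.jpg /data/local/tmp/test.jpg
# run a demo with the SDK libraries on the loader search path
adb shell "cd /data/local/tmp/mmdeploy/bin && \
  LD_LIBRARY_PATH=../lib ./image_classification cpu /data/local/tmp/sdk_model /data/local/tmp/test.jpg"
```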
# Use Docker Image
This document describes how to install and run mmdeploy with [Docker](https://docs.docker.com/get-docker/).
## Get prebuilt docker images
MMDeploy provides prebuilt docker images on [Docker Hub](https://hub.docker.com/r/openmmlab/mmdeploy) for the convenience of its users. The images are built from both the latest mmdeploy and its released versions. For instance, the image tagged `openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy` is built from the latest mmdeploy, while `openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy1.2.0` is for `mmdeploy==1.2.0`.
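To pin a released version instead of the latest build, pull its versioned tag directly (the tag follows the naming convention above):

```shell
docker pull openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy1.2.0
```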
The specifications of the Docker Image are shown below.
| Item | Version |
| :---------: | :---------: |
| OS | Ubuntu20.04 |
| CUDA | 11.8 |
| CUDNN | 8.9 |
| Python | 3.8.10 |
| Torch | 2.0.0 |
| TorchVision | 0.15.0 |
| TorchScript | 2.0.0 |
| TensorRT | 8.6.1.6 |
| ONNXRuntime | 1.15.1 |
| OpenVINO | 2022.3.0 |
| ncnn | 20230816 |
| openppl | 0.8.1 |
You can select a [tag](https://hub.docker.com/r/openmmlab/mmdeploy/tags) and run `docker pull` to get the docker image:
```shell
export TAG=openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy
docker pull $TAG
```
## Build docker images (optional)
If the prebuilt docker images do not meet your requirements, you can build your own image with the following script.
The docker file is `docker/Release/Dockerfile`, and its build argument `MMDEPLOY_VERSION` can be a [tag](https://github.com/open-mmlab/mmdeploy/tags) or a branch of [mmdeploy](https://github.com/open-mmlab/mmdeploy).
```shell
export MMDEPLOY_VERSION=main
export TAG=mmdeploy-${MMDEPLOY_VERSION}
docker build docker/Release/ -t ${TAG} --build-arg MMDEPLOY_VERSION=${MMDEPLOY_VERSION}
```
## Run docker container
After pulling or building the docker image, you can use `docker run` to launch a container:
```shell
export TAG=openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy
docker run --gpus=all -it --rm $TAG
```
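To exchange models and data with the container, you can mount a host directory into it. A minimal sketch, assuming your working files live in `/path/to/workdir` on the host:

```shell
export TAG=openmmlab/mmdeploy:ubuntu20.04-cuda11.8-mmdeploy
# mount a host directory so models and results persist outside the container
docker run --gpus=all -it --rm -v /path/to/workdir:/root/workdir $TAG
```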
## FAQs
1. CUDA error: the provided PTX was compiled with an unsupported toolchain:
As described [here](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754), update the GPU driver to the latest one for your GPU.
2. docker: Error response from daemon: could not select device driver "" with capabilities: [gpu].

   This usually means the NVIDIA container toolkit is missing. Install it and restart docker:
```shell
# Add the package repositories
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo systemctl restart docker
```
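After installing the toolkit, you can verify that containers see the GPU by running `nvidia-smi` in a throwaway CUDA container (the image tag below is only an example):

```shell
sudo docker run --rm --gpus all nvidia/cuda:11.8.0-base-ubuntu20.04 nvidia-smi
```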
# Build from Script
From user surveys we know that most users are already familiar with python and torch before using mmdeploy, so we provide scripts to simplify its installation.
Assuming you already have
- python3 -m pip (`conda` or `pyenv`)
- nvcc (depends on inference backend)
- torch (not compulsory)
run the following script to install mmdeploy with the ncnn backend (the `nproc` job-count argument is optional).
```bash
$ cd /path/to/mmdeploy
$ python3 tools/scripts/build_ubuntu_x64_ncnn.py
..
```
A sudo password may be required during the run, and the script will do its best to build and install the mmdeploy SDK and demos:
- Detect the host OS version and the `make` job number, check whether it is running as `root`, and try to fix `python3 -m pip`
- Find the necessary basic tools, such as g++-7, cmake, wget, etc.
- Compile the necessary dependencies, such as pyncnn and protobuf
The script also tries to avoid affecting the host environment:
- The dependencies compiled from source are placed in the `mmdeploy-dep` directory at the same level as mmdeploy
- The script does not modify variables such as PATH, LD_LIBRARY_PATH, or PYTHONPATH
- The environment variables that do need to be modified are printed; **please pay attention to the final output**
The script finally executes `python3 tools/check_env.py`. After a successful installation, it should display the version number of the corresponding backend and `ops_is_avaliable : True`, for example:
```bash
$ python3 tools/check_env.py
..
2022-09-13 14:49:13,767 - mmdeploy - INFO - **********Backend information**********
2022-09-13 14:49:14,116 - mmdeploy - INFO - onnxruntime: 1.8.0 ops_is_avaliable : True
2022-09-13 14:49:14,131 - mmdeploy - INFO - tensorrt: 8.4.1.5 ops_is_avaliable : True
2022-09-13 14:49:14,139 - mmdeploy - INFO - ncnn: 1.0.20220901 ops_is_avaliable : True
2022-09-13 14:49:14,150 - mmdeploy - INFO - pplnn_is_avaliable: True
..
```
Here are the verified installation scripts. If you want mmdeploy to support multiple backends at the same time, execute the corresponding script once per backend:
| script | OS version |
| :-----------------------------: | :-----------------: |
| build_ubuntu_x64_ncnn.py | 18.04/20.04 |
| build_ubuntu_x64_ort.py | 18.04/20.04 |
| build_ubuntu_x64_pplnn.py | 18.04/20.04 |
| build_ubuntu_x64_torchscript.py | 18.04/20.04 |
| build_ubuntu_x64_tvm.py | 18.04/20.04 |
| build_jetson_orin_python38.sh | JetPack5.0 L4T 34.1 |
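For example, to add ONNXRuntime support after the ncnn build above, run the corresponding script once; a sketch, assuming the optional first argument sets the `make` job number as described earlier:

```bash
cd /path/to/mmdeploy
# the job count argument is optional; omit it to let the script decide
python3 tools/scripts/build_ubuntu_x64_ort.py $(nproc)
```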
# Build from Source
## Download
```shell
git clone -b main git@github.com:open-mmlab/mmdeploy.git --recursive
```
Note:
- If fetching the submodules fails, you can get them manually with the following commands:
```shell
cd mmdeploy
git clone git@github.com:NVIDIA/cub.git third_party/cub
cd third_party/cub
git checkout c3cceac115
# go back to third_party directory and git clone pybind11
cd ..
git clone git@github.com:pybind/pybind11.git pybind11
cd pybind11
git checkout 70a58c5
cd ..
git clone git@github.com:gabime/spdlog.git spdlog
cd spdlog
git checkout 9e8e52c048
```
- If `git clone` over `SSH` fails, you can try the `HTTPS` protocol instead:
```shell
git clone -b main https://github.com/open-mmlab/mmdeploy.git --recursive
```
## Build
Please visit the following links to find out how to build MMDeploy according to the target platform.
- [Linux-x86_64](linux-x86_64.md)
- [Windows](windows.md)
- [MacOS](macos-arm64.md)
- [Android-aarch64](android.md)
- [NVIDIA Jetson](jetsons.md)
- [SNPE](snpe.md)
- [RISC-V](riscv.md)
- [Rockchip](rockchip.md)
# CMake Build Option Spec
<table class="docutils">
<thead>
<tr>
<th>NAME</th>
<th>VALUE</th>
<th>DEFAULT</th>
<th>REMARK</th>
</tr>
</thead>
<tbody>
<tr>
<td>MMDEPLOY_SHARED_LIBS</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Switch to build shared libs</td>
</tr>
<tr>
<td>MMDEPLOY_BUILD_SDK</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Switch to build MMDeploy SDK</td>
</tr>
<tr>
<td>MMDEPLOY_BUILD_SDK_MONOLITHIC</td>
<td>{ON, OFF}</td>
<td>ON</td>
<td>Build single lib</td>
</tr>
<tr>
<td>MMDEPLOY_BUILD_TEST</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Switch to build MMDeploy SDK unit tests</td>
</tr>
<tr>
<td>MMDEPLOY_BUILD_SDK_PYTHON_API</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Switch to build MMDeploy SDK python package</td>
</tr>
<tr>
<td>MMDEPLOY_BUILD_SDK_CSHARP_API</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Build C# SDK API</td>
</tr>
<tr>
<td>MMDEPLOY_BUILD_SDK_JAVA_API</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Build Java SDK API</td>
</tr>
<tr>
<td>MMDEPLOY_SPDLOG_EXTERNAL</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Build with the spdlog package provided by the system</td>
</tr>
<tr>
<td>MMDEPLOY_ZIP_MODEL</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Enable loading SDK models in zip format</td>
</tr>
<tr>
<td>MMDEPLOY_COVERAGE</td>
<td>{ON, OFF}</td>
<td>OFF</td>
<td>Build C++ code for the coverage report</td>
</tr>
<tr>
<td>MMDEPLOY_TARGET_DEVICES</td>
<td>{"cpu", "cuda"}</td>
<td>cpu</td>
<td>Enable target device. You can enable more by
passing a semicolon separated list of device names to <code>MMDEPLOY_TARGET_DEVICES</code> variable, e.g. <code>-DMMDEPLOY_TARGET_DEVICES="cpu;cuda"</code> </td>
</tr>
<tr>
<td>MMDEPLOY_TARGET_BACKENDS</td>
<td>{"trt", "ort", "pplnn", "ncnn", "openvino", "torchscript", "snpe", "tvm"}</td>
<td>N/A</td>
<td>Enable the target inference engines. <b>By default, no inference engine is set, since it highly depends on the use case.</b> When more than one engine is specified, pass a semicolon separated list of backend names, e.g. <pre><code>-DMMDEPLOY_TARGET_BACKENDS="trt;ort;pplnn;ncnn;openvino"</code></pre>
After specifying an inference engine, its package path has to be passed to cmake as follows: <br>
1. <b>trt</b>: TensorRT. <code>TENSORRT_DIR</code> and <code>CUDNN_DIR</code> are needed.
<pre><code>
-DTENSORRT_DIR=${TENSORRT_DIR}
-DCUDNN_DIR=${CUDNN_DIR}
</code></pre>
2. <b>ort</b>: ONNXRuntime. <code>ONNXRUNTIME_DIR</code> is needed.
<pre><code>-DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR}</code></pre>
3. <b>pplnn</b>: PPL.NN. <code>pplnn_DIR</code> is needed.
<pre><code>-Dpplnn_DIR=${PPLNN_DIR}</code></pre>
4. <b>ncnn</b>: ncnn. <code>ncnn_DIR</code> is needed.
<pre><code>-Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn</code></pre>
5. <b>openvino</b>: OpenVINO. <code>InferenceEngine_DIR</code> is needed.
<pre><code>-DInferenceEngine_DIR=${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/share</code></pre>
6. <b>torchscript</b>: TorchScript. <code>Torch_DIR</code> is needed.
<pre><code>-DTorch_DIR=${Torch_DIR}</code></pre>
7. <b>snpe</b>: Qualcomm SNPE. <code>SNPE_ROOT</code> must exist as an environment variable because of the client/server mode.<br>
8. <b>coreml</b>: CoreML. <code>Torch_DIR</code> is required.
<pre><code>-DTorch_DIR=${Torch_DIR}</code></pre>
9. <b>tvm</b>: TVM. <code>TVM_DIR</code> is required. <pre><code>-DTVM_DIR=${TVM_DIR}</code></pre>
</td>
</tr>
<tr>
<td>MMDEPLOY_CODEBASES</td>
<td>{"mmpretrain", "mmdet", "mmseg", "mmagic", "mmocr", "all"}</td>
<td>all</td>
<td>Enable codebase's postprocess modules. You can provide a semicolon separated list of codebase names to enable them, e.g., <code>-DMMDEPLOY_CODEBASES="mmpretrain;mmdet"</code>. Or you can pass <code>all</code> to enable them all, i.e., <code>-DMMDEPLOY_CODEBASES=all</code></td>
</tr>
</tbody>
</table>
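Putting the options above together, a typical SDK configuration for CUDA with the TensorRT and ONNXRuntime backends might look like the sketch below; the `*_DIR` values are placeholders for your local installations.

```bash
cd ${MMDEPLOY_DIR} && mkdir -p build && cd build
cmake .. \
    -DMMDEPLOY_BUILD_SDK=ON \
    -DMMDEPLOY_TARGET_DEVICES="cpu;cuda" \
    -DMMDEPLOY_TARGET_BACKENDS="trt;ort" \
    -DTENSORRT_DIR=${TENSORRT_DIR} \
    -DCUDNN_DIR=${CUDNN_DIR} \
    -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \
    -DMMDEPLOY_CODEBASES=all
make -j$(nproc) && make install
```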