Unverified Commit 1644fff3 authored by Nicolas Hug's avatar Nicolas Hug Committed by GitHub
Browse files

Cleanup C++ model example and cmake CI job (#8411)

parent d23a6e16
......@@ -46,16 +46,10 @@ fi
echo '::group::Prepare CMake builds'
mkdir -p cpp_build
pushd test/tracing/frcnn
python trace_model.py
pushd examples/cpp
python script_model.py
mkdir -p build
mv fasterrcnn_resnet50_fpn.pt build
popd
pushd examples/cpp/hello_world
python trace_model.py
mkdir -p build
mv resnet18.pt build
mv resnet18.pt fasterrcnn_resnet50_fpn.pt build
popd
# This was only needed for the tracing above
......@@ -65,6 +59,7 @@ echo '::endgroup::'
echo '::group::Build and install libtorchvision'
pushd cpp_build
# On macOS, CMake is looking for the library (*.dylib) and the header (*.h) separately. By default, it prefers to load
# the header from other packages that install the library. This easily leads to a mismatch if the library installed
# from conda doesn't have the exact same version. Thus, we need to explicitly set CMAKE_FIND_FRAMEWORK=NEVER to force
......@@ -85,40 +80,24 @@ fi
popd
echo '::endgroup::'
echo '::group::Build and run project that uses Faster-RCNN'
pushd test/tracing/frcnn/build
cmake .. -DTorch_DIR="${Torch_DIR}" -DWITH_CUDA="${WITH_CUDA}" \
-DCMAKE_PREFIX_PATH="${CONDA_PREFIX}" \
-DCMAKE_FIND_FRAMEWORK=NEVER
if [[ $OS_TYPE == windows ]]; then
"${PACKAGING_DIR}/windows/internal/vc_env_helper.bat" "${PACKAGING_DIR}/windows/internal/build_frcnn.bat" $JOBS
cd Release
cp ../fasterrcnn_resnet50_fpn.pt .
else
make -j$JOBS
fi
./test_frcnn_tracing
popd
echo '::endgroup::'
echo '::group::Build and run C++ example'
pushd examples/cpp/hello_world/build
pushd examples/cpp/build
cmake .. -DTorch_DIR="${Torch_DIR}" \
-DCMAKE_PREFIX_PATH="${CONDA_PREFIX}" \
-DCMAKE_FIND_FRAMEWORK=NEVER
-DCMAKE_FIND_FRAMEWORK=NEVER \
-DUSE_TORCHVISION=ON # Needed for faster-rcnn since it's using torchvision ops like NMS.
if [[ $OS_TYPE == windows ]]; then
"${PACKAGING_DIR}/windows/internal/vc_env_helper.bat" "${PACKAGING_DIR}/windows/internal/build_cpp_example.bat" $JOBS
cd Release
cp ../resnet18.pt .
cp ../fasterrcnn_resnet50_fpn.pt .
else
make -j$JOBS
fi
./hello-world
./run_model resnet18.pt
./run_model fasterrcnn_resnet50_fpn.pt
popd
echo '::endgroup::'
......@@ -74,40 +74,14 @@ python setup.py install
# Using the models on C++
TorchVision provides an example project for how to use the models on C++ using JIT Script.
Installation From source:
```
mkdir build
cd build
# Add -DWITH_CUDA=on support for the CUDA if needed
cmake ..
make
make install
```
Once installed, the library can be accessed in cmake (after properly configuring `CMAKE_PREFIX_PATH`) via the
`TorchVision::TorchVision` target:
```
find_package(TorchVision REQUIRED)
target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
```
The `TorchVision` package will also automatically look for the `Torch` package and add it as a dependency to
`my-target`, so make sure that it is also available to cmake via the `CMAKE_PREFIX_PATH`.
For an example setup, take a look at `examples/cpp/hello_world`.
Python linking is disabled by default when compiling TorchVision with CMake, this allows you to run models without any
Python dependency. In some special cases where TorchVision's operators are used from Python code, you may need to link
to Python. This can be done by passing `-DUSE_PYTHON=on` to CMake.
### TorchVision Operators
In order to get the torchvision operators registered with torch (e.g. for the JIT), all you need to do is ensure that
you `#include <torchvision/vision.h>` in your project.
Refer to [example/cpp](https://github.com/pytorch/vision/tree/main/examples/cpp).
**DISCLAIMER**: the `libtorchvision` library includes the torchvision
custom ops as well as most of the C++ torchvision APIs. Those APIs do not come
with any backward-compatibility guarantees and may change from one version to
the next. Only the Python APIs are stable and with backward-compatibility
guarantees. So, if you need stability within a C++ environment, your best bet is
to export the Python APIs via torchscript.
## Documentation
......
# Minimal CMake project for the run_model C++ example executable.
cmake_minimum_required(VERSION 3.10)
project(run_model)
# OFF by default: plain Torchscript models (e.g. resnet18) only need LibTorch.
# Turn it ON for models that rely on torchvision C++ ops (e.g. NMS in the
# detection models).
option(USE_TORCHVISION "Whether to link to torchvision" OFF)
find_package(Torch REQUIRED)
if(USE_TORCHVISION)
find_package(TorchVision REQUIRED)
endif()
add_executable(run_model run_model.cpp)
# Always link the core LibTorch libraries.
target_link_libraries(run_model "${TORCH_LIBRARIES}")
if(USE_TORCHVISION)
# The TorchVision::TorchVision target also pulls in its torch dependencies.
target_link_libraries(run_model TorchVision::TorchVision)
endif()
# LibTorch requires at least C++17.
set_property(TARGET run_model PROPERTY CXX_STANDARD 17)
Using torchvision models in C++
===============================
This is a minimal example of getting TorchVision models to work in C++ with
Torchscript. The model is first scripted in Python and exported to a file, and
then loaded in C++. For a similar tutorial, see [this
tutorial](https://pytorch.org/tutorials/advanced/cpp_export.html).
In order to successfully compile this example, make sure you have ``LibTorch``
installed. You can either:
- Install PyTorch normally
- Or download the LibTorch C++ distribution.
In both cases, refer [here](https://pytorch.org/get-started/locally/) for the
corresponding install or download instructions.
Some torchvision models only depend on PyTorch operators, and can be used in C++
without depending on the torchvision lib. Other models rely on torchvision's C++
operators like NMS, RoiAlign (typically the detection models) and those need to
be linked against the torchvision lib.
We'll first see the simpler case of running a model without the torchvision lib
dependency.
Running a model that doesn't need torchvision lib
-------------------------------------------------
Create a ``build`` directory inside the current one.
```bash
mkdir build
cd build
```
Then run `python ../trace_model.py` which should create a `resnet18.pt` file in
the build directory. This is the scripted model that will be used in the C++
code.
We can now start building with CMake. We have to tell CMake where it can find
the necessary PyTorch resources. If you installed PyTorch normally, you can do:
```bash
TORCH_PATH=$(python -c "import pathlib, torch; print(pathlib.Path(torch.__path__[0]))")
Torch_DIR="${TORCH_PATH}/share/cmake/Torch" # there should be .cmake files in there
cmake .. -DTorch_DIR=$Torch_DIR
```
If instead you downloaded the LibTorch somewhere, you can do:
```bash
cmake .. -DCMAKE_PREFIX_PATH=/path/to/libtorch
```
Then `cmake --build .` and you should now be able to run
```bash
./run_model resnet18.pt
```
If you try to run `run_model` with a model that depends on the torchvision lib, like
`./run_model fasterrcnn_resnet50_fpn.pt`, you should get a runtime error. This is
because the executable wasn't linked against the torchvision lib.
Running a model that needs torchvision lib
------------------------------------------
First, we need to build the torchvision lib. To build the torchvision lib go to
the root of the torchvision project and run:
```bash
mkdir build
cd build
cmake .. -DCMAKE_PREFIX_PATH=/path/to/libtorch # or -DTorch_DIR= if you installed PyTorch normally, see above
cmake --build .
cmake --install .
```
You may want to pass `-DCMAKE_INSTALL_PREFIX=/path/to/libtorchvision` for
cmake to copy/install the files to a specific location (e.g. `$CONDA_PREFIX`).
On Windows, you may also need to pass `-DUSE_PYTHON=on`. Refer to the corresponding
`CMakeLists.txt` for additional options.
**DISCLAIMER**: the `libtorchvision` library includes the torchvision
custom ops as well as most of the C++ torchvision APIs. Those APIs do not come
with any backward-compatibility guarantees and may change from one version to
the next. Only the Python APIs are stable and with backward-compatibility
guarantees. So, if you need stability within a C++ environment, your best bet is
to export the Python APIs via torchscript.
Now that libtorchvision is built and installed we can tell our project to use
and link to it via the `-DUSE_TORCHVISION` flag. We also need to tell CMake
where to find it, just like we did with LibTorch, e.g.:
```bash
cmake .. -DTorch_DIR=$Torch_DIR -DTorchVision_DIR=path/to/libtorchvision -DUSE_TORCHVISION=ON
cmake --build .
```
Now the `run_model` executable should be able to run the
`fasterrcnn_resnet50_fpn.pt` file.
# Minimal CMake project for the hello-world example executable.
cmake_minimum_required(VERSION 3.10)
project(hello-world)
# The first thing to do is to tell cmake to find the TorchVision library.
# The package pulls in all the necessary torch libraries,
# so there is no need to also add `find_package(Torch)` here.
find_package(TorchVision REQUIRED)
# Needed because the LibTorch build shipped in the Python package links
# against Python.
find_package(Python3 COMPONENTS Development)
add_executable(hello-world main.cpp)
# We now need to link the TorchVision library to our executable.
# We can do that by using the TorchVision::TorchVision target,
# which also adds all the necessary torch dependencies.
target_compile_features(hello-world PUBLIC cxx_range_for)
target_link_libraries(hello-world TorchVision::TorchVision)
# LibTorch requires at least C++17.
set_property(TARGET hello-world PROPERTY CXX_STANDARD 17)
Hello World!
============
This is a minimal example of getting TorchVision to work in C++ with CMake.
In order to successfully compile this example, make sure you have both ``LibTorch`` and
``TorchVision`` installed.
Once both dependencies are sorted, we can start the CMake fun:
1) Create a ``build`` directory inside the current one.
2) from within the ``build`` directory, run the following commands:
- ``python ../trace_model.py`` To use a torchvision model in C++, you must first export it from the python version of torchvision. More information can be found on the corresponding `documentation page <https://pytorch.org/tutorials/advanced/cpp_export.html#loading-a-torchscript-model-in-c>`_.
- | ``cmake -DCMAKE_PREFIX_PATH="<PATH_TO_LIBTORCH>;<PATH_TO_TORCHVISION>" ..``
| where ``<PATH_TO_LIBTORCH>`` and ``<PATH_TO_TORCHVISION>`` are the paths to the libtorch and torchvision installations.
- ``cmake --build .``
| That's it!
| You should now have a ``hello-world`` executable in your ``build`` folder.
Running it will output a (fairly long) tensor of random values to your terminal.
"""Script the torchvision resnet18 model and save it as ``resnet18.pt``.

The saved file is loaded from C++ with ``torch::jit::load()``. resnet18 only
uses pure PyTorch operators, so the resulting model can run in C++ without
linking against the torchvision lib.
"""
import os.path as osp  # noqa: F401 (kept for backward compatibility)

import torch
import torchvision

model = torchvision.models.resnet18()
model.eval()  # inference mode: fixes batchnorm/dropout behavior
# torch.jit.script() is used here (not tracing), so name the result accordingly.
scripted_model = torch.jit.script(model)
scripted_model.save("resnet18.pt")
#include <iostream>
#include <torch/script.h>
#include <torch/torch.h>
#include <cstring>
#include <iostream>
#ifdef _WIN32
#include <torchvision/vision.h>
#endif // _WIN32
int main() {
int main(int argc, const char* argv[]) {
if (argc != 2) {
std::cout << "Usage: run_model <path_to_scripted_model>\n";
return -1;
}
torch::DeviceType device_type;
device_type = torch::kCPU;
......@@ -11,10 +19,10 @@ int main() {
try {
std::cout << "Loading model\n";
// Deserialize the ScriptModule from a file using torch::jit::load().
model = torch::jit::load("resnet18.pt");
model = torch::jit::load(argv[1]);
std::cout << "Model loaded\n";
} catch (const torch::Error& e) {
std::cout << "error loading the model\n";
std::cout << "error loading the model.\n";
return -1;
} catch (const std::exception& e) {
std::cout << "Other error: " << e.what() << "\n";
......@@ -24,8 +32,15 @@ int main() {
// TorchScript models require a List[IValue] as input
std::vector<torch::jit::IValue> inputs;
// Create a random input tensor and run it through the model.
if (std::strstr(argv[1], "fasterrcnn") != NULL) {
// Faster RCNN accepts a List[Tensor] as main input
std::vector<torch::Tensor> images;
images.push_back(torch::rand({3, 256, 275}));
images.push_back(torch::rand({3, 256, 275}));
inputs.push_back(images);
} else {
inputs.push_back(torch::rand({1, 3, 10, 10}));
}
auto out = model.forward(inputs);
std::cout << out << "\n";
......@@ -36,7 +51,15 @@ int main() {
// Add GPU inputs
inputs.clear();
torch::TensorOptions options = torch::TensorOptions{torch::kCUDA};
if (std::strstr(argv[1], "fasterrcnn") != NULL) {
// Faster RCNN accepts a List[Tensor] as main input
std::vector<torch::Tensor> images;
images.push_back(torch::rand({3, 256, 275}, options));
images.push_back(torch::rand({3, 256, 275}, options));
inputs.push_back(images);
} else {
inputs.push_back(torch::rand({1, 3, 10, 10}, options));
}
auto gpu_out = model.forward(inputs);
std::cout << gpu_out << "\n";
......
"""Script a pair of torchvision models and dump them as ``.pt`` files so the
C++ example (``run_model``) can load them with ``torch::jit::load()``."""
import torch
from torchvision import models

# Map each output filename stem to a zero-argument builder for its model.
builders = {
    "resnet18": lambda: models.resnet18(weights=None),
    "fasterrcnn_resnet50_fpn": lambda: models.detection.fasterrcnn_resnet50_fpn(
        weights=None, weights_backbone=None
    ),
}

for name, build in builders.items():
    net = build()
    net.eval()
    scripted = torch.jit.script(net)
    scripted.save(f"{name}.pt")
@echo on
REM Make the torchvision headers visible to the MSVC compiler.
set CL=/I"C:\Program Files (x86)\torchvision\include"
REM NOTE(review): both msbuild lines below appear as scraped from a diff; the
REM current project file is run_model.vcxproj (hello-world is the old name).
REM %1 is the parallel job count passed in by the CI script.
msbuild "-p:Configuration=Release" "-p:BuildInParallel=true" "-p:MultiProcessorCompilation=true" "-p:CL_MPCount=%1" hello-world.vcxproj -maxcpucount:%1
msbuild "-p:Configuration=Release" "-p:BuildInParallel=true" "-p:MultiProcessorCompilation=true" "-p:CL_MPCount=%1" run_model.vcxproj -maxcpucount:%1
@echo on
REM Make the torchvision headers visible to the MSVC compiler.
set CL=/I"C:\Program Files (x86)\torchvision\include"
REM Build the Faster R-CNN tracing test in Release; %1 is the job count.
msbuild "-p:Configuration=Release" "-p:BuildInParallel=true" "-p:MultiProcessorCompilation=true" "-p:CL_MPCount=%1" test_frcnn_tracing.vcxproj -maxcpucount:%1
# CMake project for the stand-alone Faster R-CNN tracing test executable.
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
project(test_frcnn_tracing)
find_package(Torch REQUIRED)
find_package(TorchVision REQUIRED)
# Python3 is needed because some of the headers include Python.h.
find_package(Python3 COMPONENTS Development)
add_executable(test_frcnn_tracing test_frcnn_tracing.cpp)
target_compile_features(test_frcnn_tracing PUBLIC cxx_range_for)
# Link LibTorch, libtorchvision and Python into the test binary.
target_link_libraries(test_frcnn_tracing ${TORCH_LIBRARIES} TorchVision::TorchVision Python3::Python)
# LibTorch requires at least C++17.
set_property(TARGET test_frcnn_tracing PROPERTY CXX_STANDARD 17)
#include <torch/script.h>
#include <torch/torch.h>
#include <torchvision/vision.h>
#include <torchvision/ops/nms.h>

// Include what we use: std::cout and std::vector were previously pulled in
// only transitively through the torch headers.
#include <iostream>
#include <vector>

// Loads the scripted fasterrcnn_resnet50_fpn.pt model from the working
// directory and runs a forward pass on random images, first on CPU and then
// on GPU when CUDA is available. Returns -1 if the model cannot be loaded,
// 0 otherwise.
int main() {
  torch::jit::script::Module module;
  try {
    std::cout << "Loading model\n";
    // Deserialize the ScriptModule from a file using torch::jit::load().
    module = torch::jit::load("fasterrcnn_resnet50_fpn.pt");
    std::cout << "Model loaded\n";
  } catch (const torch::Error&) {  // exception object itself is not used
    std::cout << "error loading the model\n";
    return -1;
  } catch (const std::exception& e) {
    std::cout << "Other error: " << e.what() << "\n";
    return -1;
  }

  // TorchScript models require a List[IValue] as input
  std::vector<torch::jit::IValue> inputs;

  // Faster RCNN accepts a List[Tensor] as main input
  std::vector<torch::Tensor> images;
  images.push_back(torch::rand({3, 256, 275}));
  images.push_back(torch::rand({3, 256, 275}));

  inputs.push_back(images);
  auto output = module.forward(inputs);

  std::cout << "ok\n";
  std::cout << "output" << output << "\n";

  if (torch::cuda::is_available()) {
    // Move the scripted model to GPU
    module.to(torch::kCUDA);

    // Rebuild the inputs on the GPU.
    images.clear();
    inputs.clear();

    torch::TensorOptions options = torch::TensorOptions{torch::kCUDA};
    images.push_back(torch::rand({3, 256, 275}, options));
    images.push_back(torch::rand({3, 256, 275}, options));

    inputs.push_back(images);
    // Renamed from `output` to avoid shadowing the CPU result above.
    auto gpu_output = module.forward(inputs);

    std::cout << "ok\n";
    std::cout << "output" << gpu_output << "\n";
  }
  return 0;
}
"""Script the torchvision Faster R-CNN model and save it as
``fasterrcnn_resnet50_fpn.pt`` for the C++ tracing test.

``weights=None`` / ``weights_backbone=None`` keep the weights random, which
is enough for the test and avoids downloading checkpoints in CI.
"""
import os.path as osp  # noqa: F401 (kept for backward compatibility)

import torch
import torchvision

model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=None, weights_backbone=None)
model.eval()  # inference mode: fixes batchnorm/dropout behavior
# torch.jit.script() is used here (not tracing), so name the result accordingly.
scripted_model = torch.jit.script(model)
scripted_model.save("fasterrcnn_resnet50_fpn.pt")
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment