Commit bf491463 authored by limm's avatar limm
Browse files

add v0.19.1 release

parent e17f5ea2
cff-version: 1.2.0
title: "TorchVision: PyTorch's Computer Vision library"
message: >-
If you find TorchVision useful in your work, please
consider citing the following BibTeX entry.
type: software
authors:
- given-names: TorchVision maintainers and contributors
url: "https://github.com/pytorch/vision"
license: "BSD-3-Clause"
date-released: "2016-11-06"
journal: "GitHub repository"
publisher: "GitHub"
key: "torchvision2016"
# NOTE(review): this span is collated diff output, not a runnable CMakeLists.txt —
# adjacent duplicate commands (e.g. the two cmake_minimum_required lines) are the
# pre-/post-change versions of the same statement, and the "......@@" lines are
# unified-diff hunk headers. Comments below describe the post-change (v0.19.1)
# configuration; do not build this file as-is.
cmake_minimum_required(VERSION 3.12)
# Post-change: minimum CMake raised from 3.12 to 3.18.
cmake_minimum_required(VERSION 3.18)
project(torchvision)
set(CMAKE_CXX_STANDARD 14)
# Post-change: C++ standard raised from 14 to 17.
set(CMAKE_CXX_STANDARD 17)
# Read the release version string from version.txt into TORCHVISION_VERSION.
file(STRINGS version.txt TORCHVISION_VERSION)
# Build options. PNG/JPEG support is optional in this release (ON by default).
option(WITH_CUDA "Enable CUDA support" OFF)
option(WITH_MPS "Enable MPS support" OFF)
option(WITH_PNG "Enable features requiring LibPNG." ON)
option(WITH_JPEG "Enable features requiring LibJPEG." ON)
if(WITH_CUDA)
enable_language(CUDA)
......@@ -12,11 +15,22 @@ if(WITH_CUDA)
# Allow constexpr functions to be called from CUDA device code.
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
endif()
find_package(Python3 COMPONENTS Development)
if(WITH_MPS)
# The MPS backend is written in Objective-C/C++, so enable those languages.
enable_language(OBJC OBJCXX)
add_definitions(-DWITH_MPS)
endif()
find_package(Torch REQUIRED)
# Pre-change: PNG and JPEG were unconditionally required.
find_package(PNG REQUIRED)
find_package(JPEG REQUIRED)
# Post-change: each image library is only required when its option is ON.
if (WITH_PNG)
add_definitions(-DPNG_FOUND)
find_package(PNG REQUIRED)
endif()
if (WITH_JPEG)
add_definitions(-DJPEG_FOUND)
find_package(JPEG REQUIRED)
endif()
# Rewrites interface compile options of EXISTING_TARGET for CUDA
# (body continues past the hunk header below; not fully visible here).
function(CUDA_CONVERT_FLAGS EXISTING_TARGET)
get_property(old_flags TARGET ${EXISTING_TARGET} PROPERTY INTERFACE_COMPILE_OPTIONS)
......@@ -60,23 +74,49 @@ include(GNUInstallDirs)
include(CMakePackageConfigHelpers)
set(TVCPP torchvision/csrc)
# Allow-list of source directories to compile; the bundled giflib directory
# was added to the list in this release (second line is the post-change form).
list(APPEND ALLOW_LISTED ${TVCPP} ${TVCPP}/io/image ${TVCPP}/io/image/cpu ${TVCPP}/models ${TVCPP}/ops
list(APPEND ALLOW_LISTED ${TVCPP} ${TVCPP}/io/image ${TVCPP}/io/image/cpu ${TVCPP}/io/image/cpu/giflib ${TVCPP}/models ${TVCPP}/ops
${TVCPP}/ops/autograd ${TVCPP}/ops/cpu ${TVCPP}/io/image/cuda)
if(WITH_CUDA)
list(APPEND ALLOW_LISTED ${TVCPP}/ops/cuda ${TVCPP}/ops/autocast)
endif()
if(WITH_MPS)
list(APPEND ALLOW_LISTED ${TVCPP}/ops/mps)
endif()
# Glob every file from the allow-listed directories into ALL_SOURCES.
FOREACH(DIR ${ALLOW_LISTED})
file(GLOB ALL_SOURCES ${ALL_SOURCES} ${DIR}/*.*)
ENDFOREACH()
add_library(${PROJECT_NAME} SHARED ${ALL_SOURCES})
# Pre-change: linked Torch, PNG, JPEG and Python unconditionally.
target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES} ${PNG_LIBRARY} ${JPEG_LIBRARIES} Python3::Python)
# Post-change: only Torch is linked unconditionally; the rest is conditional.
target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES})
if(WITH_MPS)
# Apple frameworks required by the MPS backend.
find_library(metal NAMES Metal)
find_library(foundation NAMES Foundation)
target_link_libraries(${PROJECT_NAME} PRIVATE ${metal} ${foundation})
endif()
if (WITH_PNG)
target_link_libraries(${PROJECT_NAME} PRIVATE ${PNG_LIBRARY})
endif()
if (WITH_JPEG)
target_link_libraries(${PROJECT_NAME} PRIVATE ${JPEG_LIBRARIES})
endif()
# Export as TorchVision and make the installed library locate libtorch at runtime.
set_target_properties(${PROJECT_NAME} PROPERTIES
EXPORT_NAME TorchVision
INSTALL_RPATH ${TORCH_INSTALL_PREFIX}/lib)
# Pre-change: include dirs added unconditionally; post-change: conditional.
include_directories(torchvision/csrc ${JPEG_INCLUDE_DIRS} ${PNG_INCLUDE_DIRS})
include_directories(torchvision/csrc)
if (WITH_PNG)
include_directories(${PNG_INCLUDE_DIRS})
endif()
if (WITH_JPEG)
include_directories(${JPEG_INCLUDE_DIRS})
endif()
# Where TorchVisionConfig.cmake gets installed (user-overridable cache entry).
set(TORCHVISION_CMAKECONFIG_INSTALL_DIR "share/cmake/TorchVision" CACHE STRING "install path for TorchVisionConfig.cmake")
......
......@@ -4,22 +4,22 @@ We want to make contributing to this project as easy and transparent as possible
## TL;DR
We appreciate all contributions. If you are interested in contributing to Torchvision, there are many ways to help out.
We appreciate all contributions. If you are interested in contributing to Torchvision, there are many ways to help out.
Your contributions may fall into the following categories:
- It helps the project if you could
- It helps the project if you could
- Report issues you're facing
- Give a :+1: on issues that others reported and that are relevant to you
- Give a :+1: on issues that others reported and that are relevant to you
- Answering queries on the issue tracker, investigating bugs are very valuable contributions to the project.
- You would like to improve the documentation. This is no less important than improving the library itself!
- You would like to improve the documentation. This is no less important than improving the library itself!
If you find a typo in the documentation, do not hesitate to submit a GitHub pull request.
- If you would like to fix a bug
- please pick one from the [list of open issues labelled as "help wanted"](https://github.com/pytorch/vision/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
- comment on the issue that you want to work on this issue
- send a PR with your fix, see below.
- send a PR with your fix, see below.
- If you plan to contribute new features, utility functions or extensions, please first open an issue and discuss the feature with us.
......@@ -30,56 +30,116 @@ clear and has sufficient instructions to be able to reproduce the issue.
## Development installation
### Install PyTorch Nightly
### Dependencies
Start by installing the **nightly** build of PyTorch following the [official
instructions](https://pytorch.org/get-started/locally/). Note that the official
instructions may ask you to install torchvision itself. If you are doing development
on torchvision, you should not install prebuilt torchvision packages.
**Optionally**, install `libpng` and `libjpeg-turbo` if you want to enable
support for
native encoding / decoding of PNG and JPEG formats in
[torchvision.io](https://pytorch.org/vision/stable/io.html#image):
```bash
conda install pytorch -c pytorch-nightly -c conda-forge
# or with pip (see https://pytorch.org/get-started/locally/)
# pip install numpy
# pip install --pre torch -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html
conda install libpng libjpeg-turbo -c pytorch
```
### Install Torchvision
Note: you can use the `TORCHVISION_INCLUDE` and `TORCHVISION_LIBRARY`
environment variables to tell the build system where to find those libraries if
they are in specific locations. Take a look at
[setup.py](https://github.com/pytorch/vision/blob/main/setup.py) for more
details.
### Clone and install torchvision
```bash
git clone https://github.com/pytorch/vision.git
cd vision
python setup.py develop
python setup.py develop # use install instead of develop if you don't care about development.
# or, for OSX
# MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py develop
# for C++ debugging, please use DEBUG=1
# for C++ debugging, use DEBUG=1
# DEBUG=1 python setup.py develop
pip install flake8 typing mypy pytest scipy
```
You may also have to install `libpng-dev` and `libjpeg-turbo8-dev` libraries:
```bash
conda install libpng jpeg
By default, GPU support is built if CUDA is found and `torch.cuda.is_available()` is true. It's possible to force
building GPU support by setting `FORCE_CUDA=1` environment variable, which is useful when building a docker image.
We don't officially support building from source using `pip`, but _if_ you do, you'll need to use the
`--no-build-isolation` flag.
#### Other development dependencies (some of these are needed to run tests):
```
pip install expecttest flake8 typing mypy pytest pytest-mock scipy requests
```
## Development Process
If you plan to modify the code or documentation, please follow the steps below:
1. Fork the repository and create your branch from `master`.
1. Fork the repository and create your branch from `main`.
2. If you have modified the code (new feature or bug-fix), please add unit tests.
3. If you have changed APIs, update the documentation. Make sure the documentation builds.
4. Ensure the test suite passes.
5. Make sure your code passes `flake8` formatting check.
5. Make sure your code passes the formatting checks (see below).
For more details about pull requests,
please read [GitHub's guides](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request).
For more details about pull requests,
please read [GitHub's guides](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request).
If you would like to contribute a new model, please see [here](#New-model).
If you would like to contribute a new model, please see [here](#New-architecture-or-improved-model-weights).
If you would like to contribute a new dataset, please see [here](#New-dataset).
If you would like to contribute a new dataset, please see [here](#New-dataset).
### Code formatting and typing
New code should be compatible with Python 3.X versions and be compliant with PEP8. To check the codebase, please run
#### Formatting
The torchvision code is formatted by [black](https://black.readthedocs.io/en/stable/),
and checked against pep8 compliance with [flake8](https://flake8.pycqa.org/en/latest/).
Instead of relying directly on `black` however, we rely on
[ufmt](https://github.com/omnilib/ufmt), for compatibility reasons with Facebook
internal infrastructure.
To format your code, install `ufmt` with `pip install ufmt==1.3.3 black==22.3.0 usort==1.0.2` and use e.g.:
```bash
flake8 --config=setup.cfg .
ufmt format torchvision
```
For the vast majority of cases, this is all you should need to run. For the
formatting to be a bit faster, you can also choose to only apply `ufmt` to the
files that were edited in your PR with e.g.:
```bash
ufmt format `git diff main --name-only`
```
Similarly, you can check for `flake8` errors with `flake8 torchvision`, although
they should be fairly rare considering that most of the errors are automatically
taken care of by `ufmt` already.
##### Pre-commit hooks
For convenience and **purely optionally**, you can rely on [pre-commit
hooks](https://pre-commit.com/) which will run both `ufmt` and `flake8` prior to
every commit.
First install the `pre-commit` package with `pip install pre-commit`, and then
run `pre-commit install` at the root of the repo for the hooks to be set up -
that's it.
Feel free to read the [pre-commit docs](https://pre-commit.com/#usage) to learn
more and improve your workflow. You'll see for example that `pre-commit run
--all-files` will run both `ufmt` and `flake8` without the need for you to
commit anything, and that the `--no-verify` flag can be added to `git commit` to
temporarily deactivate the hooks.
#### Type annotations
The codebase has type annotations, please make sure to add type hints if required. We use `mypy` tool for type checking:
```bash
mypy --config-file mypy.ini
......@@ -87,8 +147,10 @@ mypy --config-file mypy.ini
### Unit tests
If you have modified the code by adding a new feature or a bug-fix, please add unit tests for that. To run a specific
test:
Before running tests make sure to install [test dependencies](#other-development-dependencies-some-of-these-are-needed-to-run-tests).
If you have modified the code by adding a new feature or a bug-fix, please add unit tests for that. To run a specific
test:
```bash
pytest test/<test-module.py> -vvv -k <test_myfunc>
# e.g. pytest test/test_transforms.py -vvv -k test_center_crop
......@@ -97,7 +159,7 @@ pytest test/<test-module.py> -vvv -k <test_myfunc>
If you would like to run all tests:
```bash
pytest test -vvv
```
```
Tests that require internet access should be in
`test/test_internet.py`.
......@@ -120,7 +182,7 @@ pip install -r requirements.txt
```bash
cd docs
make html
make html-noplot
```
Then open `docs/build/html/index.html` in your favorite browser.
......@@ -134,38 +196,39 @@ clean``.
#### Building the example gallery - or not
When you run ``make html`` for the first time, all the examples in the gallery
will be built. Subsequent builds should be faster, and will only build the
examples that have been modified.
In most cases, running `make html-noplot` is enough to build the docs for your
specific use-case. The `noplot` part tells sphinx **not** to build the examples
in the [gallery](https://pytorch.org/vision/stable/auto_examples/index.html),
which saves a lot of building time.
You can run ``make html-noplot`` to not build the examples at all. This is
useful after a ``make clean`` to do some quick checks that are not related to
the examples.
If you need to build all the examples in the gallery, then you can use `make
html`.
You can also choose to only build a subset of the examples by using the
``EXAMPLES_PATTERN`` env variable, which accepts a regular expression. For
example ``EXAMPLES_PATTERN="transforms" make html`` will only build the examples
with "transforms" in their name.
### New model
### New architecture or improved model weights
Please refer to the guidelines in [Contributing to Torchvision - Models](https://github.com/pytorch/vision/blob/main/CONTRIBUTING_MODELS.md).
More details on how to add a new model will be provided later. Please, do not send any PR with a new model without discussing
it in an issue as, most likely, it will not be accepted.
### New dataset
More details on how to add a new dataset will be provided later. Please, do not send any PR with a new dataset without discussing
Please, do not send any PR with a new dataset without discussing
it in an issue as, most likely, it will not be accepted.
### Pull Request
If all previous checks (flake8, mypy, unit tests) are passing, please send a PR. Submitted PR will pass other tests on
different operation systems, python versions and hardwares.
If all previous checks (flake8, mypy, unit tests) are passing, please send a PR. Submitted PR will pass other tests on
different operating systems, python versions and hardware.
For more details about pull requests workflow,
For more details about pull requests workflow,
please read [GitHub's guides](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request).
## License
By contributing to Torchvision, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree.
Contributors are also required to [sign our Contributor License Agreement](https://code.facebook.com/cla).
# Contributing to Torchvision - Models
- [New Model Architectures - Overview](#new-model-architectures---overview)
- [New Weights for Existing Model Architectures](#new-weights-for-existing-model-architectures)
## New Model Architectures - Overview
For someone who would be interested in adding a model architecture, it is also expected to train the model, so here are a few important considerations:
- Training big models requires lots of resources and the cost quickly adds up
- Reproducing models is fun but also risky as you might not always get the results reported on the paper. It might require a huge amount of effort to close the gap
- The contribution might not get merged if we significantly lack in terms of accuracy, speed etc
- Including new models in TorchVision might not be the best approach, so other options such as releasing the model through to [Pytorch Hub](https://pytorch.org/hub/) should be considered
So, before starting any work and submitting a PR there are a few critical things that need to be taken into account in order to make sure the planned contribution is within the context of TorchVision, and the requirements and expectations are discussed beforehand. If this step is skipped and a PR is submitted without prior discussion it will almost certainly be rejected.
### 1. Preparation work
- Start by looking into this [issue](https://github.com/pytorch/vision/issues/2707) in order to have an idea of the models that are being considered, express your willingness to add a new model and discuss with the community whether this model should be included in TorchVision. It is very important at this stage to make sure that there is an agreement on the value of having this model in TorchVision and there is no one else already working on it.
- If the decision is to include the new model, then please create a new ticket which will be used for all design and implementation discussions prior to the PR. One of the TorchVision maintainers will reach out at this stage and this will be your POC from this point onwards in order to provide support, guidance and regular feedback.
### 2. Implement the model
Please take a look at existing models in TorchVision to get familiar with the idioms. Also, please look at recent contributions for new models. If in doubt about any design decisions you can ask for feedback on the issue created in step 1. Example of things to take into account:
- The implementation should be as close as possible to the canonical implementation/paper
- The PR must include the code implementation, documentation and tests
- It should also extend the existing reference scripts used to train the model
- The weights need to reproduce closely the results of the paper in terms of accuracy, even though the final weights to be deployed will be those trained by the TorchVision maintainers
- The PR description should include commands/configuration used to train the model, so that the TorchVision maintainers can easily run them to verify the implementation and generate the final model to be released
- Make sure we re-use existing components as much as possible (inheritance)
- New primitives (transforms, losses, etc.) can be added if necessary, but the final location will be determined after discussion with the dedicated maintainer
- Please take a look at the detailed [implementation and documentation guidelines](https://github.com/pytorch/vision/issues/5319) for a fine grain list of things not to be missed
### 3. Train the model with reference scripts
To validate the new model against the common benchmark, as well as to generate pre-trained weights, you must use TorchVision’s reference scripts to train the model.
Make sure all logs and a final (or best) checkpoint are saved, because it is expected that a submission shows that a model has been successfully trained and the results are in line with the original paper/repository. This will allow the reviewers to quickly check the validity of the submission, but please note that the final model to be released will be re-trained by the maintainers in order to verify reproducibility, ensure that the changes occurred during the PR review did not introduce any bugs, and to avoid moving around a large amount of data (including all checkpoints and logs).
### 4. Submit a PR
Submit a PR and tag the assigned maintainer. This PR should:
- Link the original ticket
- Provide a link for the original paper and the original repository if available
- Highlight the important test metrics and how they compare to the original paper
- Highlight any design choices that deviate from the original paper/implementation and rationale for these choices
## New Weights for Existing Model Architectures
The process of improving existing models, for instance improving accuracy by retraining the model with a different set of hyperparameters or augmentations, is the following:
1. Open a ticket and discuss with the community and maintainers whether this improvement should be added to TorchVision. Note that to add new weights the improvement should be significant.
2. Train the model using TorchVision reference scripts. You can add new primitives (transforms, losses, etc) when necessary, but the final location will be determined after discussion with the dedicated maintainer.
3. Open a PR with the new weights, together with the training logs and the checkpoint chosen so the reviewers can verify the submission. Details on how the model was trained, i.e., the training command using the reference scripts, should be included in the PR.
4. The PR reviewers should replicate the results on their side to verify the submission and if all goes well the new weights should be ready to be released!
include README.rst
include README.md
include LICENSE
recursive-exclude * __pycache__
......
# <div align="center"><strong>VISION</strong></div>
## 简介
torchvision 软件包由常用的数据集、模型架构和计算机视觉的常见图像转换组成。
## 安装
组件支持组合
| PyTorch版本 | fastpt版本 |vision版本 | DTK版本 | Python版本 | 推荐编译方式 |
| ----------- | ----------- | ----------- | ------------------------ | -----------------| ------------ |
| 2.5.1 | 2.1.0 |v0.19.1 | >= 25.04 | 3.8、3.10、3.11 | fastpt不转码 |
| 2.4.1 | 2.0.1 |v0.19.1 | >= 25.04 | 3.8、3.10、3.11 | fastpt不转码 |
| 其他 | 其他 | 其他 | 其他 | 3.8、3.10、3.11 | hip转码 |
+ pytorch版本大于2.4.1 && dtk版本大于25.04 推荐使用fastpt不转码编译。
### 1、使用pip方式安装
vision whl包下载目录:[光合开发者社区](https://download.sourcefind.cn:65024/4/main/vision),选择对应的pytorch版本和python版本下载对应vision的whl包
```shell
pip install torch* (下载torch的whl包)
pip install fastpt* --no-deps (下载fastpt的whl包)
source /usr/local/bin/fastpt -E
pip install torchvision* (下载的vision-fastpt的whl包)
```
### 2、使用源码编译方式安装
#### 相关依赖问题
可通过conda或源码编译安装libpng/libjpeg
```shell
conda install libpng
conda install jpeg
```
libpng和libjpeg必须在编译时可用,确保它在标准库位置可用,否则,分别在环境变量TORCHVISION_INCLUDE和TORCHVISION_LIBRARY中添加头文件路径和库路径。
```shell
pip3 install pybind11
pip3 install 'numpy<=1.24.3'
pip3 install 'urllib3==1.26.14'
pip3 install requests
pip3 install wheel
```
#### 编译环境准备
提供基于fastpt不转码编译:
1. 基于光源pytorch基础镜像环境:镜像下载地址:[光合开发者社区](https://sourcefind.cn/#/image/dcu/pytorch),根据pytorch、python、dtk及系统下载对应的镜像版本。
2. 基于现有python环境:安装pytorch,fastpt whl包下载目录:[光合开发者社区](https://sourcefind.cn/#/image/dcu/pytorch),根据python、dtk版本,下载对应pytorch的whl包。安装命令如下:
```shell
pip install torch* (下载torch的whl包)
pip install fastpt* --no-deps (下载fastpt的whl包, 安装顺序,先安装torch,后安装fastpt)
```
#### 源码编译安装
- 代码下载
```shell
git clone http://developer.sourcefind.cn/codes/OpenDAS/vision.git # 根据编译需要切换分支
```
- 提供源码编译方式(进入vision目录):
```shell
1. 设置不转码编译环境变量
export FORCE_CUDA=1
source /usr/local/bin/fastpt -C
```
2. 编译whl包并安装
```shell
python3 setup.py -v bdist_wheel
pip install dist/torchvision*
```
3. 源码编译安装
```shell
python3 setup.py install
```
#### 注意事项
+ 若使用pip install下载安装过慢,可添加pypi清华源:-i https://pypi.tuna.tsinghua.edu.cn/simple/
+ ROCM_PATH为dtk的路径,默认为/opt/dtk
## 验证
- python -c "import torchvision; torchvision.\_\_version__",版本号与官方版本同步,查询该软件的版本号,例如v0.19.1;
## Known issue
-
## 参考资料
- [README_ORIGIN.md](README_ORIGIN.md)
- [https://github.com/pytorch/vision.git](https://github.com/pytorch/vision.git)
torchvision
===========
//copy v0.10.0
.. image:: https://pepy.tech/badge/torchvision
:target: https://pepy.tech/project/torchvision
.. image:: https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchvision%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v
:target: https://pytorch.org/vision/stable/index.html
The torchvision package consists of popular datasets, model architectures, and common image transformations for computer vision.
Installation
============
We recommend Anaconda as Python package management system. Please refer to `pytorch.org <https://pytorch.org/>`_
for the detail of PyTorch (``torch``) installation. The following is the corresponding ``torchvision`` versions and
supported Python versions.
+--------------------------+--------------------------+---------------------------------+
| ``torch`` | ``torchvision`` | ``python`` |
+==========================+==========================+=================================+
| ``master`` / ``nightly`` | ``master`` / ``nightly`` | ``>=3.6`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.8.0`` | ``0.9.0`` | ``>=3.6`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.7.1`` | ``0.8.2`` | ``>=3.6`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.7.0`` | ``0.8.1`` | ``>=3.6`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.7.0`` | ``0.8.0`` | ``>=3.6`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.6.0`` | ``0.7.0`` | ``>=3.6`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.5.1`` | ``0.6.1`` | ``>=3.5`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.5.0`` | ``0.6.0`` | ``>=3.5`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.4.0`` | ``0.5.0`` | ``==2.7``, ``>=3.5``, ``<=3.8`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.3.1`` | ``0.4.2`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.3.0`` | ``0.4.1`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.2.0`` | ``0.4.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
+--------------------------+--------------------------+---------------------------------+
| ``1.1.0`` | ``0.3.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
+--------------------------+--------------------------+---------------------------------+
| ``<=1.0.1`` | ``0.2.2`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
+--------------------------+--------------------------+---------------------------------+
Anaconda:
.. code:: bash
conda install torchvision -c pytorch
pip:
.. code:: bash
pip install torchvision
From source:
.. code:: bash
python setup.py install
# or, for OSX
# MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py install
In case building TorchVision from source fails, install the nightly version of PyTorch following
the linked guide on the `contributing page <https://github.com/pytorch/vision/blob/master/CONTRIBUTING.md#development-installation>`_ and retry the install.
By default, GPU support is built if CUDA is found and ``torch.cuda.is_available()`` is true.
It's possible to force building GPU support by setting ``FORCE_CUDA=1`` environment variable,
which is useful when building a docker image.
Image Backend
=============
Torchvision currently supports the following image backends:
* `Pillow`_ (default)
* `Pillow-SIMD`_ - a **much faster** drop-in replacement for Pillow with SIMD. If installed will be used as the default.
* `accimage`_ - if installed can be activated by calling :code:`torchvision.set_image_backend('accimage')`
* `libpng`_ - can be installed via conda :code:`conda install libpng` or any of the package managers for debian-based and RHEL-based Linux distributions.
* `libjpeg`_ - can be installed via conda :code:`conda install jpeg` or any of the package managers for debian-based and RHEL-based Linux distributions. `libjpeg-turbo`_ can be used as well.
**Notes:** ``libpng`` and ``libjpeg`` must be available at compilation time in order to be available. Make sure that it is available on the standard library locations,
otherwise, add the include and library paths in the environment variables ``TORCHVISION_INCLUDE`` and ``TORCHVISION_LIBRARY``, respectively.
.. _libpng : http://www.libpng.org/pub/png/libpng.html
.. _Pillow : https://python-pillow.org/
.. _Pillow-SIMD : https://github.com/uploadcare/pillow-simd
.. _accimage: https://github.com/pytorch/accimage
.. _libjpeg: http://ijg.org/
.. _libjpeg-turbo: https://libjpeg-turbo.org/
C++ API
=======
TorchVision also offers a C++ API that contains C++ equivalent of python models.
Installation From source:
.. code:: bash
mkdir build
cd build
# Add -DWITH_CUDA=on support for the CUDA if needed
cmake ..
make
make install
Once installed, the library can be accessed in cmake (after properly configuring ``CMAKE_PREFIX_PATH``) via the :code:`TorchVision::TorchVision` target:
.. code:: rest
find_package(TorchVision REQUIRED)
target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
The ``TorchVision`` package will also automatically look for the ``Torch`` package and add it as a dependency to ``my-target``,
so make sure that it is also available to cmake via the ``CMAKE_PREFIX_PATH``.
For an example setup, take a look at ``examples/cpp/hello_world``.
TorchVision Operators
---------------------
In order to get the torchvision operators registered with torch (eg. for the JIT), all you need to do is to ensure that you
:code:`#include <torchvision/vision.h>` in your project.
Documentation
=============
You can find the API documentation on the pytorch website: https://pytorch.org/vision/stable/index.html
Contributing
============
See the `CONTRIBUTING <CONTRIBUTING.md>`_ file for how to help out.
Disclaimer on Datasets
======================
This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.
If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!
# torchvision
[![total torchvision downloads](https://pepy.tech/badge/torchvision)](https://pepy.tech/project/torchvision)
[![documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchvision%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/vision/stable/index.html)
The torchvision package consists of popular datasets, model architectures, and common image transformations for computer
vision.
## Installation
Please refer to the [official
instructions](https://pytorch.org/get-started/locally/) to install the stable
versions of `torch` and `torchvision` on your system.
To build source, refer to our [contributing
page](https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md#development-installation).
The following is the corresponding `torchvision` versions and supported Python
versions.
| `torch` | `torchvision` | Python |
| ------------------ | ------------------ | ------------------- |
| `main` / `nightly` | `main` / `nightly` | `>=3.8`, `<=3.12` |
| `2.3` | `0.18` | `>=3.8`, `<=3.12` |
| `2.2` | `0.17` | `>=3.8`, `<=3.11` |
| `2.1` | `0.16` | `>=3.8`, `<=3.11` |
| `2.0` | `0.15` | `>=3.8`, `<=3.11` |
<details>
<summary>older versions</summary>
| `torch` | `torchvision` | Python |
|---------|-------------------|---------------------------|
| `1.13` | `0.14` | `>=3.7.2`, `<=3.10` |
| `1.12` | `0.13` | `>=3.7`, `<=3.10` |
| `1.11` | `0.12` | `>=3.7`, `<=3.10` |
| `1.10` | `0.11` | `>=3.6`, `<=3.9` |
| `1.9` | `0.10` | `>=3.6`, `<=3.9` |
| `1.8` | `0.9` | `>=3.6`, `<=3.9` |
| `1.7` | `0.8` | `>=3.6`, `<=3.9` |
| `1.6` | `0.7` | `>=3.6`, `<=3.8` |
| `1.5` | `0.6` | `>=3.5`, `<=3.8` |
| `1.4` | `0.5` | `==2.7`, `>=3.5`, `<=3.8` |
| `1.3` | `0.4.2` / `0.4.3` | `==2.7`, `>=3.5`, `<=3.7` |
| `1.2` | `0.4.1` | `==2.7`, `>=3.5`, `<=3.7` |
| `1.1` | `0.3` | `==2.7`, `>=3.5`, `<=3.7` |
| `<=1.0` | `0.2` | `==2.7`, `>=3.5`, `<=3.7` |
</details>
## Image Backends
Torchvision currently supports the following image backends:
- torch tensors
- PIL images:
- [Pillow](https://python-pillow.org/)
- [Pillow-SIMD](https://github.com/uploadcare/pillow-simd) - a **much faster** drop-in replacement for Pillow with SIMD.
Read more in our [docs](https://pytorch.org/vision/stable/transforms.html).
## [UNSTABLE] Video Backend
Torchvision currently supports the following video backends:
- [pyav](https://github.com/PyAV-Org/PyAV) (default) - Pythonic binding for ffmpeg libraries.
- video_reader - This needs ffmpeg to be installed and torchvision to be built from source. There shouldn't be any
conflicting version of ffmpeg installed. Currently, this is only supported on Linux.
```
conda install -c conda-forge 'ffmpeg<4.3'
python setup.py install
```
# Using the models on C++
Refer to [example/cpp](https://github.com/pytorch/vision/tree/main/examples/cpp).
**DISCLAIMER**: the `libtorchvision` library includes the torchvision
custom ops as well as most of the C++ torchvision APIs. Those APIs do not come
with any backward-compatibility guarantees and may change from one version to
the next. Only the Python APIs are stable and with backward-compatibility
guarantees. So, if you need stability within a C++ environment, your best bet is
to export the Python APIs via torchscript.
## Documentation
You can find the API documentation on the pytorch website: <https://pytorch.org/vision/stable/index.html>
## Contributing
See the [CONTRIBUTING](CONTRIBUTING.md) file for how to help out.
## Disclaimer on Datasets
This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets,
vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to
determine whether you have permission to use the dataset under the dataset's license.
If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset
to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML
community!
## Pre-trained Model License
The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the
dataset used for training. It is your responsibility to determine whether you have permission to use the models for your
use case.
More specifically, SWAG models are released under the CC-BY-NC 4.0 license. See
[SWAG LICENSE](https://github.com/facebookresearch/SWAG/blob/main/LICENSE) for additional details.
## Citing TorchVision
If you find TorchVision useful in your work, please consider citing the following BibTeX entry:
```bibtex
@software{torchvision2016,
title = {TorchVision: PyTorch's Computer Vision library},
author = {TorchVision maintainers and contributors},
year = 2016,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/pytorch/vision}}
}
```
## Status
The Android demo of TorchVision is currently unmaintained, untested and likely out-of-date.
......@@ -14,14 +14,13 @@ allprojects {
androidSupportAppCompatV7Version = "28.0.0"
fbjniJavaOnlyVersion = "0.0.3"
soLoaderNativeLoaderVersion = "0.8.0"
pytorchAndroidVersion = "1.9.0-SNAPSHOT"
soLoaderNativeLoaderVersion = "0.10.5"
pytorchAndroidVersion = "1.12"
}
repositories {
google()
mavenCentral()
jcenter()
}
dependencies {
......@@ -32,11 +31,10 @@ allprojects {
repositories {
google()
jcenter()
mavenCentral()
}
}
ext.deps = [
jsr305: 'com.google.code.findbugs:jsr305:3.0.1',
]
ABI_FILTERS=armeabi-v7a,arm64-v8a,x86,x86_64
VERSION_NAME=0.10.0-SNAPSHOT
VERSION_NAME=0.15.0-SNAPSHOT
GROUP=org.pytorch
MAVEN_GROUP=org.pytorch
SONATYPE_STAGING_PROFILE=orgpytorch
......@@ -9,7 +9,7 @@ POM_SCM_URL=https://github.com/pytorch/vision.git
POM_SCM_CONNECTION=scm:git:https://github.com/pytorch/vision
POM_SCM_DEV_CONNECTION=scm:git:git@github.com:pytorch/vision.git
POM_LICENSE_NAME=BSD 3-Clause
POM_LICENSE_URL=https://github.com/pytorch/vision/blob/master/LICENSE
POM_LICENSE_URL=https://github.com/pytorch/vision/blob/main/LICENSE
POM_ISSUES_URL=https://github.com/pytorch/vision/issues
POM_LICENSE_DIST=repo
POM_DEVELOPER_ID=pytorch
......
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
@rem Resolve the directory containing this script; APP_HOME doubles as the project root.
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
@rem No JAVA_HOME set: fall back to java.exe on PATH and probe that it actually runs.
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
@rem Strip any embedded quotes from JAVA_HOME before building the java.exe path.
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
@rem Resolve the directory containing this script; APP_HOME doubles as the project root.
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
@rem No JAVA_HOME set: fall back to java.exe on PATH and probe that it actually runs.
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
@rem Strip any embedded quotes from JAVA_HOME before building the java.exe path.
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
# Android build of the torchvision custom ops as a shared library.
cmake_minimum_required(VERSION 3.4.1)
set(TARGET torchvision_ops)
project(${TARGET} CXX)
# Diff residue left two conflicting CMAKE_CXX_STANDARD assignments (14 then 17);
# keep only the intended C++17 setting.
set(CMAKE_CXX_STANDARD 17)
# -DMOBILE gates out desktop-only code paths in the shared C++ sources.
string(APPEND CMAKE_CXX_FLAGS " -DMOBILE")
......@@ -14,13 +14,6 @@ file(GLOB VISION_SRCS
../../torchvision/csrc/ops/*.h
../../torchvision/csrc/ops/*.cpp)
# Remove interpolate_aa sources as they are temporary code
# see https://github.com/pytorch/vision/pull/3761
# and IndexingUtils.h is unavailable on Android build
list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp")
list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.cpp")
list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.h")
add_library(${TARGET} SHARED
${VISION_SRCS}
)
......@@ -35,7 +28,7 @@ target_compile_options(${TARGET} PRIVATE
set(BUILD_SUBDIR ${ANDROID_ABI})
find_library(PYTORCH_LIBRARY pytorch_jni_lite
find_library(PYTORCH_LIBRARY pytorch_jni
PATHS ${PYTORCH_LINK_DIRS}
NO_CMAKE_FIND_ROOT_PATH)
......
......@@ -2,7 +2,7 @@ apply plugin: 'com.android.library'
apply plugin: 'maven'
repositories {
jcenter()
mavenCentral()
maven {
url "https://oss.sonatype.org/content/repositories/snapshots"
}
......
apply plugin: 'com.android.application'
repositories {
jcenter()
mavenCentral()
maven {
url "https://oss.sonatype.org/content/repositories/snapshots"
}
......
......@@ -14,4 +14,4 @@
android:background="@android:color/black"
android:textColor="@android:color/white" />
</FrameLayout>
\ No newline at end of file
</FrameLayout>
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
from torchvision.models.detection import (
    fasterrcnn_mobilenet_v3_large_320_fpn,
    FasterRCNN_MobileNet_V3_Large_320_FPN_Weights,
)

print(torch.__version__)

# Build the detection model with the weights enum API (replaces the removed
# pretrained=True argument); the diff residue had both call forms interleaved,
# which was not valid Python.
model = fasterrcnn_mobilenet_v3_large_320_fpn(
    weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT,
    box_score_thresh=0.7,
    rpn_post_nms_top_n_test=100,
    rpn_score_thresh=0.4,
    rpn_pre_nms_top_n_test=150,
)
model.eval()
# TorchScript the model for mobile deployment (optimize_for_mobile is applied
# further down in this script, outside the visible chunk).
script_model = torch.jit.script(model)
......
import os
import platform
import statistics
import torch
import torch.utils.benchmark as benchmark
import torchvision
def print_machine_specs():
print("Processor:", platform.processor())
print("Platform:", platform.platform())
print("Logical CPUs:", os.cpu_count())
print(f"\nCUDA device: {torch.cuda.get_device_name()}")
print(f"Total Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
def get_data():
    """Return the first batch of Places365 validation images.

    Downloads the dataset into ./data if that directory does not exist yet,
    then yields a list of up to 1000 uint8 image tensors (via PILToTensor).
    """
    to_tensor = torchvision.transforms.Compose(
        [
            torchvision.transforms.PILToTensor(),
        ]
    )
    data_root = os.path.join(os.getcwd(), "data")
    dataset = torchvision.datasets.Places365(
        root="./data", download=not os.path.exists(data_root), transform=to_tensor, split="val"
    )
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=1000, shuffle=False, num_workers=1, collate_fn=lambda samples: [s[0] for s in samples]
    )
    return next(iter(loader))
def run_benchmark(batch):
    # Benchmark torchvision.io.encode_jpeg over a grid of device x batch size x
    # thread count, comparing per-image encoding ("unfused") against a single
    # batched call ("fused"), and print a comparison table.
    # NOTE(review): assumes a CUDA device is present — the "cuda" iteration
    # raises on CPU-only machines; confirm intended environments.
    results = []
    for device in ["cpu", "cuda"]:
        # Move the whole batch to the target device once, outside the timed loop.
        batch_device = [t.to(device=device) for t in batch]
        for size in [1, 100, 1000]:
            for num_threads in [1, 12, 24]:
                for stmt, strat in zip(
                    [
                        "[torchvision.io.encode_jpeg(img) for img in batch_input]",
                        "torchvision.io.encode_jpeg(batch_input)",
                    ],
                    ["unfused", "fused"],
                ):
                    batch_input = batch_device[:size]
                    t = benchmark.Timer(
                        stmt=stmt,
                        setup="import torchvision",
                        globals={"batch_input": batch_input},
                        label="Image Encoding",
                        sub_label=f"{device.upper()} ({strat}): {stmt}",
                        description=f"{size} images",
                        num_threads=num_threads,
                    )
                    # blocked_autorange picks the iteration count automatically.
                    results.append(t.blocked_autorange())
    compare = benchmark.Compare(results)
    compare.print()
if __name__ == "__main__":
    # Driver: report hardware, fetch one batch (downloads Places365 on first
    # run), summarize image sizes, then run the encode_jpeg benchmark grid.
    print_machine_specs()
    batch = get_data()
    mean_h, mean_w = statistics.mean(t.shape[-2] for t in batch), statistics.mean(t.shape[-1] for t in batch)
    print(f"\nMean image size: {int(mean_h)}x{int(mean_w)}")
    run_benchmark(batch)
......@@ -22,21 +22,28 @@ if(NOT (CMAKE_VERSION VERSION_LESS 3.0))
# Don't include targets if this file is being picked up by another
# project which has already built this as a subproject
#-----------------------------------------------------------------------------
if(NOT TARGET ${PN}::TorchVision)
if(NOT TARGET ${PN}::${PN})
include("${CMAKE_CURRENT_LIST_DIR}/${PN}Targets.cmake")
if(NOT TARGET torch_library)
find_package(Torch REQUIRED)
endif()
if(NOT TARGET Python3::Python)
find_package(Python3 COMPONENTS Development)
target_include_directories(${PN}::${PN} INTERFACE "${${PN}_INCLUDE_DIR}")
if(@WITH_CUDA@)
target_compile_definitions(${PN}::${PN} INTERFACE WITH_CUDA)
endif()
set_target_properties(TorchVision::TorchVision PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${${PN}_INCLUDE_DIR}" INTERFACE_LINK_LIBRARIES "torch;Python3::Python" )
find_package(Torch REQUIRED)
target_link_libraries(${PN}::${PN} INTERFACE torch)
if(@WITH_PNG@)
find_package(PNG REQUIRED)
target_link_libraries(${PN}::${PN} INTERFACE ${PNG_LIBRARY})
target_compile_definitions(${PN}::${PN} INTERFACE PNG_FOUND)
endif()
if(@WITH_CUDA@)
target_compile_definitions(TorchVision::TorchVision INTERFACE WITH_CUDA)
if(@WITH_JPEG@)
find_package(JPEG REQUIRED)
target_link_libraries(${PN}::${PN} INTERFACE ${JPEG_LIBRARIES})
target_compile_definitions(${PN}::${PN} INTERFACE JPEG_FOUND)
endif()
endif()
......
......@@ -10,11 +10,11 @@
# SIMULATOR - used to build for the Simulator platforms, which have an x86 arch.
#
# CMAKE_IOS_DEVELOPER_ROOT = automatic(default) or /path/to/platform/Developer folder
# By default this location is automatcially chosen based on the IOS_PLATFORM value above.
# By default this location is automatically chosen based on the IOS_PLATFORM value above.
# If set manually, it will override the default location and force the user of a particular Developer Platform
#
# CMAKE_IOS_SDK_ROOT = automatic(default) or /path/to/platform/Developer/SDKs/SDK folder
# By default this location is automatcially chosen based on the CMAKE_IOS_DEVELOPER_ROOT value.
# By default this location is automatically chosen based on the CMAKE_IOS_DEVELOPER_ROOT value.
# In this case it will always be the most up-to-date SDK found in the CMAKE_IOS_DEVELOPER_ROOT path.
# If set manually, this will force the use of a specific SDK version
......@@ -100,7 +100,7 @@ if(IOS_DEPLOYMENT_TARGET)
set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-m${XCODE_IOS_PLATFORM}-version-min=${IOS_DEPLOYMENT_TARGET}")
endif()
# Hidden visibilty is required for cxx on iOS
# Hidden visibility is required for cxx on iOS
set(CMAKE_C_FLAGS_INIT "${XCODE_IOS_PLATFORM_VERSION_FLAGS}")
set(CMAKE_CXX_FLAGS_INIT "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -fvisibility-inlines-hidden")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment