Commit 6f3c5f1c authored by limm's avatar limm
Browse files

support v1.4.0

parent 6f674c7e
{
"Linux": [
{
"cuda": "11.7",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "11.6",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "11.6",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.5",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.3",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.3",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.3",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.1",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.1",
"torch": "1.9.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.1",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.0",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.9.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.1",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.1",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.1",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "9.2",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "9.2",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "cpu",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.9.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
}
],
"Windows": [
{
"cuda": "11.7",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "11.6",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "11.6",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.5",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.3",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.3",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.3",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.1",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.1",
"torch": "1.9.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "11.1",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.9.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.2",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "10.2",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.1",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "10.1",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "10.1",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "cpu",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.9.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
},
{
"cuda": "cpu",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2",
"2.0.0rc1"
]
}
],
"macOS": [
{
"cuda": "cpu",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "mps",
"torch": "1.13.x",
"mmcv": [
"2.0.0rc3"
]
},
{
"cuda": "cpu",
"torch": "1.12.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2"
]
},
{
"cuda": "cpu",
"torch": "1.11.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2"
]
},
{
"cuda": "cpu",
"torch": "1.10.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2"
]
},
{
"cuda": "cpu",
"torch": "1.9.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2"
]
},
{
"cuda": "cpu",
"torch": "1.8.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2"
]
},
{
"cuda": "cpu",
"torch": "1.7.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2"
]
},
{
"cuda": "cpu",
"torch": "1.6.x",
"mmcv": [
"2.0.0rc3",
"2.0.0rc2"
]
}
]
}
.. role:: hidden
:class: hidden-section
.. currentmodule:: {{ module }}
{{ name | underline}}
.. autoclass:: {{ name }}
:members:
..
autogenerated from source/_templates/classtemplate.rst
note it does not have :inherited-members:
.. role:: hidden
:class: hidden-section
mmcv.arraymisc
===================================
.. contents:: mmcv.arraymisc
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcv.arraymisc
.. autosummary::
:toctree: generated
:nosignatures:
quantize
dequantize
.. role:: hidden
:class: hidden-section
mmcv.cnn
===================================
.. contents:: mmcv.cnn
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcv.cnn
Module
----------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
ContextBlock
Conv2d
Conv3d
ConvAWS2d
ConvModule
ConvTranspose2d
ConvTranspose3d
ConvWS2d
DepthwiseSeparableConvModule
GeneralizedAttention
HSigmoid
HSwish
LayerScale
Linear
MaxPool2d
MaxPool3d
NonLocal1d
NonLocal2d
NonLocal3d
Scale
Swish
Conv2dRFSearchOp
Build Function
----------------
.. autosummary::
:toctree: generated
:nosignatures:
build_activation_layer
build_conv_layer
build_norm_layer
build_padding_layer
build_plugin_layer
build_upsample_layer
Miscellaneous
----------------
.. autosummary::
:toctree: generated
:nosignatures:
fuse_conv_bn
conv_ws_2d
is_norm
make_res_layer
make_vgg_layer
get_model_complexity_info
.. role:: hidden
:class: hidden-section
mmcv.image
===================================
.. contents:: mmcv.image
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcv.image
IO
----------------
.. autosummary::
:toctree: generated
:nosignatures:
imfrombytes
imread
imwrite
use_backend
Color Space
----------------
.. autosummary::
:toctree: generated
:nosignatures:
bgr2gray
bgr2hls
bgr2hsv
bgr2rgb
bgr2ycbcr
gray2bgr
gray2rgb
hls2bgr
hsv2bgr
imconvert
rgb2bgr
rgb2gray
rgb2ycbcr
ycbcr2bgr
ycbcr2rgb
Geometric
----------------
.. autosummary::
:toctree: generated
:nosignatures:
cutout
imcrop
imflip
impad
impad_to_multiple
imrescale
imresize
imresize_like
imresize_to_multiple
imrotate
imshear
imtranslate
rescale_size
Photometric
----------------
.. autosummary::
:toctree: generated
:nosignatures:
adjust_brightness
adjust_color
adjust_contrast
adjust_hue
adjust_lighting
adjust_sharpness
auto_contrast
clahe
imdenormalize
imequalize
iminvert
imnormalize
lut_transform
posterize
solarize
Miscellaneous
----------------
.. autosummary::
:toctree: generated
:nosignatures:
tensor2imgs
.. role:: hidden
:class: hidden-section
mmcv.ops
===================================
.. contents:: mmcv.ops
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcv.ops
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
BorderAlign
CARAFE
CARAFENaive
CARAFEPack
Conv2d
ConvTranspose2d
CornerPool
Correlation
CrissCrossAttention
DeformConv2d
DeformConv2dPack
DeformRoIPool
DeformRoIPoolPack
DynamicScatter
FusedBiasLeakyReLU
GroupAll
Linear
MaskedConv2d
MaxPool2d
ModulatedDeformConv2d
ModulatedDeformConv2dPack
ModulatedDeformRoIPoolPack
MultiScaleDeformableAttention
PSAMask
PointsSampler
PrRoIPool
QueryAndGroup
RiRoIAlignRotated
RoIAlign
RoIAlignRotated
RoIAwarePool3d
RoIPointPool3d
RoIPool
SAConv2d
SigmoidFocalLoss
SimpleRoIAlign
SoftmaxFocalLoss
SparseConv2d
SparseConv3d
SparseConvTensor
SparseConvTranspose2d
SparseConvTranspose3d
SparseInverseConv2d
SparseInverseConv3d
SparseMaxPool2d
SparseMaxPool3d
SparseModule
SparseSequential
SubMConv2d
SubMConv3d
SyncBatchNorm
TINShift
Voxelization
.. autosummary::
:toctree: generated
:nosignatures:
active_rotated_filter
assign_score_withk
ball_query
batched_nms
bbox_overlaps
border_align
box_iou_rotated
boxes_iou3d
boxes_iou_bev
boxes_overlap_bev
carafe
carafe_naive
chamfer_distance
contour_expand
convex_giou
convex_iou
deform_conv2d
deform_roi_pool
diff_iou_rotated_2d
diff_iou_rotated_3d
dynamic_scatter
furthest_point_sample
furthest_point_sample_with_dist
fused_bias_leakyrelu
gather_points
grouping_operation
knn
masked_conv2d
min_area_polygons
modulated_deform_conv2d
nms
nms3d
nms3d_normal
nms_bev
nms_match
nms_normal_bev
nms_rotated
pixel_group
point_sample
points_in_boxes_all
points_in_boxes_cpu
points_in_boxes_part
points_in_polygons
prroi_pool
rel_roi_point_to_rel_img_point
riroi_align_rotated
roi_align
roi_align_rotated
roi_pool
rotated_feature_align
scatter_nd
sigmoid_focal_loss
soft_nms
softmax_focal_loss
three_interpolate
three_nn
tin_shift
upfirdn2d
voxelization
.. role:: hidden
:class: hidden-section
mmcv.transforms
===================================
.. currentmodule:: mmcv.transforms
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
BaseTransform
TestTimeAug
Loading
----------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
LoadAnnotations
LoadImageFromFile
Processing
----------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
CenterCrop
MultiScaleFlipAug
Normalize
Pad
RandomChoiceResize
RandomFlip
RandomGrayscale
RandomResize
Resize
ToTensor
ImageToTensor
Wrapper
----------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
Compose
KeyMapper
RandomApply
RandomChoice
TransformBroadcaster
.. role:: hidden
:class: hidden-section
mmcv.utils
===================================
.. contents:: mmcv.utils
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcv.utils
.. autosummary::
:toctree: generated
:nosignatures:
IS_CUDA_AVAILABLE
IS_MLU_AVAILABLE
IS_MPS_AVAILABLE
collect_env
jit
skip_no_elena
.. role:: hidden
:class: hidden-section
mmcv.video
===================================
.. contents:: mmcv.video
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcv.video
IO
----------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
VideoReader
Cache
.. autosummary::
:toctree: generated
:nosignatures:
frames2video
Optical Flow
----------------
.. autosummary::
:toctree: generated
:nosignatures:
dequantize_flow
flow_from_bytes
flow_warp
flowread
flowwrite
quantize_flow
sparse_flow_from_bytes
Video Processing
----------------
.. autosummary::
:toctree: generated
:nosignatures:
concat_video
convert_video
cut_video
resize_video
.. role:: hidden
:class: hidden-section
mmcv.visualization
===================================
.. contents:: mmcv.visualization
:depth: 2
:local:
:backlinks: top
.. currentmodule:: mmcv.visualization
Color
----------------
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
Color
.. autosummary::
:toctree: generated
:nosignatures:
color_val
Image
----------------
.. autosummary::
:toctree: generated
:nosignatures:
imshow
imshow_bboxes
imshow_det_bboxes
Optical Flow
----------------
.. autosummary::
:toctree: generated
:nosignatures:
flow2rgb
flowshow
make_color_wheel
## Contributing to OpenMMLab
Welcome to the MMCV community, we are committed to building a cutting-edge computer vision foundational library and all kinds of contributions are welcomed, including but not limited to
**Fix bug**
You can directly post a Pull Request to fix typo in code or documents
The steps to fix a bug in the code implementation are as follows.
1. If the modification involves significant changes, you should create an issue first and describe the error information and how to trigger the bug. Other developers will discuss it with you and propose a proper solution.
2. Post a pull request after fixing the bug and add the corresponding unit tests.
**New Feature or Enhancement**
1. If the modification involves significant changes, you should create an issue to discuss with our developers and propose a proper design.
2. Post a Pull Request after implementing the new feature or enhancement and add corresponding unit test.
**Document**
You can directly post a pull request to fix documents. If you want to add a document, you should first create an issue to check if it is reasonable.
### Pull Request Workflow
If you're not familiar with Pull Request, don't worry! The following guidance will tell you how to create a Pull Request step by step. If you want to dive into the develop mode of Pull Request, you can refer to the [official documents](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests)
#### 1. Fork and clone
If you are posting a pull request for the first time, you should fork the OpenMMLab repositories by clicking the **Fork** button in the top right corner of the GitHub page, and the forked repositories will appear under your GitHub profile.
<img src="https://user-images.githubusercontent.com/57566630/167305749-43c7f4e9-449b-4e98-ade5-0c9276d5c9ce.png" width="1200">
Then, you can clone the repositories to local:
```shell
git clone git@github.com:{username}/mmcv.git
```
After that, you should add the official repository as the upstream repository
```bash
git remote add upstream git@github.com:open-mmlab/mmcv
```
Check whether remote repository has been added successfully by `git remote -v`
```bash
origin git@github.com:{username}/mmcv.git (fetch)
origin git@github.com:{username}/mmcv.git (push)
upstream git@github.com:open-mmlab/mmcv (fetch)
upstream git@github.com:open-mmlab/mmcv (push)
```
```{note}
Here's a brief introduction to origin and upstream. When we use "git clone", we create an "origin" remote by default, which points to the repository cloned from. As for "upstream", we add it ourselves to point to the target repository. Of course, if you don't like the name "upstream", you could name it as you wish. Usually, we'll push the code to "origin". If the pushed code conflicts with the latest code in official("upstream"), we should pull the latest code from upstream to resolve the conflicts, and then push to "origin" again. The posted Pull Request will be updated automatically.
```
#### 2. Configure pre-commit
You should configure [pre-commit](https://pre-commit.com/#intro) in the local development environment to make sure the code style matches that of OpenMMLab. **Note**: The following code should be executed under the MMCV directory.
```shell
pip install -U pre-commit
pre-commit install
```
Check that pre-commit is configured successfully, and install the hooks defined in `.pre-commit-config.yaml`.
```shell
pre-commit run --all-files
```
<img src="https://user-images.githubusercontent.com/57566630/173660750-3df20a63-cb66-4d33-a986-1f643f1d8aaf.png" width="1200">
<img src="https://user-images.githubusercontent.com/57566630/202368856-0465a90d-8fce-4345-918e-67b8b9c82614.png" width="1200">
```{note}
Chinese users may fail to download the pre-commit hooks due to the network issue. In this case, you could download these hooks from gitee by setting the .pre-commit-config-zh-cn.yaml
pre-commit install -c .pre-commit-config-zh-cn.yaml
pre-commit run --all-files -c .pre-commit-config-zh-cn.yaml
```
If the installation process is interrupted, you can repeatedly run `pre-commit run ... ` to continue the installation.
If the code does not conform to the code style specification, pre-commit will raise a warning and fixes some of the errors automatically.
<img src="https://user-images.githubusercontent.com/57566630/202369176-67642454-0025-4023-a095-263529107aa3.png" width="1200">
If we want to commit our code bypassing the pre-commit hook, we can use the `--no-verify` option (**only for temporary commits**).
```shell
git commit -m "xxx" --no-verify
```
#### 3. Create a development branch
After configuring the pre-commit, we should create a branch based on the master branch to develop the new feature or fix the bug. The proposed branch name is `username/pr_name`
```shell
git checkout -b yhc/refactor_contributing_doc
```
In subsequent development, if the master branch of the local repository is behind the master branch of "upstream", we need to pull the upstream for synchronization, and then execute the above command:
```shell
git pull upstream master
```
#### 4. Commit the code and pass the unit test
- MMCV introduces mypy to do static type checking to increase the robustness of the code. Therefore, we need to add Type Hints to our code and pass the mypy check. If you are not familiar with Type Hints, you can refer to [this tutorial](https://docs.python.org/3/library/typing.html).
- The committed code should pass through the unit test
```shell
# Pass all unit tests
pytest tests
# Pass the unit test of runner
pytest tests/test_runner/test_runner.py
```
If the unit test fails for lack of dependencies, you can install the dependencies referring to the [guidance](#unit-test)
- If the documents are modified/added, we should check the rendering result referring to [guidance](#document-rendering)
#### 5. Push the code to remote
We could push the local commits to remote after passing through the check of unit test and pre-commit. You can associate the local branch with remote branch by adding `-u` option.
```shell
git push -u origin {branch_name}
```
This will allow you to use the `git push` command to push code directly next time, without having to specify a branch or the remote repository.
#### 6. Create a Pull Request
(1) Create a pull request in GitHub's Pull request interface
<img src="https://user-images.githubusercontent.com/57566630/201533288-516f7ac4-0b14-4dc8-afbd-912475c368b5.png" width="1200">
(2) Modify the PR description according to the guidelines so that other developers can better understand your changes
<img src="https://user-images.githubusercontent.com/57566630/202242953-c91a18ff-e388-4ff9-8591-5fae0ead6c1e.png" width="1200">
Find more details about Pull Request description in [pull request guidelines](#pr-specs).
**note**
(a) The Pull Request description should contain the reason for the change, the content of the change, and the impact of the change, and be associated with the relevant Issue (see [documentation](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
(b) If it is your first contribution, please sign the CLA
<img src="https://user-images.githubusercontent.com/57566630/167307569-a794b967-6e28-4eac-a942-00deb657815f.png" width="1200">
(c) Check whether the Pull Request pass through the CI
<img src="https://user-images.githubusercontent.com/57566630/167307490-f9ebf9fa-63c0-4d83-8ba1-081ea169eb3a.png" width="1200">
MMCV will run unit tests for the posted Pull Request on different platforms (Linux, Windows, macOS), based on different versions of Python, PyTorch, and CUDA to make sure the code is correct. We can see the specific test information by clicking `Details` in the above image so that we can modify the code.
(3) If the Pull Request passes the CI, then you can wait for the review from other developers. You'll modify the code based on the reviewer's comments, and repeat the steps [4](#4-commit-the-code-and-pass-the-unit-test)-[5](#5-push-the-code-to-remote) until all reviewers approve it. Then, we will merge it ASAP.
<img src="https://user-images.githubusercontent.com/57566630/202145400-cc2cd8c4-10b0-472f-ba37-07e6f50acc67.png" width="1200">
#### 7. Resolve conflicts
If your local branch conflicts with the latest master branch of "upstream", you'll need to resolve them. There are two ways to do this:
```shell
git fetch --all --prune
git rebase upstream/master
```
or
```shell
git fetch --all --prune
git merge upstream/master
```
If you are very good at handling conflicts, then you can use rebase to resolve conflicts, as this will keep your commit logs tidy. If you are not familiar with `rebase`, then you can use `merge` to resolve conflicts.
### Guidance
#### Unit test
If you cannot run the unit tests of some modules due to missing dependencies, such as the [video](https://github.com/open-mmlab/mmcv/tree/master/mmcv/video) module, you can try to install the following dependencies:
```shell
# Linux
sudo apt-get update -y
sudo apt-get install -y libturbojpeg
sudo apt-get install -y ffmpeg
# Windows
conda install ffmpeg
```
We should also make sure the committed code will not decrease the coverage of unit test, we could run the following command to check the coverage of unit test:
```shell
python -m coverage run -m pytest /path/to/test_file
python -m coverage html
# check file in htmlcov/index.html
```
#### Document rendering
If the documents are modified/added, we should check the rendering result. We could install the dependencies and run the following command to render the documents and check the results:
```shell
pip install -r requirements/docs.txt
cd docs/zh_cn/
# or docs/en
make html
# check file in ./docs/zh_cn/_build/html/index.html
```
### Code style
#### Python
We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
We use the following tools for linting and formatting:
- [flake8](https://github.com/PyCQA/flake8): A wrapper around some linter tools.
- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports.
- [yapf](https://github.com/google/yapf): A formatter for Python files.
- [codespell](https://github.com/codespell-project/codespell): A Python utility to fix common misspellings in text files.
- [mdformat](https://github.com/executablebooks/mdformat): Mdformat is an opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files.
- [docformatter](https://github.com/myint/docformatter): A formatter to format docstring.
Style configurations of yapf and isort can be found in [setup.cfg](./setup.cfg).
We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`,
fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, and sorts `requirements.txt` automatically on every commit.
The config for a pre-commit hook is stored in [.pre-commit-config](./.pre-commit-config.yaml).
#### C++ and CUDA
We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
### PR Specs
1. Use [pre-commit](https://pre-commit.com) hook to avoid issues of code style
2. One short-time branch should be matched with only one PR
3. Accomplish a detailed change in one PR. Avoid large PR
- Bad: Support Faster R-CNN
- Acceptable: Add a box head to Faster R-CNN
- Good: Add a parameter to box head to support custom conv-layer number
4. Provide clear and significant commit message
5. Provide clear and meaningful PR description
- Task name should be clarified in title. The general format is: \[Prefix\] Short description of the PR (Suffix)
- Prefix: add new feature \[Feature\], fix bug \[Fix\], related to documents \[Docs\], in developing \[WIP\] (which will not be reviewed temporarily)
- Introduce main changes, results and influences on other modules in short description
- Associate related issues and pull requests with a milestone
## Pull Request (PR)
Content has been migrated to [contributing guidance](contributing.md).
[html writers]
table_style: colwidths-auto
## Frequently Asked Questions
We list some common troubles faced by many users and their corresponding solutions here.
Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them.
### Installation
- KeyError: "xxx: 'yyy is not in the zzz registry'"
The registry mechanism will be triggered only when the file of the module is imported.
So you need to import that file somewhere. More details can be found at [KeyError: "MaskRCNN: 'RefineRoIHead is not in the models registry'"](https://github.com/open-mmlab/mmdetection/issues/5974).
- "No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'"
1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`
2. Install mmcv-full following the [installation instruction](https://mmcv.readthedocs.io/en/latest/get_started/installation.html) or [Build MMCV from source](https://mmcv.readthedocs.io/en/latest/get_started/build.html)
- "invalid device function" or "no kernel image is available for execution"
1. Check the CUDA compute capability of your GPU
2. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built for the correct GPU architecture. You may need to set `TORCH_CUDA_ARCH_LIST` to reinstall MMCV. The compatibility issue could happen when using old GPUs, e.g., Tesla K80 (3.7) on colab.
3. Check whether the running environment is the same as that when mmcv/mmdet is compiled. For example, you may compile mmcv using CUDA 10.0 but run it in a CUDA 9.0 environment
- "undefined symbol" or "cannot open xxx.so"
1. If those symbols are CUDA/C++ symbols (e.g., libcudart.so or GLIBCXX), check
whether the CUDA/GCC runtimes are the same as those used for compiling mmcv
2. If those symbols are Pytorch symbols (e.g., symbols containing caffe, aten, and TH), check whether the Pytorch version is the same as that used for compiling mmcv
3. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built by and running on the same environment
- "RuntimeError: CUDA error: invalid configuration argument"
This error may be caused by the poor performance of GPU. Try to decrease the value of [THREADS_PER_BLOCK](https://github.com/open-mmlab/mmcv/blob/cac22f8cf5a904477e3b5461b1cc36856c2793da/mmcv/ops/csrc/common_cuda_helper.hpp#L10)
and recompile mmcv.
- "RuntimeError: nms is not compiled with GPU support"
This error is because your CUDA environment is not installed correctly.
You may try to re-install your CUDA environment and then delete the build/ folder before re-compile mmcv.
- "Segmentation fault"
1. Check your GCC version and use GCC >= 5.4. This is usually caused by an incompatibility between PyTorch and the environment (e.g., GCC \< 4.9 for PyTorch). We also recommend that users avoid GCC 5.5 because many reports indicate that GCC 5.5 will cause "segmentation fault"; simply changing it to GCC 5.4 could solve the problem
2. Check whether PyTorch is correctly installed and could use CUDA op, e.g. type the following command in your terminal and see whether they could correctly output results
```shell
python -c 'import torch; print(torch.cuda.is_available())'
```
3. If PyTorch is correctly installed, check whether MMCV is correctly installed. If MMCV is correctly installed, then there will be no issue of the command
```shell
python -c 'import mmcv; import mmcv.ops'
```
4. If MMCV and PyTorch are correctly installed, you can use `ipdb` to set breakpoints or directly add `print` to debug and see which part leads the `segmentation fault`
- "libtorch_cuda_cu.so: cannot open shared object file"
`mmcv-full` depends on the share object but it can not be found. We can check whether the object exists in `~/miniconda3/envs/{environment-name}/lib/python3.7/site-packages/torch/lib` or try to re-install the PyTorch.
- "fatal error C1189: #error: -- unsupported Microsoft Visual Studio version!"
If you are building mmcv-full on Windows and the version of CUDA is 9.2, you will probably encounter the error `"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.2\include\crt/host_config.h(133): fatal error C1189: #error: -- unsupported Microsoft Visual Studio version! Only the versions 2012, 2013, 2015 and 2017 are supported!"`, in which case you can use a lower version of Microsoft Visual Studio like vs2017.
- "error: member "torch::jit::detail::ModulePolicy::all_slots" may not be initialized"
If your version of PyTorch is 1.5.0 and you are building mmcv-full on Windows, you will probably encounter the error `- torch/csrc/jit/api/module.h(474): error: member "torch::jit::detail::ModulePolicy::all_slots" may not be initialized`. The way to solve the error is to replace all the `static constexpr bool all_slots = false;` with `static bool all_slots = false;` at this file `https://github.com/pytorch/pytorch/blob/v1.5.0/torch/csrc/jit/api/module.h`. More details can be found at [member "torch::jit::detail::AttributePolicy::all_slots" may not be initialized](https://github.com/pytorch/pytorch/issues/39394).
- "error: a member with an in-class initializer must be const"
If your version of PyTorch is 1.6.0 and you are building mmcv-full on Windows, you will probably encounter the error `"- torch/include\torch/csrc/jit/api/module.h(483): error: a member with an in-class initializer must be const"`. The way to solve the error is to replace all the `CONSTEXPR_EXCEPT_WIN_CUDA ` with `const` at `torch/include\torch/csrc/jit/api/module.h`. More details can be found at [Ninja: build stopped: subcommand failed](https://github.com/open-mmlab/mmcv/issues/575).
- "error: member "torch::jit::ProfileOptionalOp::Kind" may not be initialized"
If your version of PyTorch is 1.7.0 and you are building mmcv-full on Windows, you will probably encounter the error `torch/include\torch/csrc/jit/ir/ir.h(1347): error: member "torch::jit::ProfileOptionalOp::Kind" may not be initialized`. The way to solve the error needs to modify several local files of PyTorch:
- delete `static constexpr Symbol Kind = ::c10::prim::profile;` and `static constexpr Symbol Kind = ::c10::prim::profile_optional;` at `torch/include\torch/csrc/jit/ir/ir.h`
- replace `explicit operator type&() { return *(this->value); }` with `explicit operator type&() { return *((type*)this->value); }` at `torch\include\pybind11\cast.h`
- replace all the `CONSTEXPR_EXCEPT_WIN_CUDA` with `const` at `torch/include\torch/csrc/jit/api/module.h`
More details can be found at [Ensure default extra_compile_args](https://github.com/pytorch/pytorch/pull/45956).
- Compatibility issue between MMCV and MMDetection; "ConvWS is already registered in conv layer"
Please install the correct version of MMCV for the version of your MMDetection following the [installation instruction](https://mmdetection.readthedocs.io/en/latest/get_started.html#installation).
### Usage
- "RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one"
1. This error indicates that your module has parameters that were not used in producing loss. This phenomenon may be caused by running different branches of your code in DDP mode. More details at [Expected to have finished reduction in the prior iteration before starting a new one](https://github.com/pytorch/pytorch/issues/55582).
2. You can set ` find_unused_parameters = True` in the config to solve the above problems or find those unused parameters manually
- "RuntimeError: Trying to backward through the graph a second time"
`GradientCumulativeOptimizerHook` and `OptimizerHook` are both set, which causes `loss.backward()` to be called twice, so a `RuntimeError` is raised. We can only use one of them. More details at [Trying to backward through the graph a second time](https://github.com/open-mmlab/mmcv/issues/1379).
## Build MMCV from source
### Build mmcv
Before installing mmcv, make sure that PyTorch has been successfully installed following the [PyTorch official installation guide](https://pytorch.org/get-started/locally/#start-locally). This can be verified using the following command
```bash
python -c 'import torch;print(torch.__version__)'
```
If version information is output, then PyTorch is installed.
```{note}
If you would like to use `opencv-python-headless` instead of `opencv-python`,
e.g., in a minimum container environment or servers without GUI,
you can first install it before installing MMCV to skip the installation of `opencv-python`.
```
#### Build on Linux
1. Clone the repo
```bash
git clone https://github.com/open-mmlab/mmcv.git
cd mmcv
```
2. Install `ninja` and `psutil` to speed up the compilation
```bash
pip install -r requirements/optional.txt
```
3. Check the nvcc version (requires 9.2+. Skip if no GPU available.)
```bash
nvcc --version
```
If the above command outputs the following message, it means that the nvcc setting is OK, otherwise you need to set CUDA_HOME.
```
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2020 NVIDIA Corporation
Built on Mon_Nov_30_19:08:53_PST_2020
Cuda compilation tools, release 11.2, V11.2.67
Build cuda_11.2.r11.2/compiler.29373293_0
```
:::{note}
If you want to support ROCm, you can refer to [AMD ROCm](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html) to install ROCm.
:::
4. Check the gcc version (requires 5.4+)
```bash
gcc --version
```
5. Start building (takes 10+ min)
```bash
pip install -e . -v
```
6. Validate the installation
```bash
python .dev_scripts/check_installation.py
```
If no error is reported by the above command, the installation is successful. If there is an error reported, please check [Frequently Asked Questions](../faq.md) to see if there is already a solution.
If no solution is found, please feel free to open an [issue](https://github.com/open-mmlab/mmcv/issues).
#### Build on macOS
```{note}
If you are using a mac with apple silicon chip, install the PyTorch 1.13+, otherwise you will encounter the problem in [issues#2218](https://github.com/open-mmlab/mmcv/issues/2218).
```
1. Clone the repo
```bash
git clone https://github.com/open-mmlab/mmcv.git
cd mmcv
```
2. Install `ninja` and `psutil` to speed up the compilation
```bash
pip install -r requirements/optional.txt
```
3. Start building
```bash
MMCV_WITH_OPS=1 pip install -e .
```
4. Validate the installation
```bash
python .dev_scripts/check_installation.py
```
If no error is reported by the above command, the installation is successful. If there is an error reported, please check [Frequently Asked Questions](../faq.md) to see if there is already a solution.
If no solution is found, please feel free to open an [issue](https://github.com/open-mmlab/mmcv/issues).
#### Build on Windows
Building MMCV on Windows is a bit more complicated than that on Linux.
The following instructions show how to get this accomplished.
##### Prerequisite
The following software is required for building MMCV on windows.
Install them first.
- [Git](https://git-scm.com/download/win)
- During installation, tick **add git to Path**.
- [Visual Studio Community 2019](https://visualstudio.microsoft.com)
- A compiler for C++ and CUDA codes.
- [Miniconda](https://docs.conda.io/en/latest/miniconda.html)
- Official distributions of Python should work too.
- [CUDA 10.2](https://developer.nvidia.com/cuda-10.2-download-archive)
- Not required for building CPU version.
- Customize the installation if necessary. As a recommendation, skip the driver installation if a newer version is already installed.
```{note}
You should know how to set up environment variables, especially `Path`, on Windows. The following instruction relies heavily on this skill.
```
##### Common steps
1. Launch Anaconda prompt from Windows Start menu
Do not use raw `cmd.exe`, as this instruction is based on PowerShell syntax.
2. Create a new conda environment
```powershell
(base) PS C:\Users\xxx> conda create --name mmcv python=3.7
(base) PS C:\Users\xxx> conda activate mmcv # make sure to activate environment before any operation
```
3. Install PyTorch. Choose a version based on your need.
```powershell
# CUDA version
(mmcv) PS C:\Users\xxx> conda install pytorch torchvision cudatoolkit=10.2 -c pytorch
# CPU version
(mmcv) PS C:\Users\xxx> conda install pytorch torchvision cpuonly -c pytorch
```
4. Clone the repo
```powershell
(mmcv) PS C:\Users\xxx> git clone https://github.com/open-mmlab/mmcv.git
(mmcv) PS C:\Users\xxx\mmcv> cd mmcv
```
5. Install `ninja` and `psutil` to speed up the compilation
```powershell
(mmcv) PS C:\Users\xxx\mmcv> pip install -r requirements/optional.txt
```
6. Set up MSVC compiler
Set Environment variable, add `C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.27.29110\bin\Hostx86\x64` to `PATH`, so that `cl.exe` will be available in prompt, as shown below.
```powershell
(mmcv) PS C:\Users\xxx\mmcv> cl
Microsoft (R) C/C++ Optimizing Compiler Version 19.27.29111 for x64
Copyright (C) Microsoft Corporation. All rights reserved.
usage: cl [ option... ] filename... [ / link linkoption... ]
```
For compatibility, we use the x86-hosted and x64-targeted compiler. Note the `Hostx86\x64` in the path.
You may want to change the system language to English because pytorch will parse text output from `cl.exe` to check its version. However only utf-8 is recognized. Navigate to Control Panel -> Region -> Administrative -> Language for Non-Unicode programs and change it to English.
##### Build and install MMCV
mmcv can be built in two ways:
1. Full version (CPU ops)
Module `ops` will be compiled as a pytorch extension, but only x86 code will be compiled. The compiled ops can be executed on CPU only.
2. Full version (CUDA ops)
Both x86 and CUDA codes of `ops` module will be compiled. The compiled version can be run on both CPU and CUDA-enabled GPU (if implemented).
###### CPU version
Build and install
```powershell
(mmcv) PS C:\Users\xxx\mmcv> python setup.py build_ext
(mmcv) PS C:\Users\xxx\mmcv> python setup.py develop
```
###### GPU version
1. Make sure `CUDA_PATH` or `CUDA_HOME` is already set in `envs` via `ls env:`, desired output is shown as below:
```powershell
(mmcv) PS C:\Users\xxx\mmcv> ls env:
Name Value
---- -----
CUDA_PATH C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2
CUDA_PATH_V10_1 C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1
CUDA_PATH_V10_2 C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2
```
This should already be done by CUDA installer. If not, or you have multiple version of CUDA toolkit installed, set it with
```powershell
(mmcv) PS C:\Users\xxx\mmcv> $env:CUDA_HOME = "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2"
# OR
(mmcv) PS C:\Users\xxx\mmcv> $env:CUDA_HOME = $env:CUDA_PATH_V10_2 # if CUDA_PATH_V10_2 is in envs:
```
2. Set CUDA target arch
```shell
# Here you need to change to the target architecture corresponding to your GPU
(mmcv) PS C:\Users\xxx\mmcv> $env:TORCH_CUDA_ARCH_LIST="7.5"
```
:::{note}
Check the compute capability of your GPU from [here](https://developer.nvidia.com/cuda-gpus).
```powershell
(mmcv) PS C:\Users\xxx\mmcv> &"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\extras\demo_suite\deviceQuery.exe"
Device 0: "NVIDIA GeForce GTX 1660 SUPER"
CUDA Driver Version / Runtime Version 11.7 / 11.1
CUDA Capability Major/Minor version number: 7.5
```
The 7.5 above indicates the target architecture. Note: You need to replace v10.2 with your CUDA version in the above command.
:::
3. Build and install
```powershell
# build
python setup.py build_ext # if success, cl will be launched to compile ops
# install
python setup.py develop
```
```{note}
If you are compiling against PyTorch 1.6.0, you might meet some errors from PyTorch as described in [this issue](https://github.com/pytorch/pytorch/issues/42467). Follow [this pull request](https://github.com/pytorch/pytorch/pull/43380/files) to modify the source code in your local PyTorch installation.
```
##### Validate installation
```powershell
(mmcv) PS C:\Users\xxx\mmcv> python .dev_scripts/check_installation.py
```
If no error is reported by the above command, the installation is successful. If there is an error reported, please check [Frequently Asked Questions](../faq.md) to see if there is already a solution.
If no solution is found, please feel free to open an [issue](https://github.com/open-mmlab/mmcv/issues).
### Build mmcv-lite
If you need to use PyTorch-related modules, make sure PyTorch has been successfully installed in your environment by referring to the [PyTorch official installation guide](https://github.com/pytorch/pytorch#installation).
1. Clone the repo
```bash
git clone https://github.com/open-mmlab/mmcv.git
cd mmcv
```
2. Start building
```bash
MMCV_WITH_OPS=0 pip install -e . -v
```
3. Validate installation
```bash
python -c 'import mmcv;print(mmcv.__version__)'
```
## Installation
There are two versions of MMCV:
- **mmcv**: comprehensive, with full features and various CUDA ops out of box. It takes longer time to build.
- **mmcv-lite**: lite, without CUDA ops but all other features, similar to mmcv\<1.0.0. It is useful when you do not need those CUDA ops.
```{warning}
Do not install both versions in the same environment, otherwise you may encounter errors like `ModuleNotFound`. You need to uninstall one before installing the other. `Installing the full version is highly recommended if CUDA is available`.
```
### Install mmcv
Before installing mmcv, make sure that PyTorch has been successfully installed following the [PyTorch official installation guide](https://pytorch.org/get-started/locally/#start-locally). This can be verified using the following command
```bash
python -c 'import torch;print(torch.__version__)'
```
If version information is output, then PyTorch is installed.
#### Install with mim (recommended)
[mim](https://github.com/open-mmlab/mim) is the package management tool for the OpenMMLab projects, which makes it easy to install mmcv
```bash
pip install -U openmim
mim install "mmcv>=2.0.0rc1"
```
If you find that the above installation command does not use a pre-built package ending with `.whl` but a source package ending with `.tar.gz`, you may not have a pre-build package corresponding to the PyTorch or CUDA or mmcv version, in which case you can [build mmcv from source](build.md).
<details>
<summary>Installation log using pre-built packages</summary>
Looking in links: https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/index.html<br />
Collecting mmcv<br />
<b>Downloading https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/mmcv-2.0.0rc3-cp38-cp38-manylinux1_x86_64.whl</b>
</details>
<details>
<summary>Installation log using source packages</summary>
Looking in links: https://download.openmmlab.com/mmcv/dist/cu102/torch1.8.0/index.html<br />
Collecting mmcv==2.0.0rc3<br />
<b>Downloading mmcv-2.0.0rc3.tar.gz</b>
</details>
To install a specific version of mmcv, for example, mmcv version 2.0.0rc3, you can use the following command
```bash
mim install mmcv==2.0.0rc3
```
:::{note}
If you would like to use `opencv-python-headless` instead of `opencv-python`,
e.g., in a minimum container environment or servers without GUI,
you can first install it before installing MMCV to skip the installation of `opencv-python`.
Alternatively, if it takes too long to install a dependency library, you can specify the pypi source
```bash
mim install "mmcv>=2.0.0rc3" -i https://pypi.tuna.tsinghua.edu.cn/simple
```
:::
You can run [check_installation.py](https://github.com/open-mmlab/mmcv/blob/2.x/.dev_scripts/check_installation.py) to check the installation of mmcv after running the installation commands.
#### Install with pip
Use the following command to check the version of CUDA and PyTorch
```bash
python -c 'import torch;print(torch.__version__);print(torch.version.cuda)'
```
Select the appropriate installation command depending on the type of system, CUDA version, PyTorch version, and MMCV version
<html>
<body>
<style>
select {
z-index: 1000;
position: absolute;
top: 10px;
width: 6.7rem;
}
#select-container {
position: relative;
height: 30px;
}
#select-cmd {
background-color: #f5f6f7;
font-size: 14px;
margin-top: 20px;
}
/* 让每一个都间隔1.3rem */
#select-os {
/* left: 1.375rem; */
left: 0;
}
#select-cuda {
/* left: 9.375rem; 9.375 = 1.375 + 6.7 + 1.3 */
left: 8rem;
}
#select-torch {
/* left: 17.375rem; 17.375 = 9.375 + 6.7 + 1.3 */
left: 16rem;
}
#select-mmcv {
/* left: 25.375rem; 25.375 = 17.375 + 6.7 + 1.3 */
left: 24rem;
}
</style>
<div id="select-container">
<select
onmousedown="handleSelectMouseDown(this.id)"
onblur="handleSelectBlur(this.id)"
onchange="changeOS(this.value)"
id="select-os">
</select>
<select
onmousedown="handleSelectMouseDown(this.id)"
onblur="handleSelectBlur(this.id)"
onchange="changeCUDA(this.value)"
id="select-cuda">
</select>
<select
onmousedown="handleSelectMouseDown(this.id)"
onblur="handleSelectBlur(this.id)"
onchange="changeTorch(this.value)"
id="select-torch">
</select>
<select
onmousedown="handleSelectMouseDown(this.id)"
onblur="handleSelectBlur(this.id)"
onchange="changeMMCV(this.value)"
id="select-mmcv">
</select>
</div>
<pre id="select-cmd"></pre>
</body>
<script>
let osVal, cudaVal, torchVal, mmcvVal;
// Record the newly selected mmcv version and refresh dependent widgets.
function changeMMCV(value) {
  mmcvVal = value;
  change("select-mmcv");
}
// Record the newly selected torch version and refresh dependent widgets.
function changeTorch(value) {
  torchVal = value;
  change("select-torch");
}
// Record the newly selected CUDA version and refresh dependent widgets.
function changeCUDA(value) {
  cudaVal = value;
  change("select-cuda");
}
// Record the newly selected operating system and refresh dependent widgets.
function changeOS(value) {
  osVal = value;
  change("select-os");
}
// Expand a <select> into a scrollable list (and raise it above siblings)
// when it holds many options; otherwise leave it collapsed.
function handleSelectMouseDown(id) {
  const selectEl = document.getElementById(id);
  if (selectEl && selectEl.options && selectEl.options.length >= 9) {
    selectEl.size = 10;
    selectEl.style.zIndex = 100;
  }
}
// Collapse every <select> on the page back to its single-row height.
function handleSelectClick() {
  for (const selectEl of Array.from(document.getElementsByTagName("select"))) {
    selectEl.size = 1;
  }
}
// Collapse the <select> that lost focus; if it cannot be found,
// fall back to collapsing every select on the page.
function handleSelectBlur(id) {
  const selectEl = document.getElementById(id);
  if (selectEl) {
    selectEl.size = 1;
    selectEl.style.zIndex = 1;
  } else {
    handleSelectClick();
  }
}
// Render the pip install command for the current OS/CUDA/torch/mmcv choice
// into the #select-cmd box, e.g.:
// pip install mmcv==2.0.0rc1 -f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9/index.html
function changeCmd() {
  const output = document.getElementById("select-cmd");
  // "cpu" and "mps" builds both live under the "cpu" wheel index;
  // otherwise "11.3" becomes "cu113".
  const cudaVersion = (cudaVal === "cpu" || cudaVal === "mps")
    ? "cpu"
    : "cu" + cudaVal.split(".").join("");
  // Drop the trailing ".x" from torch versions such as "1.13.x".
  const torchVersion = "torch" + torchVal.slice(0, -2);
  output.textContent = "pip install mmcv==" + mmcvVal +
    " -f https://download.openmmlab.com/mmcv/dist/" + cudaVersion +
    "/" + torchVersion + "/index.html";
}
// Return the array's elements with duplicates removed, preserving first-seen
// order. Any non-array input (including null/undefined) yields an empty array.
function unique(arr) {
  return Array.isArray(arr) ? Array.from(new Set(arr)) : [];
}
// Build a DocumentFragment of <option> elements for the given values.
// An id such as "select-cuda" contributes the "cuda" prefix to each label;
// OS names and CPU/MPS entries are shown without a prefix.
function genOptionFragment(data, id) {
  const label = id.includes("-") ? id.split("-")[1] : id;
  const fragment = new DocumentFragment();
  for (const option of data) {
    const optionEl = document.createElement("option");
    const bare = label === "os" ||
      option.toUpperCase() === "CPU" ||
      option.toUpperCase() === "MPS";
    optionEl.textContent = bare ? `${option}` : `${label} ${option}`;
    optionEl.value = option;
    optionEl.addEventListener('click', handleSelectClick);
    fragment.appendChild(optionEl);
  }
  return fragment;
}
// Replace the children of element #id with fresh <option> nodes for data.
// The fragment is built unconditionally, matching the original call order.
function findAndAppend(data, id) {
  const fragment = genOptionFragment(data, id);
  const target = document.getElementById(id);
  if (target) {
    target.replaceChildren(fragment);
  }
}
// Cascade-refresh the dropdowns downstream of the one that changed:
// OS -> CUDA -> torch -> mmcv. Each stage repopulates its select from the
// version matrix, picks the first entry as the new value, and finally the
// install command is re-rendered.
function change(id) {
  const order = ["select-mmcv", "select-torch", "select-cuda", "select-os"];
  const idx = order.indexOf(id);
  if (idx === -1) return;
  const versionDetail = version[osVal];
  if (idx >= 3) {
    // OS changed: rebuild the list of distinct CUDA versions.
    const cuda = unique(versionDetail.map(v => v.cuda));
    cudaVal = cuda[0];
    findAndAppend(cuda, "select-cuda");
  }
  if (idx >= 2) {
    // CUDA changed (or earlier): rebuild the matching torch versions.
    const torch = versionDetail
      .filter(v => v.cuda === cudaVal)
      .map(v => v.torch);
    torchVal = torch[0];
    findAndAppend(torch, "select-torch");
  }
  if (idx >= 1) {
    // torch changed (or earlier): take the mmcv list of the matching entry.
    let mmcv = [];
    for (const v of versionDetail) {
      if (v.cuda === cudaVal && v.torch === torchVal) mmcv = v.mmcv;
    }
    mmcvVal = mmcv[0];
    findAndAppend(mmcv, "select-mmcv");
  }
  changeCmd();
}
// Populate the OS dropdown from the loaded version matrix, default to the
// first OS, and render the initial cascade plus install command.
function init() {
  document.addEventListener("click", handleSelectBlur);
  const osNames = Object.keys(window.version);
  osVal = osNames[0];
  findAndAppend(osNames, "select-os");
  change("select-os");
  changeCmd();
}
// Fetch the version matrix JSON and initialize the selectors once loaded.
window.onload = function () {
  const request = new XMLHttpRequest();
  request.open("get", "../_static/version.json");
  request.send(null);
  request.onload = function () {
    if (request.status === 200) {
      window.version = JSON.parse(request.responseText);
      init();
    }
  };
};
</script>
</html>
If you do not find a corresponding version in the dropdown box above, you probably do not have a pre-built package corresponding to the PyTorch or CUDA or mmcv version, at which point you can [build mmcv from source](build.md).
:::{note}
mmcv is only compiled on PyTorch 1.x.0 because the compatibility
usually holds between 1.x.0 and 1.x.1. If your PyTorch version is 1.x.1, you
can install mmcv compiled with PyTorch 1.x.0 and it usually works well.
For example, if your PyTorch version is 1.8.1, you can feel free to choose 1.8.x.
:::
:::{note}
If you would like to use `opencv-python-headless` instead of `opencv-python`,
e.g., in a minimum container environment or servers without GUI,
you can first install it before installing MMCV to skip the installation of `opencv-python`.
Alternatively, if it takes too long to install a dependency library, you can specify the pypi source
```bash
mim install "mmcv>=2.0.0rc1" -i https://pypi.tuna.tsinghua.edu.cn/simple
```
:::
You can run [check_installation.py](https://github.com/open-mmlab/mmcv/blob/2.x/.dev_scripts/check_installation.py) to check the installation of mmcv after running the installation commands.
#### Using mmcv with Docker
Build with local repository
```bash
git clone https://github.com/open-mmlab/mmcv.git && cd mmcv
docker build -t mmcv -f docker/release/Dockerfile .
```
Or build with remote repository
```bash
docker build -t mmcv https://github.com/open-mmlab/mmcv.git#2.x:docker/release
```
The [Dockerfile](release/Dockerfile) installs the latest released version of mmcv by default, but you can specify mmcv versions to install expected versions.
```bash
docker image build -t mmcv -f docker/release/Dockerfile --build-arg MMCV=2.0.0rc1 .
```
If you also want to use other versions of PyTorch and CUDA, you can also pass them when building docker images.
An example to build an image with PyTorch 1.11 and CUDA 11.3.
```bash
docker build -t mmcv -f docker/release/Dockerfile \
--build-arg PYTORCH=1.11.0 \
--build-arg CUDA=11.3 \
--build-arg CUDNN=8 \
--build-arg MMCV=2.0.0rc1 .
```
More available versions of PyTorch and CUDA can be found at [dockerhub/pytorch](https://hub.docker.com/r/pytorch/pytorch/tags).
### Install mmcv-lite
If you need to use PyTorch-related modules, make sure PyTorch has been successfully installed in your environment by referring to the [PyTorch official installation guide](https://github.com/pytorch/pytorch#installation).
```bash
pip install mmcv-lite
```
## <a href='https://mmcv.readthedocs.io/en/latest/'>English</a>
## <a href='https://mmcv.readthedocs.io/zh_CN/latest/'>简体中文</a>
## CNN
We provide some building bricks for CNNs, including layer building, module bundles and weight initialization.
### Layer building
We may need to try different layers of the same type when running experiments,
but do not want to modify the code from time to time.
Here we provide some layer building methods to construct layers from a dict,
which can be written in configs or specified via command line arguments.
#### Usage
A simplest example is
```python
from mmcv.cnn import build_conv_layer
cfg = dict(type='Conv3d')
layer = build_conv_layer(cfg, in_channels=3, out_channels=8, kernel_size=3)
```
- `build_conv_layer`: Supported types are Conv1d, Conv2d, Conv3d, Conv (alias for Conv2d).
- `build_norm_layer`: Supported types are BN1d, BN2d, BN3d, BN (alias for BN2d), SyncBN, GN, LN, IN1d, IN2d, IN3d, IN (alias for IN2d).
- `build_activation_layer`: Supported types are ReLU, LeakyReLU, PReLU, RReLU, ReLU6, ELU, Sigmoid, Tanh, GELU.
- `build_upsample_layer`: Supported types are nearest, bilinear, deconv, pixel_shuffle.
- `build_padding_layer`: Supported types are zero, reflect, replicate.
#### Extension
We also allow extending the building methods with custom layers and operators.
1. Write and register your own module.
```python
from mmengine.registry import MODELS
@MODELS.register_module()
class MyUpsample:
def __init__(self, scale_factor):
pass
def forward(self, x):
pass
```
2. Import `MyUpsample` somewhere (e.g., in `__init__.py`) and then use it.
```python
from mmcv.cnn import build_upsample_layer
cfg = dict(type='MyUpsample', scale_factor=2)
layer = build_upsample_layer(cfg)
```
### Module bundles
We also provide common module bundles to facilitate the network construction.
`ConvModule` is a bundle of convolution, normalization and activation layers,
please refer to the [api](api.html#mmcv.cnn.ConvModule) for details.
```python
from mmcv.cnn import ConvModule
# conv + bn + relu
conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
# conv + gn + relu
conv = ConvModule(3, 8, 2, norm_cfg=dict(type='GN', num_groups=2))
# conv + relu
conv = ConvModule(3, 8, 2)
# conv
conv = ConvModule(3, 8, 2, act_cfg=None)
# conv + leaky relu
conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
# bn + conv + relu
conv = ConvModule(
3, 8, 2, norm_cfg=dict(type='BN'), order=('norm', 'conv', 'act'))
```
### Model Zoo
Besides torchvision pre-trained models, we also provide pre-trained models of following CNN:
- VGG Caffe
- ResNet Caffe
- ResNeXt
- ResNet with Group Normalization
- ResNet with Group Normalization and Weight Standardization
- HRNetV2
- Res2Net
- RegNet
#### Model URLs in JSON
The model zoo links in MMCV are managed by JSON files.
The json file consists of key-value pair of model name and its url or path.
An example json file could be like:
```json
{
"model_a": "https://example.com/models/model_a_9e5bac.pth",
"model_b": "pretrain/model_b_ab3ef2c.pth"
}
```
The default links of the pre-trained models hosted on OpenMMLab AWS could be found [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/model_zoo/open_mmlab.json).
You may override default links by putting `open-mmlab.json` under `MMCV_HOME`. If `MMCV_HOME` is not found in your environment, `~/.cache/mmcv` will be used by default. You may use your own path with `export MMCV_HOME=/your/path`.
The external json files will be merged into default one. If the same key presents in both external json and default json, the external one will be used.
#### Load Checkpoint
The following types are supported for `filename` of `mmcv.load_checkpoint()`.
- filepath: The filepath of the checkpoint.
- `http://xxx` and `https://xxx`: The link to download the checkpoint. The `SHA256` postfix should be contained in the filename.
- `torchvision://xxx`: The model links in `torchvision.models`. Please refer to [torchvision](https://pytorch.org/docs/stable/torchvision/models.html) for details.
- `open-mmlab://xxx`: The model links or filepath provided in default and additional json files.
# Data Transformation
In the OpenMMLab algorithm library, dataset construction and data preparation are decoupled. Usually, the construction of the dataset only parses the dataset and records the basic information of each sample, while the data preparation is a series of data transformations including data loading, preprocessing, formatting, and other operations performed according to the basic information of the sample.
## Design of data transformation
In MMCV, we use various callable data transformation classes to manipulate data. These data transformation classes can accept several configuration parameters for the instantiation and then process the input data dictionary by `__call__` method. All data transformation methods accept a dictionary as the input and produce the output as a dictionary as well. A simple example is as follows:
```python
>>> import numpy as np
>>> from mmcv.transforms import Resize
>>>
>>> transform = Resize(scale=(224, 224))
>>> data_dict = {'img': np.random.rand(256, 256, 3)}
>>> data_dict = transform(data_dict)
>>> print(data_dict['img'].shape)
(224, 224, 3)
```
The data transformation class reads some fields of the input dictionary and may add or update some fields. The keys of these fields are mostly fixed. For example, `Resize` will always read fields such as `"img"` in the input dictionary. More information about the conventions for input and output fields could be found in the documentation of the corresponding class.
```{note}
By convention, the order of image shape which is used as **initialization parameters** in data transformation (such as Resize, Pad) is (width, height). In the dictionary returned by the data transformation, the image related shape, such as `img_shape`, `ori_shape`, `pad_shape`, etc., is (height, width).
```
MMCV provides a unified base class called `BaseTransform` for all data transformation classes:
```python
class BaseTransform(metaclass=ABCMeta):
def __call__(self, results: dict) -> dict:
return self.transform(results)
@abstractmethod
def transform(self, results: dict) -> dict:
pass
```
All data transformation classes must inherit `BaseTransform` and implement the `transform` method. Both the input and output of the `transform` method are a dictionary. In the **Custom data transformation class** section, we will describe how to implement a data transformation class in more detail.
## Data pipeline
As mentioned above, the inputs and outputs of all data transformations are dictionaries. Moreover, according to the \[Convention on Datasets\] (TODO) in OpenMMLab, the basic information of each sample in the dataset is also a dictionary. This way, we can connect all data transformation operations end to end and combine them into a data pipeline. This pipeline inputs the information dictionary of the samples in the dataset and outputs the information dictionary after a series of processing.
Taking the classification task as an example, we show a typical data pipeline in the figure below. For each sample, the information stored in the dataset is a dictionary, as shown on the far left in the figure. After each data transformation operation represented by the blue block, a new field (marked in green) will be added to the data dictionary or an existing field (marked in orange) will be updated.
<div align=center>
<img src="https://user-images.githubusercontent.com/26739999/154197953-bf0b1a16-3f41-4bc7-9e67-b2b9b323d895.png" width="90%"/>
</div>
The data pipeline is a list of several data transformation configuration dictionaries in the configuration file. Each dataset needs to set the parameter `pipeline` to define the data preparation operations the dataset needs to perform. The configuration of the above data pipeline in the configuration file is as follows:
```python
pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', size=256, keep_ratio=True),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
dict(type='ClsFormatBundle')
]
dataset = dict(
...
pipeline=pipeline,
...
)
```
## Common data transformation classes
The commonly used data transformation classes can be roughly divided into data loading, data preprocessing and augmentation, and data formatting. In MMCV, we provide some commonly used classes as follows:
### Data loading
To support the loading of large-scale datasets, data is usually not loaded when `Dataset` is initialized. Only the corresponding path is loaded. Therefore, it is necessary to load specific data in the data pipeline.
| Class | Feature |
| :-------------------------: | :--------------------------------------------: |
| [`LoadImageFromFile`](TODO) | Load from file path |
| [`LoadAnnotations`](TODO) | Load and organize the annotations (bbox, etc.) |
### Data preprocessing and enhancement
Data preprocessing and augmentation usually involve transforming the image itself, such as cropping, padding, scaling, etc.
| Class | Feature |
| :------------------------------: | :----------------------------------------------------: |
| [`Pad`](TODO) | Padding |
| [`CenterCrop`](TODO) | Center crop |
| [`Normalize`](TODO) | Image normalization |
| [`Resize`](TODO) | Resize to the specified size or ratio |
| [`RandomResize`](TODO) | Scale the image randomly within the specified range |
| [`RandomMultiscaleResize`](TODO) | Scale the image to a random size from multiple options |
| [`RandomGrayscale`](TODO) | Random grayscale |
| [`RandomFlip`](TODO) | Random flip |
| [`MultiScaleFlipAug`](TODO) | Support scaling and flipping during the testing |
### Data formatting
Data formatting operations are type conversions performed on the data.
| Class | Feature |
| :---------------------: | :------------------------------------------: |
| [`ToTensor`](TODO) | Convert the specified data to `torch.Tensor` |
| [`ImageToTensor`](TODO) | Convert the image to `torch.Tensor` |
## Customize data transformation classes
To implement a new data transformation class, you must inherit `BaseTransform` and implement the `transform` method. Here, we use a simple flip transform (`MyFlip`) as an example:
```python
import random
import mmcv
from mmcv.transforms import BaseTransform, TRANSFORMS
@TRANSFORMS.register_module()
class MyFlip(BaseTransform):
def __init__(self, direction: str):
super().__init__()
self.direction = direction
def transform(self, results: dict) -> dict:
img = results['img']
results['img'] = mmcv.imflip(img, direction=self.direction)
return results
```
Now, we can instantiate `MyFlip` as a callable object to handle our data dictionary.
```python
import numpy as np
transform = MyFlip(direction='horizontal')
data_dict = {'img': np.random.rand(224, 224, 3)}
data_dict = transform(data_dict)
processed_img = data_dict['img']
```
Alternatively, use `MyFlip` transform in the `pipeline` of the config file.
```python
pipeline = [
...
dict(type='MyFlip', direction='horizontal'),
...
]
```
It should be noted that if you want to use it in the configuration file, you must ensure that the file where the `MyFlip` class is located can be imported at runtime.
## Transform wrapper
Transform wrappers are a special class of data transformations. They do not operate on images, labels or other information in the data dictionary by themselves. Instead, they enhance the behavior of data transformations defined in them.
### KeyMapper
`KeyMapper` is used to map fields in the data dictionary. For example, image processing transforms usually get their values from the `"img"` field in the data dictionary. But sometimes we want these transforms to handle images in other fields in the data dictionary, such as the `"gt_img"` field.
When used with registry and configuration file, the field map wrapper should be used as follows:
```python
pipeline = [
...
dict(type='KeyMapper',
mapping={
'img': 'gt_img', # map "gt_img" to "img"
'mask': ..., # The "mask" field in the raw data is not used. That is, for wrapped data transformations, the "mask" field is not included in the data
},
auto_remap=True, # remap "img" back to "gt_img" after the transformation
transforms=[
# only need to specify "img" in `RandomFlip`
dict(type='RandomFlip'),
])
...
]
```
With `KeyMapper`, we don't need to consider various possible input field names in the `transform` method when we implement the data transformation class. We only need to deal with the default fields.
### RandomChoice and RandomApply
`RandomChoice` is used to randomly select a data transformation pipeline from the given choices. With this wrapper, we can easily implement some data augmentation functions, such as AutoAugment.
In configuration file, you can use `RandomChoice` as follows:
```python
pipeline = [
...
dict(type='RandomChoice',
transforms=[
[
dict(type='Posterize', bits=4),
dict(type='Rotate', angle=30.)
], # the first combo option
[
dict(type='Equalize'),
dict(type='Rotate', angle=30)
], # the second combo option
],
prob=[0.4, 0.6] # the prob of each combo
)
...
]
```
`RandomApply` is used to randomly perform a combination of data transformations with a specified probability. For example:
```python
pipeline = [
...
dict(type='RandomApply',
transforms=[dict(type='Rotate', angle=30.)],
prob=0.3) # perform the transformation with prob as 0.3
...
]
```
### TransformBroadcaster
Usually, a data transformation class only reads the target of an operation from one field. While we can also use `KeyMapper` to change the fields read, there is no way to apply transformations to the data of multiple fields at once. To achieve this, we need to use the multi-target extension wrapper `TransformBroadcaster`.
`TransformBroadcaster` has two uses, one is to apply data transformation to multiple specified fields, and the other is to apply data transformation to a group of targets under a field.
1. Apply to multiple fields
Suppose we need to apply a data transformation to images in two fields `"lq"` (low-quality) and `"gt"` (ground-truth).
```python
pipeline = [
dict(type='TransformBroadcaster',
# apply to the "lq" and "gt" fields respectively, and set the "img" field to both
mapping={'img': ['lq', 'gt']},
# remap the "img" field back to the original field after the transformation
auto_remap=True,
# whether to share random variables in the transformation of each target
         # see the section on random variable sharing below for details
share_random_params=True,
transforms=[
# only need to manipulate the "img" field in the `RandomFlip` class
dict(type='RandomFlip'),
])
]
```
In the `mapping` setting of the multi-target extension, we can also use `...` to ignore the specified original field. As shown in the following example, the wrapped `RandomCrop` will crop the image in the field `"img"` and update the size of the cropped image if the field `"img_shape"` exists. If we want to do the same random cropping for both image fields `"lq"` and `"gt"` at the same time but update the `"img_shape"` field only once, we can do it as in the example:
```python
pipeline = [
dict(type='TransformBroadcaster',
mapping={
'img': ['lq', 'gt'],
'img_shape': ['img_shape', ...],
},
# remap the "img" and "img_shape" fields back to their original fields after the transformation
auto_remap=True,
# whether to share random variables in the transformation of each target
         # see the section on random variable sharing below for details
share_random_params=True,
transforms=[
# "img" and "img_shape" fields are manipulated in the `RandomCrop` class
# if "img_shape" is missing, only operate on "img"
dict(type='RandomCrop'),
])
]
```
2. Apply to a group of targets under a field
Suppose we need to apply a data transformation to the `"images"` field, which is a list of images.
```python
pipeline = [
dict(type='TransformBroadcaster',
# map each image under the "images" field to the "img" field
mapping={'img': 'images'},
# remap the images under the "img" field back to the list in the "images" field after the transformation
auto_remap=True,
# whether to share random variables in the transformation of each target
share_random_params=True,
transforms=[
# in the `RandomFlip` transformation class, we only need to manipulate the "img" field
dict(type='RandomFlip'),
])
]
```
#### Decorator `cache_randomness`
In `TransformBroadcaster`, we provide the `share_random_params` option to support sharing random states across multiple data transformations. For example, in a super-resolution task, we want to apply **the same** random transformations **simultaneously** to the low-resolution image and the original image. If we use this function in a custom data transformation class, we need to mark which random variables support sharing in the class. This can be achieved with the decorator `cache_randomness`.
Taking `MyFlip` from the above example, we want to perform flipping randomly with a certain probability:
```python
from mmcv.transforms.utils import cache_randomness
@TRANSFORMS.register_module()
class MyRandomFlip(BaseTransform):
def __init__(self, prob: float, direction: str):
super().__init__()
self.prob = prob
self.direction = direction
@cache_randomness # label the output of the method as a shareable random variable
def do_flip(self):
        flip = random.random() < self.prob  # flip with probability `prob`
return flip
def transform(self, results: dict) -> dict:
img = results['img']
if self.do_flip():
results['img'] = mmcv.imflip(img, direction=self.direction)
return results
```
In the above example, we decorate the `do_flip` method with `cache_randomness`, marking the method return value `flip` as a random variable that supports sharing. Therefore, in the transformation of `TransformBroadcaster` to multiple targets, the value of this variable will remain the same.
#### Decorator `avoid_cache_randomness`
In some cases, we cannot separate the process of generating random variables in data transformation into a class method. For example, modules from third-party libraries used in data transformation encapsulate the relevant parts of random variables inside, making them impossible to be extracted as class methods for data transformation. Such data transformations cannot support shared random variables through the decorator `cache_randomness` annotation, and thus cannot share random variables during multi-target extension.
To avoid misuse of such data transformations in multi-target extensions, we provide another decorator, `avoid_cache_randomness`, to mark such data transformations:
```python
from mmcv.transforms.utils import avoid_cache_randomness
@TRANSFORMS.register_module()
@avoid_cache_randomness
class MyRandomTransform(BaseTransform):
def transform(self, results: dict) -> dict:
...
```
Data transformation classes marked with `avoid_cache_randomness` will throw an exception when their instance is wrapped by `TransformBroadcaster` and the parameter `share_random_params` is set to True. This reminds the user not to use it in this way.
There are a few things to keep in mind when using `avoid_cache_randomness`:
1. `avoid_cache_randomness` is only used to decorate data transformation classes (subclasses of `BaseTransform`) and cannot be used to decorate other general classes, class methods, or functions
2. When a data transformation decorated with `avoid_cache_randomness` is used as a base class, its subclasses **will not inherit** its feature. If the subclass is still unable to share random variables, `avoid_cache_randomness` should be used again.
3. A data transformation needs to be decorated with `avoid_cache_randomness` only when it is random and cannot share its random parameters. Data transformations without randomness require no decoration.
## ops
We implement common ops used in detection, segmentation, etc.
| Device | CPU | CUDA | MLU | MPS | Ascend |
| ---------------------------- | --- | ---- | --- | --- | ------ |
| ActiveRotatedFilter | √ | √ | | | |
| AssignScoreWithK | | √ | | | |
| BallQuery | | √ | | | |
| BBoxOverlaps | | √ | √ | √ | √ |
| BorderAlign | | √ | | | |
| BoxIouRotated | √ | √ | | | |
| BoxIouQuadri | √ | √ | | | |
| CARAFE | | √ | √ | | |
| ChamferDistance | | √ | | | |
| CrissCrossAttention | | √ | | | |
| ContourExpand | √ | | | | |
| ConvexIoU | | √ | | | |
| CornerPool | | √ | | | |
| Correlation | | √ | | | |
| Deformable Convolution v1/v2 | √ | √ | | | √ |
| Deformable RoIPool | | √ | √ | | √ |
| DiffIoURotated | | √ | | | |
| DynamicScatter | | √ | | | |
| FurthestPointSample | | √ | | | |
| FurthestPointSampleWithDist | | √ | | | |
| FusedBiasLeakyrelu | | √ | | | √ |
| GatherPoints | | √ | | | √ |
| GroupPoints | | √ | | | |
| Iou3d | | √ | √ | | |
| KNN | | √ | | | |
| MaskedConv | | √ | √ | | √ |
| MergeCells | | √ | | | |
| MinAreaPolygon | | √ | | | |
| ModulatedDeformConv2d | √ | √ | | | √ |
| MultiScaleDeformableAttn | | √ | √ | | |
| NMS | √ | √ | √ | | √ |
| NMSRotated | √ | √ | | | √ |
| NMSQuadri | √ | √ | | | |
| PixelGroup | √ | | | | |
| PointsInBoxes | √ | √ | | | |
| PointsInPolygons | | √ | | | |
| PSAMask | √ | √ | √ | | √ |
| RotatedFeatureAlign | √ | √ | | | |
| RoIPointPool3d | | √ | √ | | |
| RoIPool | | √ | √ | | √ |
| RoIAlignRotated | √ | √ | √ | | |
| RiRoIAlignRotated | | √ | | | |
| RoIAlign | √ | √ | √ | | |
| RoIAwarePool3d | | √ | √ | | |
| SAConv2d | | √ | | | |
| SigmoidFocalLoss | | √ | √ | | √ |
| SoftmaxFocalLoss | | √ | | | √ |
| SoftNMS | | √ | | | |
| Sparse Convolution | | √ | | | |
| Synchronized BatchNorm | | √ | | | |
| ThreeInterpolate | | √ | | | |
| ThreeNN | | √ | √ | | |
| TINShift | | √ | √ | | |
| UpFirDn2d | | √ | | | |
| Voxelization | √ | √ | | | √ |
| PrRoIPool | | √ | | | |
| BezierAlign | √ | √ | | | |
| BiasAct | | √ | | | |
| FilteredLrelu | | √ | | | |
| Conv2dGradfix | | √ | | | |
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment