Commit dbf06b50 authored by facebook-github-bot

Initial commit

fbshipit-source-id: ad58e416e3ceeca85fae0583308968d04e78fe0d
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Run this script before committing config.yml to verify it is valid yaml.
python -c 'import yaml; yaml.safe_load(open("config.yml"))' && echo OK
version: 2.1
#examples:
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
#drive tests with nox or tox or pytest?
# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
setupcuda: &setupcuda
run:
name: Setup CUDA
working_directory: ~/
command: |
# download and install nvidia drivers, cuda, etc
wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
echo "Done installing CUDA."
pyenv versions
nvidia-smi
pyenv global 3.7.0
gpu: &gpu
environment:
CUDA_VERSION: "10.2"
machine:
image: default
resource_class: gpu.medium # tesla m60
binary_common: &binary_common
parameters:
# Edit these defaults to do a release
build_version:
description: "version number of release binary; by default, build a nightly"
type: string
default: ""
pytorch_version:
description: "PyTorch version to build against; by default, use a nightly"
type: string
default: ""
# Don't edit these
python_version:
description: "Python version to build against (e.g., 3.7)"
type: string
cu_version:
description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
type: string
wheel_docker_image:
description: "Wheel only: what docker image to use"
type: string
default: "pytorch/manylinux-cuda101"
environment:
PYTHON_VERSION: << parameters.python_version >>
BUILD_VERSION: << parameters.build_version >>
PYTORCH_VERSION: << parameters.pytorch_version >>
CU_VERSION: << parameters.cu_version >>
jobs:
main:
<<: *gpu
machine:
image: ubuntu-1604:201903-01
steps:
- checkout
- <<: *setupcuda
- run: pip3 install --progress-bar off wheel matplotlib 'pillow<7'
- run: pip3 install --progress-bar off torch torchvision
# - run: conda create -p ~/conda_env python=3.7 numpy
# - run: conda activate ~/conda_env
# - run: conda install -c pytorch pytorch torchvision
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
- run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 setup.py build_ext --inplace
- run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python -m unittest discover -v -s tests
- run: python3 setup.py bdist_wheel
binary_linux_wheel:
<<: *binary_common
docker:
- image: << parameters.wheel_docker_image >>
resource_class: 2xlarge+
steps:
- checkout
- run: packaging/build_wheel.sh
- store_artifacts:
path: dist
- persist_to_workspace:
root: dist
paths:
- "*"
binary_linux_conda:
<<: *binary_common
docker:
- image: "pytorch/conda-cuda"
resource_class: 2xlarge+
steps:
- checkout
# This is building with cuda but no gpu present,
# so we aren't running the tests.
- run: TEST_FLAG=--no-test packaging/build_conda.sh
- store_artifacts:
path: /opt/conda/conda-bld/linux-64
- persist_to_workspace:
root: /opt/conda/conda-bld/linux-64
paths:
- "*"
binary_linux_conda_cuda:
<<: *binary_common
machine:
image: ubuntu-1604:201903-01
resource_class: gpu.medium
steps:
- checkout
- run:
name: Setup environment
command: |
set -e
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo apt-get update
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
# Add the package repositories
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
export NVIDIA_CONTAINER_VERSION="1.0.3-1"
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
sudo systemctl restart docker
DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
nvidia-smi
- run:
name: Pull docker image
command: |
set -e
export DOCKER_IMAGE=pytorch/conda-cuda
echo Pulling docker image $DOCKER_IMAGE
docker pull $DOCKER_IMAGE >/dev/null
- run:
name: Build and run tests
command: |
set -e
cd ${HOME}/project/
export DOCKER_IMAGE=pytorch/conda-cuda
export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
workflows:
version: 2
build_and_test:
jobs:
- main
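# The placeholder on the next line is expanded by the regeneration script
# (the Python helper later in this commit) into the full matrix of
# binary_linux_conda jobs.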
{{workflows()}}
- binary_linux_conda:
cu_version: cu101
name: binary_linux_conda_py3.7_cu101
python_version: '3.7'
- binary_linux_conda_cuda:
name: testrun_conda_cuda_py3.7_cu100
python_version: "3.7"
pytorch_version: "1.4"
cu_version: "cu100"
version: 2.1
#examples:
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
#drive tests with nox or tox or pytest?
# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
setupcuda: &setupcuda
run:
name: Setup CUDA
working_directory: ~/
command: |
# download and install nvidia drivers, cuda, etc
wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
echo "Done installing CUDA."
pyenv versions
nvidia-smi
pyenv global 3.7.0
gpu: &gpu
environment:
CUDA_VERSION: "10.2"
machine:
image: default
resource_class: gpu.medium # tesla m60
binary_common: &binary_common
parameters:
# Edit these defaults to do a release
build_version:
description: "version number of release binary; by default, build a nightly"
type: string
default: ""
pytorch_version:
description: "PyTorch version to build against; by default, use a nightly"
type: string
default: ""
# Don't edit these
python_version:
description: "Python version to build against (e.g., 3.7)"
type: string
cu_version:
description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
type: string
wheel_docker_image:
description: "Wheel only: what docker image to use"
type: string
default: "pytorch/manylinux-cuda101"
environment:
PYTHON_VERSION: << parameters.python_version >>
BUILD_VERSION: << parameters.build_version >>
PYTORCH_VERSION: << parameters.pytorch_version >>
CU_VERSION: << parameters.cu_version >>
jobs:
main:
<<: *gpu
machine:
image: ubuntu-1604:201903-01
steps:
- checkout
- <<: *setupcuda
- run: pip3 install --progress-bar off wheel matplotlib 'pillow<7'
- run: pip3 install --progress-bar off torch torchvision
# - run: conda create -p ~/conda_env python=3.7 numpy
# - run: conda activate ~/conda_env
# - run: conda install -c pytorch pytorch torchvision
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
- run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 setup.py build_ext --inplace
- run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python -m unittest discover -v -s tests
- run: python3 setup.py bdist_wheel
binary_linux_wheel:
<<: *binary_common
docker:
- image: << parameters.wheel_docker_image >>
resource_class: 2xlarge+
steps:
- checkout
- run: packaging/build_wheel.sh
- store_artifacts:
path: dist
- persist_to_workspace:
root: dist
paths:
- "*"
binary_linux_conda:
<<: *binary_common
docker:
- image: "pytorch/conda-cuda"
resource_class: 2xlarge+
steps:
- checkout
# This is building with cuda but no gpu present,
# so we aren't running the tests.
- run: TEST_FLAG=--no-test packaging/build_conda.sh
- store_artifacts:
path: /opt/conda/conda-bld/linux-64
- persist_to_workspace:
root: /opt/conda/conda-bld/linux-64
paths:
- "*"
binary_linux_conda_cuda:
<<: *binary_common
machine:
image: ubuntu-1604:201903-01
resource_class: gpu.medium
steps:
- checkout
- run:
name: Setup environment
command: |
set -e
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo apt-get update
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
# Add the package repositories
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
export NVIDIA_CONTAINER_VERSION="1.0.3-1"
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
sudo systemctl restart docker
DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
nvidia-smi
- run:
name: Pull docker image
command: |
set -e
export DOCKER_IMAGE=pytorch/conda-cuda
echo Pulling docker image $DOCKER_IMAGE
docker pull $DOCKER_IMAGE >/dev/null
- run:
name: Build and run tests
command: |
set -e
cd ${HOME}/project/
export DOCKER_IMAGE=pytorch/conda-cuda
export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
workflows:
version: 2
build_and_test:
jobs:
- main
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu92
name: binary_linux_conda_py3.6_cu92
python_version: '3.6'
pytorch_version: '1.4'
wheel_docker_image: pytorch/manylinux-cuda92
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu100
name: binary_linux_conda_py3.6_cu100
python_version: '3.6'
pytorch_version: '1.4'
wheel_docker_image: pytorch/manylinux-cuda100
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu101
name: binary_linux_conda_py3.6_cu101
python_version: '3.6'
pytorch_version: '1.4'
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu92
name: binary_linux_conda_py3.7_cu92
python_version: '3.7'
pytorch_version: '1.4'
wheel_docker_image: pytorch/manylinux-cuda92
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu100
name: binary_linux_conda_py3.7_cu100
python_version: '3.7'
pytorch_version: '1.4'
wheel_docker_image: pytorch/manylinux-cuda100
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu101
name: binary_linux_conda_py3.7_cu101
python_version: '3.7'
pytorch_version: '1.4'
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu92
name: binary_linux_conda_py3.8_cu92
python_version: '3.8'
pytorch_version: '1.4'
wheel_docker_image: pytorch/manylinux-cuda92
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu100
name: binary_linux_conda_py3.8_cu100
python_version: '3.8'
pytorch_version: '1.4'
wheel_docker_image: pytorch/manylinux-cuda100
- binary_linux_conda:
build_version: 0.1.0
cu_version: cu101
name: binary_linux_conda_py3.8_cu101
python_version: '3.8'
pytorch_version: '1.4'
- binary_linux_conda:
cu_version: cu101
name: binary_linux_conda_py3.7_cu101
python_version: '3.7'
- binary_linux_conda_cuda:
name: testrun_conda_cuda_py3.7_cu100
python_version: "3.7"
pytorch_version: "1.4"
cu_version: "cu100"
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
This script is adapted from the torchvision one.
It renders config.in.yml into config.yml with jinja2.
There is no Python 2.7 or macOS support.
TODO: add Python 3.8 once PyTorch 1.4 is available.
"""
import os.path
import jinja2
import yaml
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
w = []
# add "wheel" here for pypi
for btype in ["conda"]:
for python_version in ["3.6", "3.7", "3.8"]:
for cu_version in ["cu92", "cu100", "cu101"]:
w += workflow_pair(
btype=btype,
python_version=python_version,
cu_version=cu_version,
prefix=prefix,
upload=upload,
filter_branch=filter_branch,
)
return indent(indentation, w)
def workflow_pair(
*, btype, python_version, cu_version, prefix="", upload=False, filter_branch
):
w = []
base_workflow_name = (
f"{prefix}binary_linux_{btype}_py{python_version}_{cu_version}"
)
w.append(
generate_base_workflow(
base_workflow_name=base_workflow_name,
python_version=python_version,
cu_version=cu_version,
btype=btype,
filter_branch=filter_branch,
)
)
if upload:
w.append(
generate_upload_workflow(
base_workflow_name=base_workflow_name,
btype=btype,
cu_version=cu_version,
filter_branch=filter_branch,
)
)
return w
def generate_base_workflow(
*, base_workflow_name, python_version, cu_version, btype, filter_branch=None
):
d = {
"name": base_workflow_name,
"python_version": python_version,
"cu_version": cu_version,
"build_version": "0.1.0",
"pytorch_version": "1.4",
}
if cu_version == "cu92":
d["wheel_docker_image"] = "pytorch/manylinux-cuda92"
elif cu_version == "cu100":
d["wheel_docker_image"] = "pytorch/manylinux-cuda100"
if filter_branch is not None:
d["filters"] = {"branches": {"only": filter_branch}}
return {f"binary_linux_{btype}": d}
def generate_upload_workflow(
*, base_workflow_name, btype, cu_version, filter_branch
):
d = {
"name": f"{base_workflow_name}_upload",
"context": "org-member",
"requires": [base_workflow_name],
}
if btype == "wheel":
d["subfolder"] = cu_version + "/"
if filter_branch is not None:
d["filters"] = {"branches": {"only": filter_branch}}
return {f"binary_{btype}_upload": d}
def indent(indentation, data_list):
return ("\n" + " " * indentation).join(
yaml.dump(data_list, default_flow_style=False).splitlines()
)
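# indent() joins the YAML dump of the workflow list so that every line after
# the first is prefixed with `indentation` spaces, letting the result be
# substituted into config.in.yml at the matching nesting level.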
if __name__ == "__main__":
d = os.path.dirname(__file__)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(d), lstrip_blocks=True, autoescape=False
)
with open(os.path.join(d, "config.yml"), "w") as f:
f.write(env.get_template("config.in.yml").render(workflows=workflows))
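# A sketch of the regeneration flow (file names here are assumptions, e.g.
# saving this script as regenerate.py next to config.in.yml in .circleci/):
#   python regenerate.py
#   python -c 'import yaml; yaml.safe_load(open("config.yml"))' && echo OK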
AccessModifierOffset: -1
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
AlignOperands: false
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ForEachMacros: [ FOR_EACH, FOR_EACH_ENUMERATE, FOR_EACH_KV, FOR_EACH_R, FOR_EACH_RANGE, ]
IncludeCategories:
- Regex: '^<.*\.h(pp)?>'
Priority: 1
- Regex: '^<.*'
Priority: 2
- Regex: '.*'
Priority: 3
IndentCaseLabels: true
IndentWidth: 2
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
[flake8]
ignore = E203, E266, E501, W503, E221
max-line-length = 80
max-complexity = 18
select = B,C,E,F,W,T4,B9
exclude = build,__init__.py
# Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies within all project spaces, and it also applies when
an individual is representing the project or its community in public spaces.
Examples of representing a project or community include using an official
project e-mail address, posting via an official social media account, or acting
as an appointed representative at an online or offline event. Representation of
a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at <opensource-conduct@fb.com>. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
# Contributing to PyTorch3D
We want to make contributing to this project as easy and transparent as
possible.
## Pull Requests
We actively welcome your pull requests.
However, if you're adding any significant features, please make sure to open a corresponding issue outlining your proposal and motivation, and allow time for us to give feedback, *before* you send a PR.
We do not always accept new features, and we take the following factors into consideration:
- Whether the same feature can be achieved without modifying PyTorch3d directly. If any aspect of the API is not extensible, please highlight this in an issue so we can work on making it more extensible.
- Whether the feature is potentially useful to a large audience, or only to a small portion of users.
- Whether the proposed solution has a good design and interface.
- Whether the proposed solution adds extra mental/practical overhead to users who don't need such a feature.
- Whether the proposed solution breaks existing APIs.
When sending a PR, please ensure you complete the following steps:
1. Fork the repo and create your branch from `master`. Follow the instructions
in [INSTALL.md](../INSTALL.md) to build the repo.
2. If you've added code that should be tested, add tests.
3. If you've changed any APIs, please update the documentation.
4. Ensure the test suite passes:
```
cd pytorch3d/tests
python -m unittest -v
```
5. Make sure your code lints by running `dev/linter.sh` from the project root.
6. If a PR contains multiple orthogonal changes, split it into multiple separate PRs.
7. If you haven't already, complete the Contributor License Agreement ("CLA").
## Contributor License Agreement ("CLA")
In order to accept your pull request, we need you to submit a CLA. You only need
to do this once to work on any of Facebook's open source projects.
Complete your CLA here: <https://code.facebook.com/cla>
## Issues
We use GitHub issues to track public bugs. Please ensure your description is
clear and has sufficient instructions to be able to reproduce the issue.
Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
disclosure of security bugs. In those cases, please go through the process
outlined on that page and do not file a public issue.
## Coding Style
We follow these [python](http://google.github.io/styleguide/pyguide.html) and [C++](https://google.github.io/styleguide/cppguide.html) style guides.
For the linter to work, you will need to install `black`, `flake8`, `isort` and `clang-format`, and
they need to be fairly up to date.
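A minimal setup might look like this (a sketch; `clang-format` can also come from your system's package manager):
```
pip install black isort flake8
# on Ubuntu, clang-format is available from the system packages:
sudo apt install clang-format
```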
## License
By contributing to PyTorch3D, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree.
---
name: "🐛 Bugs / Unexpected behaviors"
about: Please report unexpected behaviors or bugs in PyTorch3d.
---
If you do not know the root cause of the problem / bug, and would like someone to help you, please
post according to this template:
## 🐛 Bugs / Unexpected behaviors
<!-- A clear and concise description of the issue -->
## Instructions To Reproduce the Issue:
Please include the following (depending on what the issue is):
1. Any changes you made (`git diff`) or code you wrote
```
<put diff or code here>
```
2. The exact command(s) you ran:
3. What you observed (including the full logs):
```
<put logs here>
```
Please also simplify the steps as much as possible so they do not require additional resources to
run, such as a private dataset.
blank_issues_enabled: false
---
name: "\U0001F680 Feature Request"
about: Submit a proposal/request for a new PyTorch3d feature
---
## 🚀 Feature
<!-- A clear and concise description of the feature proposal -->
## Motivation
<!-- Please outline the motivation for the proposal.
e.g. It would be great if I could do [...], I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
## Pitch
<!-- A clear and concise description, optionally with code examples showing the functionality you want. -->
NOTE: we only consider adding new features if they are useful for many users.
---
name: " Questions"
about: How do I do X with PyTorch3d? How does PyTorch3d do X?
---
## ❓ Questions on how to use PyTorch3d
<!-- A clear and concise description of the question you need help with. -->
NOTE:
1. If you encountered any errors or unexpected issues while using PyTorch3d and need help resolving them,
please use the "Bugs / Unexpected behaviors" issue template.
2. We do not answer general machine learning / computer vision questions that are not specific to
PyTorch3d, such as how a model works or what algorithm/methods can be
used to achieve X.
build/
dist/
*.egg-info/
**/__pycache__/
# Installation
## Requirements
### Core library
The core library is written in PyTorch. Several components have an underlying implementation in CUDA for improved performance. A subset of these components have CPU implementations in C++/PyTorch. It is advised to use PyTorch3d with GPU support in order to use all the features.
- Linux or macOS
- Python ≥ 3.6
- PyTorch 1.4
- torchvision that matches the PyTorch installation. You can install them together at pytorch.org to make sure of this.
- gcc & g++ ≥ 4.9
- CUDA 9.2 or 10.0 or 10.1
- [fvcore](https://github.com/facebookresearch/fvcore)
These can be installed by running:
```
conda create -n pytorch3d python=3.6
conda activate pytorch3d
conda install -c pytorch pytorch torchvision cudatoolkit=10.0
conda install -c conda-forge -c takatosp1 fvcore
```
### Tests/Linting and Demos
For developing on top of PyTorch3d or contributing, you will need to run the linter and tests. If you want to run any of the notebook tutorials in `docs/tutorials`, you will also need matplotlib.
- scikit-image
- black
- isort
- flake8
- matplotlib
- tqdm
- jupyter
- imageio
These can be installed by running:
```
# Demos
conda install jupyter
pip install scikit-image matplotlib imageio
# Tests/Linting
pip install black isort flake8
```
## Build/Install Pytorch3d
After installing the above dependencies, run one of the following commands:
### 1. Install from Anaconda Cloud
```
# Anaconda Cloud
conda install pytorch3d
```
### 2. Install from GitHub
```
pip install 'git+https://github.com/facebookresearch/pytorch3d.git'
# (add --user if you don't have permission)
```
### 3. Install from a local clone
```
git clone https://github.com/facebookresearch/pytorch3d.git
cd pytorch3d && pip install -e .
```
To rebuild after installing from a local clone, run `rm -rf build/ **/*.so` and then `pip install -e .`. You often need to rebuild pytorch3d after reinstalling PyTorch.
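For example, from the root of the local clone:
```
rm -rf build/ **/*.so
pip install -e .
```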
**Install from local clone on macOS:**
```
MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ pip install -e .
```
BSD License
For PyTorch3d software
Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Facebook nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.