name: Docs

on:
  pull_request:
  push:
    branches:
      - nightly
      - main
      - release/*
  workflow_dispatch:

jobs:
  build:
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    with:
      repository: pytorch/vision
      upload-artifact: docs
      script: |
        set -euo pipefail

        # Docs are built on CPU only; no GPU toolchain is needed.
        export PYTHON_VERSION=3.8
        export GPU_ARCH_TYPE=cpu
        export GPU_ARCH_VERSION=''
        ./.github/scripts/setup-env.sh

        # Prepare conda
        CONDA_PATH=$(which conda)
        eval "$(${CONDA_PATH} shell.bash hook)"
        conda activate ci
        # FIXME: not sure why we need this. `ldd torchvision/video_reader.so` shows that it
        # already links against the one pulled from conda. However, at runtime it pulls from
        # /lib64
        # Should we maybe always do this in `./.github/scripts/setup-env.sh` so that we don't
        # have to pay attention in all other workflows?
        export LD_LIBRARY_PATH="${CONDA_PREFIX}/lib:${LD_LIBRARY_PATH}"

        cd docs

        echo '::group::Install doc requirements'
        pip install --progress-bar=off -r requirements.txt
        echo '::endgroup::'

        echo '::group::Build HTML docs'
        # The runner does not have sufficient memory to run with as many processes as there
        # are cores (`-j auto`). Thus, we limit to a single process (`-j 1`) here.
        sed -i -e 's/-j auto/-j 1/' Makefile
        make html
        echo '::endgroup::'

        # Expose the built HTML as the job artifact declared via `upload-artifact: docs`.
        mv build/html "${RUNNER_ARTIFACT_DIR}"

  upload-preview:
    if: github.event_name == 'pull_request'
    needs: [build]
    runs-on: [self-hosted, linux.2xlarge]
    steps:
      - uses: actions/download-artifact@v3
        with:
          name: docs

      - name: Upload docs preview
        uses: seemethere/upload-artifact-s3@v5
        with:
          retention-days: 14
          s3-bucket: doc-previews
          if-no-files-found: error
          path: html
          s3-prefix: pytorch/vision/${{ github.event.pull_request.number }}