Unverified commit 99ebb75d, authored by Nicolas Hug, committed by GitHub

Split example gallery into subsections (#7849)

parent 6241d471
@@ -29,6 +29,7 @@ from pathlib import Path
 import pytorch_sphinx_theme
 import torchvision
 import torchvision.models as M
+from sphinx_gallery.sorting import ExplicitOrder
 from tabulate import tabulate
 
 sys.path.append(os.path.abspath("."))
@@ -61,6 +62,7 @@ extensions = [
 sphinx_gallery_conf = {
     "examples_dirs": "../../gallery/",  # path to your example scripts
     "gallery_dirs": "auto_examples",  # path to where to save gallery generated output
+    "subsection_order": ExplicitOrder(["../../gallery/v2_transforms", "../../gallery/others"]),
     "backreferences_dir": "gen_modules/backreferences",
     "doc_module": ("torchvision",),
     "remove_config_comments": True,
......
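For context, sphinx-gallery's ExplicitOrder pins the order in which subsection directories are rendered, and (as far as I recall) a subdirectory present under examples_dirs but missing from the list fails the build. A minimal sketch of the resulting configuration, with paths relative to conf.py as in the hunk above:

from sphinx_gallery.sorting import ExplicitOrder

sphinx_gallery_conf = {
    "examples_dirs": "../../gallery/",  # source example scripts
    "gallery_dirs": "auto_examples",    # generated gallery output
    # Subsections render in exactly this order: v2_transforms first, then others.
    "subsection_order": ExplicitOrder(["../../gallery/v2_transforms", "../../gallery/others"]),
}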
@@ -6,7 +6,7 @@ Datapoints
 Datapoints are tensor subclasses which the :mod:`~torchvision.transforms.v2` v2 transforms use under the hood to
 dispatch their inputs to the appropriate lower-level kernels. Most users do not
 need to manipulate datapoints directly and can simply rely on dataset wrapping -
-see e.g. :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.
+see e.g. :ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
 
 .. autosummary::
     :toctree: generated/
......
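Every reference rename in this commit follows from one rule: sphinx-gallery derives the sphx_glr anchor from the script's path under gallery_dirs, so moving a script into a subdirectory inserts that directory's name into the label. A sketch, using a hypothetical plot_foo.py:

# gallery/plot_foo.py is referenced as
#     :ref:`sphx_glr_auto_examples_plot_foo.py`
# gallery/v2_transforms/plot_foo.py renders one level deeper and becomes
#     :ref:`sphx_glr_auto_examples_v2_transforms_plot_foo.py`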
@@ -13,7 +13,7 @@ Transforming and augmenting images
 are fully backward compatible with the current ones, and you'll see them
 documented below with a `v2.` prefix. To get started with those new
 transforms, you can check out
-:ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.
+:ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
 Note that these transforms are still BETA, and while we don't expect major
 breaking changes in the future, some APIs may still change according to user
 feedback. Please submit any feedback you may have `here
@@ -54,15 +54,15 @@ across calls. For reproducible transformations across calls, you may use
 The following examples illustrate the use of the available transforms:
 
-* :ref:`sphx_glr_auto_examples_plot_transforms.py`
+* :ref:`sphx_glr_auto_examples_others_plot_transforms.py`
 
-    .. figure:: ../source/auto_examples/images/sphx_glr_plot_transforms_001.png
+    .. figure:: ../source/auto_examples/others/images/sphx_glr_plot_transforms_001.png
        :align: center
       :scale: 65%
 
-* :ref:`sphx_glr_auto_examples_plot_scripted_tensor_transforms.py`
+* :ref:`sphx_glr_auto_examples_others_plot_scripted_tensor_transforms.py`
 
-    .. figure:: ../source/auto_examples/images/sphx_glr_plot_scripted_tensor_transforms_001.png
+    .. figure:: ../source/auto_examples/others/images/sphx_glr_plot_scripted_tensor_transforms_001.png
        :align: center
       :scale: 30%
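A short sketch of the "reproducible transformations" point truncated in the hunk header above: the functional API takes explicit parameters instead of sampling them, so repeated calls are deterministic (the image and angle here are illustrative stand-ins):

import torch
import torchvision.transforms.functional as F

img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)  # stand-in image
# Parameters are passed explicitly, so the result is identical on every call.
rotated = F.rotate(img, angle=30.0)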
@@ -269,7 +269,7 @@ CutMix and MixUp are special transforms that
 are meant to be used on batches rather than on individual images, because they
 combine pairs of images together. These can be used after the dataloader
 (once the samples are batched), or as part of a collation function. See
-:ref:`sphx_glr_auto_examples_plot_cutmix_mixup.py` for detailed usage examples.
+:ref:`sphx_glr_auto_examples_v2_transforms_plot_cutmix_mixup.py` for detailed usage examples.
 
 .. autosummary::
     :toctree: generated/
......
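A hedged sketch of the post-dataloader usage this paragraph describes, using the v2 API the referenced tutorial covers (batch shape and num_classes value are illustrative):

import torch
from torchvision.transforms import v2

cutmix_or_mixup = v2.RandomChoice([v2.CutMix(num_classes=10), v2.MixUp(num_classes=10)])

# Applied to an already-batched sample, e.g. right after the DataLoader:
images = torch.rand(4, 3, 224, 224)
labels = torch.randint(0, 10, (4,))
images, labels = cutmix_or_mixup(images, labels)  # labels become soft, shape (4, 10)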
@@ -4,7 +4,7 @@ Utils
 =====
 
 The ``torchvision.utils`` module contains various utilities, mostly :ref:`for
-visualization <sphx_glr_auto_examples_plot_visualization_utils.py>`.
+visualization <sphx_glr_auto_examples_others_plot_visualization_utils.py>`.
 
 .. currentmodule:: torchvision.utils
......
-Example gallery
-===============
-
-Below is a gallery of examples
+Examples and tutorials
+======================
@@ -20,7 +20,7 @@ import matplotlib.pyplot as plt
 import torchvision.transforms.functional as F
 
-ASSETS_DIRECTORY = "assets"
+ASSETS_DIRECTORY = "../assets"
 
 plt.rcParams["savefig.bbox"] = "tight"
......
@@ -49,8 +49,8 @@ def show(imgs):
 # The :func:`~torchvision.io.read_image` function allows you to read an image and
 # directly load it as a tensor
 
-dog1 = read_image(str(Path('assets') / 'dog1.jpg'))
-dog2 = read_image(str(Path('assets') / 'dog2.jpg'))
+dog1 = read_image(str(Path('../assets') / 'dog1.jpg'))
+dog2 = read_image(str(Path('../assets') / 'dog2.jpg'))
 show([dog1, dog2])
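All the 'assets' to '../assets' edits in this commit share one cause: the example scripts moved one directory down (into gallery/others/ or gallery/v2_transforms/) while the shared assets folder stayed where it was. Sketch of the adjusted pattern:

from pathlib import Path
from torchvision.io import read_image

# Scripts now run from gallery/<subsection>/, one level below the assets dir.
ASSETS_DIRECTORY = Path("../assets")
dog1 = read_image(str(ASSETS_DIRECTORY / "dog1.jpg"))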
# %%
......@@ -58,7 +58,7 @@ show([dog1, dog2])
# --------------------------
# Most transforms natively support tensors on top of PIL images (to visualize
# the effect of the transforms, you may refer to see
# :ref:`sphx_glr_auto_examples_plot_transforms.py`).
# :ref:`sphx_glr_auto_examples_others_plot_transforms.py`).
# Using tensor images, we can run the transforms on GPUs if cuda is available!
import torch.nn as nn
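For context on the snippet above: v1 transforms are scriptable when composed with nn.Sequential rather than T.Compose, which is the point of this scripted-tensor-transforms example. A minimal sketch:

import torch
import torch.nn as nn
import torchvision.transforms as T

# nn.Sequential (not T.Compose) is required for torch.jit.script compatibility.
transforms = nn.Sequential(T.Resize(256), T.CenterCrop(224))
scripted = torch.jit.script(transforms)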
@@ -121,7 +121,7 @@ res_scripted = scripted_predictor(batch)
 import json
 
-with open(Path('assets') / 'imagenet_class_index.json') as labels_file:
+with open(Path('../assets') / 'imagenet_class_index.json') as labels_file:
     labels = json.load(labels_file)
 
 for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
......
@@ -19,7 +19,7 @@ import torchvision.transforms as T
 plt.rcParams["savefig.bbox"] = 'tight'
 
-orig_img = Image.open(Path('assets') / 'astronaut.jpg')
+orig_img = Image.open(Path('../assets') / 'astronaut.jpg')
 
 # if you change the seed, make sure that the randomly-applied transforms
 # properly show that the image can be both transformed and *not* transformed!
 torch.manual_seed(0)
......
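The comment in that hunk is worth unpacking: v1's random transforms draw from torch's global RNG, so the seed decides whether a randomly-applied transform fires at all. A hedged sketch with a stand-in image:

import torch
import torchvision.transforms as T
from PIL import Image

flip = T.RandomHorizontalFlip(p=0.5)
img = Image.new("RGB", (8, 8))  # stand-in image

torch.manual_seed(0)
first = flip(img)
torch.manual_seed(0)
second = flip(img)  # same seed, same flip/no-flip decision as `first`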
@@ -41,8 +41,8 @@ from torchvision.utils import make_grid
 from torchvision.io import read_image
 from pathlib import Path
 
-dog1_int = read_image(str(Path('assets') / 'dog1.jpg'))
-dog2_int = read_image(str(Path('assets') / 'dog2.jpg'))
+dog1_int = read_image(str(Path('../assets') / 'dog1.jpg'))
+dog2_int = read_image(str(Path('../assets') / 'dog2.jpg'))
 dog_list = [dog1_int, dog2_int]
 
 grid = make_grid(dog_list)
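As a reminder of what the utilities touched by this file do (shapes and box values below are illustrative, not from the example):

import torch
from torchvision.utils import make_grid, draw_bounding_boxes

imgs = [torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8) for _ in range(2)]
grid = make_grid(imgs)  # tiles same-sized images into a single (3, H, W) tensor

boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0]])  # XYXY, absolute pixels
annotated = draw_bounding_boxes(imgs[0], boxes, colors="red", width=2)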
@@ -360,7 +360,7 @@ show(dogs_with_masks)
 from torchvision.models.detection import keypointrcnn_resnet50_fpn, KeypointRCNN_ResNet50_FPN_Weights
 from torchvision.io import read_image
 
-person_int = read_image(str(Path("assets") / "person1.jpg"))
+person_int = read_image(str(Path("../assets") / "person1.jpg"))
 
 weights = KeypointRCNN_ResNet50_FPN_Weights.DEFAULT
 transforms = weights.transforms()
......
+V2 transforms
+-------------
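These two added lines are presumably the new per-subsection README: sphinx-gallery expects each gallery subdirectory to carry a README.rst or README.txt whose title becomes the subsection heading in the rendered gallery, and the others/ subdirectory would need an equivalent file. Assumed layout (the README filenames are an assumption, only the directory names appear in this commit):

gallery/
├── v2_transforms/
│   ├── README.rst   # title: "V2 transforms"
│   └── plot_*.py
├── others/
│   ├── README.rst
│   └── plot_*.py
└── assets/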
@@ -6,7 +6,7 @@ How to write your own Datapoint class
 This guide is intended for advanced users and downstream library maintainers. We explain how to
 write your own datapoint class, and how to make it compatible with the built-in
 Torchvision v2 transforms. Before continuing, make sure you have read
-:ref:`sphx_glr_auto_examples_plot_datapoints.py`.
+:ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`.
 """
 
 # %%
......
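For readers landing here without the referenced tutorial: subclassing Datapoint is the whole entry point. A minimal sketch mirroring the pattern the tutorial describes (the class name is hypothetical):

from torchvision import datapoints

class MyDatapoint(datapoints.Datapoint):
    pass

my_dp = MyDatapoint([1, 2, 3])  # behaves like a tensor, but carries its type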
@@ -92,7 +92,7 @@ print(float_image)
 # In addition, :class:`~torchvision.datapoints.Image` and :class:`~torchvision.datapoints.Mask` can also take a
 # :class:`PIL.Image.Image` directly:
 
-image = datapoints.Image(PIL.Image.open("assets/astronaut.jpg"))
+image = datapoints.Image(PIL.Image.open("../assets/astronaut.jpg"))
 print(image.shape, image.dtype)
 
 # %%
......
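Alongside PIL input, the Image constructor accepts anything tensor-like; a short sketch:

import torch
from torchvision import datapoints

image = datapoints.Image(torch.rand(3, 32, 32))
print(image.shape, image.dtype)  # torch.Size([3, 32, 32]) torch.float32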
@@ -19,7 +19,7 @@ def load_data():
     from torchvision import datapoints
     from torchvision.ops import masks_to_boxes
 
-    assets_directory = pathlib.Path("assets")
+    assets_directory = pathlib.Path("../assets")
     path = assets_directory / "FudanPed00054.png"
     image = datapoints.Image(read_image(str(path)))
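For context on the helper imported in that hunk: masks_to_boxes turns a stack of boolean masks into XYXY boxes. A self-contained sketch with made-up masks:

import torch
from torchvision.ops import masks_to_boxes

masks = torch.zeros(2, 32, 32, dtype=torch.bool)  # (N, H, W), one mask per object
masks[0, 4:10, 6:12] = True
masks[1, 20:30, 1:5] = True
boxes = masks_to_boxes(masks)  # (N, 4) in XYXY order, e.g. [6., 4., 11., 9.]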
@@ -72,9 +72,9 @@ new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": la
 # %%
 # Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
-# appropriate function for the input data: :ref:`sphx_glr_auto_examples_plot_datapoints.py`. Note, however, that as a
+# appropriate function for the input data: :ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`. Note, however, that as a
 # regular user, you likely don't have to touch this yourself. See
-# :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.
+# :ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
 #
 # All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing you to store extra
 # information directly with the sample:
......
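A sketch of the pass-through behavior described just above (the sample keys and values are hypothetical):

import pathlib
import torch
import torchvision.transforms.v2 as transforms
from torchvision import datapoints

image = datapoints.Image(torch.randint(0, 256, (3, 256, 256), dtype=torch.uint8))
sample = {"image": image, "path": pathlib.Path("dog1.jpg"), "label": 3}

out = transforms.RandomCrop(224)(sample)  # only the image is cropped
print(out["path"], out["label"])          # dog1.jpg 3 -- passed through untouched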
@@ -55,7 +55,7 @@ import torchvision.transforms.v2 as transforms
 def load_example_coco_detection_dataset(**kwargs):
     # This loads fake data for illustration purposes of this example. In practice, you'll have
     # to replace this with the proper data
-    root = pathlib.Path("assets") / "coco"
+    root = pathlib.Path("../assets") / "coco"
     return datasets.CocoDetection(str(root / "images"), str(root / "instances.json"), **kwargs)
......
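The loader above pairs with torchvision's dataset wrapper, which the surrounding end-to-end example (elided here) uses to get datapoint-aware samples; a hedged sketch:

import pathlib
from torchvision import datasets

root = pathlib.Path("../assets") / "coco"
dataset = datasets.CocoDetection(str(root / "images"), str(root / "instances.json"))
# The wrapper makes samples come back as datapoints (Image, BoundingBox, ...),
# which the v2 transforms can then dispatch on.
dataset = datasets.wrap_dataset_for_transforms_v2(dataset)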
@@ -17,7 +17,7 @@ class Datapoint(torch.Tensor):
     You probably don't want to use this class unless you're defining your own
     custom Datapoints. See
-    :ref:`sphx_glr_auto_examples_plot_custom_datapoints.py` for details.
+    :ref:`sphx_glr_auto_examples_v2_transforms_plot_custom_datapoints.py` for details.
     """
 
     @staticmethod
......