"docs/source/vscode:/vscode.git/clone" did not exist on "7d7e3b78a3c265ab3c57eeff43af56f509907998"
Commit c54e0486 authored by Jeremy Reizenstein's avatar Jeremy Reizenstein Committed by Facebook GitHub Bot
Browse files

more readthedocs

Summary: Quote formats, spelling

Reviewed By: shapovalov

Differential Revision: D40913734

fbshipit-source-id: d6dea65d5204b3c463c656a07ef9b447b7be6a0a
parent f7ac7b60
@@ -49,7 +49,7 @@ def iterate_directory(directory_path, dest):
toc = []
if not dest.exists():
dest.mkdir()
- for file in directory_path.glob("*.py"):
+ for file in sorted(directory_path.glob("*.py")):
if file.stem.startswith("_"):
continue
module = paths_to_modules([file])
@@ -121,7 +121,7 @@ basic_dataset = [
]
basic_dataset_modules = [f"pytorch3d.implicitron.dataset.{i}" for i in basic_dataset]
create_one_file(
"pytorch3d.implicitron.dataset",
"pytorch3d.implicitron.dataset in general",
"Basics of data for implicitron",
basic_dataset_modules,
DEST_DIR / "data_basics.rst",
@@ -131,7 +131,7 @@ specific_dataset_files = [
i for i in dataset_files if i.stem.find("_dataset_map_provider") != -1
]
create_one_file(
"pytorch3d.impliciton.dataset",
"pytorch3d.implicitron.dataset specific datasets",
"specific datasets",
paths_to_modules(specific_dataset_files),
DEST_DIR / "datasets.rst",
@@ -139,7 +139,7 @@ create_one_file(
evaluation_files = sorted(ROOT_DIR.glob("pytorch3d/implicitron/evaluation/*.py"))
create_one_file(
"pytorch3d.impliciton.evaluation",
"pytorch3d.implicitron.evaluation",
"evaluation",
paths_to_modules(evaluation_files),
DEST_DIR / "evaluation.rst",
......
- pytorch3d.implicitron.dataset
- =============================
+ pytorch3d.implicitron.dataset in general
+ ========================================
Basics of data for implicitron
......
- pytorch3d.impliciton.dataset
- ============================
+ pytorch3d.implicitron.dataset specific datasets
+ ===============================================
specific datasets
......
- pytorch3d.impliciton.evaluation
- ===============================
+ pytorch3d.implicitron.evaluation
+ ================================
evaluation
......
@@ -4,10 +4,10 @@ pytorch3d.implicitron.models.implicit_function
.. toctree::
base
+ decoding_functions
idr_feature_field
neural_radiance_field
scene_representation_networks
utils
- decoding_functions
voxel_grid
voxel_grid_implicit_function
@@ -57,8 +57,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
Generates the training, validation, and testing dataset objects for
a dataset laid out on disk like CO3Dv2, with annotations in gzipped json files.
- The dataset is organized in the filesystem as follows:
- ```
+ The dataset is organized in the filesystem as follows::
self.dataset_root
├── <category_0>
│ ├── <sequence_name_0>
@@ -90,7 +90,6 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
├── <category_1>
├── ...
├── <category_K>
- ```
The dataset contains sequences named `<sequence_name_i>` from `K` categories with
names `<category_j>`. Each category comprises sequence folders
@@ -106,8 +105,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
the list of all frames and sequences of the given category stored as lists of
`FrameAnnotation` and `SequenceAnnotation` objects respectively.
- Each `set_lists_<subset_name_l>.json` file contains the following dictionary:
- ```
+ Each `set_lists_<subset_name_l>.json` file contains the following dictionary::
{
"train": [
(sequence_name: str, frame_number: int, image_path: str),
@@ -122,7 +121,7 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
...
],
]
- ```
defining the list of frames (identified with their `sequence_name` and `frame_number`)
in the "train", "val", and "test" subsets of the dataset.
Note that `frame_number` can be obtained only from `frame_annotations.jgz` and
@@ -131,8 +130,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
have its frame number set to `20`, not 5).
Each `eval_batches_<subset_name_l>.json` file contains a list of evaluation examples
- in the following form:
- ```
+ in the following form::
[
[ # batch 1
(sequence_name: str, frame_number: int, image_path: str),
......@@ -143,7 +142,7 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
...
],
]
- ```
Note that the evaluation examples always come from the `"test"` subset of the dataset.
(test frames can repeat across batches).
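To make the two JSON layouts above concrete, here is a minimal, hypothetical loading sketch. The dataset root, category, and subset name are placeholder assumptions, as are the exact subdirectory names; only the file-name patterns come from the docstring:

```python
import gzip
import json

dataset_root = "/data/co3dv2"   # placeholder
category = "apple"              # placeholder
subset_name = "fewview_dev"     # placeholder

# set_lists_<subset_name>.json holds {"train": [...], "val": [...], "test": [...]},
# where each entry is (sequence_name, frame_number, image_path).
with open(f"{dataset_root}/{category}/set_lists/set_lists_{subset_name}.json") as f:
    set_lists = json.load(f)

# eval_batches_<subset_name>.json holds a list of batches of the same triplets,
# drawn only from the "test" subset.
with open(f"{dataset_root}/{category}/eval_batches/eval_batches_{subset_name}.json") as f:
    eval_batches = json.load(f)

# As noted above, frame_number must be taken from frame_annotations.jgz
# (gzipped JSON), not inferred from image paths.
with gzip.open(f"{dataset_root}/{category}/frame_annotations.jgz", "rt") as f:
    frame_annotations = json.load(f)
```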
@@ -341,14 +340,13 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
Returns:
category_to_subset_name_list: A dictionary containing subset names available
- per category of the following form:
- ```
+ per category of the following form::
{
category_0: [category_0_subset_name_0, category_0_subset_name_1, ...],
category_1: [category_1_subset_name_0, category_1_subset_name_1, ...],
...
}
- ```
"""
category_to_subset_name_list_json = "category_to_subset_name_list.json"
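For illustration, consuming that mapping takes only a few lines; the location of the JSON file under the dataset root is an assumption:

```python
import json

# Returns {category: [subset_name_0, subset_name_1, ...], ...} per the docstring.
with open("/data/co3dv2/category_to_subset_name_list.json") as f:  # placeholder path
    category_to_subset_name_list = json.load(f)

for category, subset_names in category_to_subset_name_list.items():
    print(category, subset_names)
```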
......
@@ -554,8 +554,8 @@ def _get_flat_nvs_metric_key(result, metric_name) -> str:
def flatten_nvs_results(results):
"""
- Takes input `results` list of dicts of the form:
- ```
+ Takes input `results` list of dicts of the form::
[
{
'subset':'train/test/...',
@@ -564,12 +564,14 @@ def flatten_nvs_results(results):
},
...
]
- ```
- And converts to a flat dict as follows:
+ And converts to a flat dict as follows::
{
'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics,
...
}
"""
results_flat = {}
for result in results:
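The conversion this docstring describes amounts to the following standalone sketch, not the library's code; the "metrics" key holding nvs_eval_metrics is an assumption for illustration:

```python
def flatten_nvs_results_sketch(results):
    # Flattens [{"subset": ..., "subsubset": ..., "metrics": {...}}, ...]
    # into {"subset=...|subsubset=...": {...}}.
    flat = {}
    for result in results:
        key = f"subset={result['subset']}|subsubset={result['subsubset']}"
        flat[key] = result["metrics"]
    return flat

example = [{"subset": "train", "subsubset": "src=1", "metrics": {"psnr": 20.0}}]
print(flatten_nvs_results_sketch(example))
# {'subset=train|subsubset=src=1': {'psnr': 20.0}}
```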
......
@@ -29,21 +29,21 @@ class MultiPassEmissionAbsorptionRenderer( # pyre-ignore: 13
During each ray marching pass, features, depth map, and masks
are integrated: Let o_i be the opacity estimated by the implicit function,
and d_i be the offset between points `i` and `i+1` along the respective ray.
- Ray marching is performed using the following equations:
- ```
+ Ray marching is performed using the following equations::
ray_opacity_n = cap_fn(sum_i=1^n cap_fn(d_i * o_i)),
weight_n = weight_fn(cap_fn(d_i * o_i), 1 - ray_opacity_{n-1}),
- ```
and the final rendered quantities are computed by a dot-product of ray values
with the weights, e.g. `features = sum_n(weight_n * ray_features_n)`.
By default, for the EA raymarcher from [1] (
activated with `self.raymarcher_class_type="EmissionAbsorptionRaymarcher"`
- ):
- ```
+ )::
cap_fn(x) = 1 - exp(-x),
weight_fn(x) = w * x.
- ```
Note that the latter can be altered by changing `self.raymarcher_class_type`,
e.g. to "CumsumRaymarcher" which implements the cumulative-sum raymarcher
from NeuralVolumes [2].
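As a sanity check on the equations above, here is a rough tensorized sketch of the emission-absorption weights with `cap_fn(x) = 1 - exp(-x)` and `weight_fn(w, x) = w * x`; the function name and tensor shapes are assumptions, not the renderer's API:

```python
import torch

def ea_weights(deltas: torch.Tensor, opacities: torch.Tensor) -> torch.Tensor:
    # deltas d_i and opacities o_i, assumed shape (n_rays, n_pts_per_ray).
    capped = 1.0 - torch.exp(-deltas * opacities)  # cap_fn(d_i * o_i)
    # ray_opacity_n = cap_fn(sum_{i=1}^{n} cap_fn(d_i * o_i))
    ray_opacity = 1.0 - torch.exp(-torch.cumsum(capped, dim=-1))
    # weight_n = cap_fn(d_n * o_n) * (1 - ray_opacity_{n-1}); the opacity
    # accumulated before the first point is zero.
    prev = torch.cat(
        [torch.zeros_like(ray_opacity[..., :1]), ray_opacity[..., :-1]], dim=-1
    )
    return capped * (1.0 - prev)

# Rendered quantities are then e.g. (weights[..., None] * ray_features).sum(dim=-2).
```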
......
@@ -250,12 +250,11 @@ class AngleWeightedReductionFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
Performs a weighted aggregation using a set of predefined `reduction_functions`
and concatenates the results of each aggregation function along the
channel dimension. The weights are proportional to the cosine of the
- angle between the target ray and the source ray:
- ```
+ angle between the target ray and the source ray::
weight = (
dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
)**self.weight_by_ray_angle_gamma
- ```
The reduction functions singularize the second dimension
of the sampled features which stacks the source views.
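The weight formula, which reappears verbatim in the next hunk, can be sketched as follows; the argument names mirror the fields in the docstring but are otherwise illustrative:

```python
import torch

def ray_angle_weight(target_ray, source_ray, min_ray_angle_weight, gamma):
    # target_ray, source_ray: unit direction vectors of shape (..., 3).
    cos_angle = (target_ray * source_ray).sum(dim=-1)  # dot(target_ray, source_ray)
    return (cos_angle * 0.5 + 0.5 + min_ray_angle_weight) ** gamma
```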
@@ -359,12 +358,11 @@ class AngleWeightedIdentityFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
"""
This aggregator does not perform any feature aggregation. It only weights
the features by the weights proportional to the cosine of the
- angle between the target ray and the source ray:
- ```
+ angle between the target ray and the source ray::
weight = (
dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
)**self.weight_by_ray_angle_gamma
- ```
Settings:
min_ray_angle_weight: The minimum possible aggregation weight
......
@@ -218,8 +218,8 @@ def cameras_points_cartesian_product(
) -> Tuple[CamerasBase, torch.Tensor]:
"""
Generates all pairs of pairs of elements from 'camera' and 'pts' and returns
- `camera_rep` and `pts_rep` such that:
- ```
+ `camera_rep` and `pts_rep` such that::
camera_rep = [ pts_rep = [
camera[0] pts[0],
camera[0] pts[1],
@@ -235,15 +235,14 @@ def cameras_points_cartesian_product(
camera[n_cameras-1] ...,
... pts[batch_pts-1],
] ]
- ```
Args:
camera: A batch of `n_cameras` cameras.
pts: A batch of `batch_pts` points of shape `(batch_pts, ..., dim)`
Returns:
- camera_rep: A batch of batch_pts*n_cameras cameras such that:
- ```
+ camera_rep: A batch of batch_pts*n_cameras cameras such that::
camera_rep = [
camera[0]
camera[0]
@@ -258,11 +257,11 @@ def cameras_points_cartesian_product(
camera[n_cameras-1]
camera[n_cameras-1]
]
- ```
pts_rep: Repeated `pts` of shape `(batch_pts*n_cameras, ..., dim)`,
- such that:
- ```
+ such that::
pts_rep = [
pts[0],
pts[1],
@@ -278,7 +277,7 @@ def cameras_points_cartesian_product(
...,
pts[batch_pts-1],
]
- ```
"""
n_cameras = camera.R.shape[0]
batch_pts = pts.shape[0]
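The repetition pattern documented above is a repeat-interleave over the cameras paired with a tiling of the points; a toy sketch with an index tensor standing in for the `CamerasBase` batch:

```python
import torch

n_cameras, batch_pts = 2, 3
camera_idx = torch.arange(n_cameras)  # stand-in for the camera batch
pts = torch.randn(batch_pts, 4, 3)    # (batch_pts, ..., dim)

camera_rep = torch.repeat_interleave(camera_idx, batch_pts)  # 0, 0, 0, 1, 1, 1
pts_rep = pts.repeat(n_cameras, 1, 1)  # pts[0..2] followed by pts[0..2]
assert pts_rep.shape[0] == batch_pts * n_cameras
```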
......
@@ -73,8 +73,8 @@ class Stats(object):
# TODO: update this with context manager
"""
stats logging object useful for gathering statistics of training a deep net in pytorch
- Example:
- ```
+ Example::
# init stats structure that logs statistics 'objective' and 'top1e'
stats = Stats( ('objective','top1e') )
network = init_net() # init a pytorch module (=neural network)
@@ -86,7 +86,7 @@ class Stats(object):
# iterate over batches
for batch in dataloader:
- output = network(batch) # run and save into a dict of output variables "output"
+ output = network(batch) # run and save into a dict of output variables
# stats.update() automatically parses the 'objective' and 'top1e' from
# the "output" dict and stores this into the db
@@ -95,7 +95,7 @@ class Stats(object):
# stores the training plots into '/tmp/epoch_stats.pdf'
# and plots into a visdom server running at localhost (if running)
stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
- ```
"""
def __init__(
......
@@ -181,11 +181,11 @@ class Timer:
"""
A simple class for timing execution.
- Example:
- ```
+ Example::
with Timer():
print("This print statement is timed.")
- ```
"""
def __init__(self, name="timer", quiet=False):
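A minimal sketch of the context-manager pattern the docstring describes, assuming only the `name` and `quiet` arguments visible above; the real class may differ in detail:

```python
import time

class TimerSketch:
    def __init__(self, name="timer", quiet=False):
        self.name, self.quiet = name, quiet

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.quiet:
            print(f"{self.name}: {time.time() - self.start:.4f} s")

with TimerSketch():
    print("This print statement is timed.")
```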
......