Unverified Commit 7dc5e5bd authored by Philip Meier, committed by GitHub

Fix typos and grammar errors (#7065)

* fix typos throughout the code base

* fix grammar

* revert formatting changes to gallery

* revert 'an uXX'

* remove 'number of the best'
parent ed2a0adb
@@ -10,7 +10,7 @@ import torch.distributed as dist
import torchvision.models.optical_flow
import torchvision.prototype.models.depth.stereo
import utils
import vizualization
import visualization
from parsing import make_dataset, make_eval_transform, make_train_transform, VALID_DATASETS
from torch import nn
@@ -148,7 +148,7 @@ def _evaluate(
*,
padder_mode,
print_freq=10,
writter=None,
writer=None,
step=None,
iterations=None,
batch_size=None,
@@ -198,10 +198,10 @@ def _evaluate(
"the dataset is not divisible by the batch size. Try lowering the batch size or GPU number for more accurate results."
)
if writter is not None and args.rank == 0:
if writer is not None and args.rank == 0:
for meter_name, meter_value in logger.meters.items():
scalar_name = f"{meter_name} {header}"
writter.add_scalar(scalar_name, meter_value.avg, step)
writer.add_scalar(scalar_name, meter_value.avg, step)
logger.synchronize_between_processes()
print(header, logger)
@@ -249,7 +249,7 @@ def make_eval_loader(dataset_name: str, args: argparse.Namespace) -> torch.utils
return val_loader
def evaluate(model, loaders, args, writter=None, step=None):
def evaluate(model, loaders, args, writer=None, step=None):
for loader_name, loader in loaders.items():
_evaluate(
model,
@@ -259,7 +259,7 @@ def evaluate(model, loaders, args, writter=None, step=None):
padder_mode=args.padder_type,
header=f"{loader_name} evaluation",
batch_size=args.batch_size,
writter=writter,
writer=writer,
step=step,
)
@@ -394,13 +394,13 @@ def run(model, optimizer, scheduler, train_loader, val_loaders, logger, writer,
for name, value in logger.meters.items():
writer.add_scalar(name, value.avg, step)
# log the images to tensorboard
pred_grid = vizualization.make_training_sample_grid(
pred_grid = visualization.make_training_sample_grid(
image_left, image_right, disp_mask, valid_disp_mask, disp_predictions
)
writer.add_image("predictions", pred_grid, step, dataformats="HWC")
# second thing we want to see is how relevant the iterative refinement is
pred_sequence_grid = vizualization.make_disparity_sequence_grid(disp_predictions, disp_mask)
pred_sequence_grid = visualization.make_disparity_sequence_grid(disp_predictions, disp_mask)
writer.add_image("sequence", pred_sequence_grid, step, dataformats="HWC")
if step % args.save_frequency == 0:
@@ -446,13 +446,13 @@ def run(model, optimizer, scheduler, train_loader, val_loaders, logger, writer,
def main(args):
args.total_iterations = sum(args.dataset_steps)
# intialize DDP setting
# initialize DDP setting
utils.setup_ddp(args)
print(args)
args.test_only = args.train_datasets is None
# set the appropiate devices
# set the appropriate devices
if args.distributed and args.device == "cpu":
raise ValueError("The device must be cuda if we want to run in distributed mode using torchrun")
device = torch.device(args.device)
@@ -495,7 +495,7 @@ def main(args):
# initialize the learning rate schedule
scheduler = make_lr_schedule(args, optimizer)
# load them from checkpoint if need
# load them from checkpoint if needed
args.start_step = 0
if args.resume_path is not None:
checkpoint = torch.load(args.resume_path, map_location="cpu")
@@ -531,7 +531,7 @@ def main(args):
# the train dataset is preshuffled in order to respect the iteration order
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, drop_last=True)
else:
# the train dataset is already shuffled so we can use a simple SequentialSampler
# the train dataset is already shuffled, so we can use a simple SequentialSampler
sampler = torch.utils.data.SequentialSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
@@ -542,7 +542,7 @@ def main(args):
num_workers=args.workers,
)
# intialize the logger
# initialize the logger
if args.tensorboard_summaries:
from torch.utils.tensorboard import SummaryWriter
......
@@ -253,7 +253,7 @@ class AsymetricGammaAdjust(torch.nn.Module):
class RandomErase(torch.nn.Module):
# Produces multiple symetric random erasures
# Produces multiple symmetric random erasures
# these can be viewed as occlusions present in both camera views.
# Similarly to Optical Flow occlusion prediction tasks, we mask these pixels in the disparity map
def __init__(
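As an editorial aside, here is a minimal sketch of the erase-and-mask idea the comment above describes; the helper name and tensor shapes are illustrative, not the module's actual implementation:

```python
import torch

def erase_both_views(img_left, img_right, valid_disp_mask, y, x, h, w):
    # occlude the same rectangle in both camera views (a "symmetric" erasure) ...
    img_left[..., y : y + h, x : x + w] = 0
    img_right[..., y : y + h, x : x + w] = 0
    # ... and drop those pixels from the disparity supervision
    valid_disp_mask[..., y : y + h, x : x + w] = False
    return img_left, img_right, valid_disp_mask
```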
@@ -400,7 +400,7 @@ class RandomSpatialShift(torch.nn.Module):
img_right = F.affine(
img_right,
angle=angle,
translate=[0, shift], # translation only on the y axis
translate=[0, shift], # translation only on the y-axis
center=[x, y],
scale=1.0,
shear=0.0,
@@ -491,7 +491,7 @@ class RandomRescaleAndCrop(torch.nn.Module):
# The reason we don't rely on RandomResizedCrop is because of a significant
# difference in the parametrization of both transforms, in particular,
# because of the way the random parameters are sampled in both transforms,
# which leads to fairly different resuts (and different epe). For more details see
# which leads to fairly different results (and different epe). For more details see
# https://github.com/pytorch/vision/pull/5026/files#r762932579
def __init__(
self,
@@ -533,7 +533,7 @@ class RandomRescaleAndCrop(torch.nn.Module):
# exponential scaling will draw a random scale in (min_scale, max_scale) and then raise
# 2 to the power of that random value. This final scale distribution will have a different
# mean and variance than a uniform distribution. Note that a scale of 1 will result in
# in a rescaling of 2X the original size, whereas a scale of -1 will result in a rescaling
# a rescaling of 2X the original size, whereas a scale of -1 will result in a rescaling
# of 0.5X the original size.
if self.scaling_type == "exponential":
scale = 2 ** torch.empty(1, dtype=torch.float32).uniform_(self.min_scale, self.max_scale).item()
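For illustration, a self-contained sketch of the scale sampling described in the comment above; the helper name is ours, not the transform's API:

```python
import torch

def sample_scale(scaling_type: str, min_scale: float, max_scale: float) -> float:
    u = torch.empty(1, dtype=torch.float32).uniform_(min_scale, max_scale).item()
    if scaling_type == "exponential":
        # u is an exponent: u = 1 -> 2.0x upscale, u = -1 -> 0.5x downscale
        return 2.0 ** u
    # "linear": the uniform draw is used as the scale directly
    return u

torch.manual_seed(0)
print(sample_scale("exponential", -0.2, 0.5))
```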
@@ -577,7 +577,7 @@ class RandomRescaleAndCrop(torch.nn.Module):
# Note: For sparse datasets (Kitti), the original code uses a "margin"
# See e.g. https://github.com/princeton-vl/RAFT/blob/master/core/utils/augmentor.py#L220:L220
# We don't, not sure it matters much
# We don't, not sure if it matters much
y0 = torch.randint(0, img_left.shape[1] - self.crop_size[0], size=(1,)).item()
x0 = torch.randint(0, img_right.shape[2] - self.crop_size[1], size=(1,)).item()
......
@@ -54,7 +54,7 @@ def _sequence_loss_fn(
abs_diff = abs_diff.mean(axis=(1, 2, 3, 4))
num_predictions = flow_preds.shape[0]
# alocating on CPU and moving to device during run-time can force
# allocating on CPU and moving to device during run-time can force
# an unwanted GPU synchronization that produces a large overhead
if weights is None or len(weights) != num_predictions:
weights = gamma ** torch.arange(num_predictions - 1, -1, -1, device=flow_preds.device, dtype=flow_preds.dtype)
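A small standalone sketch of the gamma-discounted weighting computed here, with invented per-iteration errors; building the tensor directly on the predictions' device is what avoids the synchronization the comment warns about:

```python
import torch

gamma, num_predictions = 0.8, 4
# later refinement iterations get weights closer to 1.0
weights = gamma ** torch.arange(num_predictions - 1, -1, -1, dtype=torch.float32)
print(weights)  # tensor([0.5120, 0.6400, 0.8000, 1.0000])

# hypothetical per-iteration L1 errors, one per intermediate prediction
per_iter_l1 = torch.tensor([2.0, 1.5, 1.1, 0.9])
loss = (weights * per_iter_l1).sum()
```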
@@ -303,7 +303,7 @@ def _flow_sequence_consistency_loss_fn(
# In the original paper, an additional refinement network is used to refine a flow prediction.
# Each step performed by the recurrent module in Raft or CREStereo is a refinement step using a delta_flow update.
# which should be consistent with the previous step. In this implementation, we simplify the overall loss
# term and ignore left-right consistency loss or photometric loss which can be treated separetely.
# term and ignore left-right consistency loss or photometric loss which can be treated separately.
torch._assert(
rescale_factor <= 1.0,
......
@@ -15,7 +15,7 @@ def compute_metrics(
metrics_dict = {}
pixels_diffs = (flow_pred - flow_gt).abs()
# there is no Y flow in Stereo Matching, therefor flow.abs() = flow.pow(2).sum(dim=1).sqrt()
# there is no Y flow in Stereo Matching, therefore flow.abs() = flow.pow(2).sum(dim=1).sqrt()
flow_norm = flow_gt.abs()
if valid_flow_mask is not None:
......
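To make the equivalence in that comment concrete, a quick standalone check (shapes illustrative):

```python
import torch

flow_gt = torch.randn(2, 1, 4, 4)  # (N, 1, H, W): horizontal disparity only
l2_norm = flow_gt.pow(2).sum(dim=1, keepdim=True).sqrt()
assert torch.allclose(l2_norm, flow_gt.abs())  # single channel => norm == abs
```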
@@ -116,7 +116,7 @@ def _coco_remove_images_without_annotations(dataset, cat_list=None):
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
# keypoints task have a slight different critera for considering
# the keypoints task has slightly different criteria for considering
# if an annotation is valid
if "keypoints" not in anno[0]:
return True
......
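For context, a sketch of what the "close to zero area" helper called above could look like; the body is a reconstruction from the comment, so treat the authoritative version in the file itself as the reference:

```python
def _has_only_empty_bbox(anno):
    # COCO boxes are [x, y, w, h]; a width or height of <= 1 px is treated
    # as carrying no usable annotation (sketch, not the file's actual code)
    return all(any(side <= 1 for side in obj["bbox"][2:]) for obj in anno)

anno = [{"bbox": [10.0, 20.0, 0.5, 0.8]}]  # degenerate box
assert _has_only_empty_bbox(anno)
```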
@@ -63,7 +63,7 @@ class GroupedBatchSampler(BatchSampler):
expected_num_batches = len(self)
num_remaining = expected_num_batches - num_batches
if num_remaining > 0:
# for the remaining batches, take first the buffers with largest number
# for the remaining batches, take first the buffers with the largest number
# of elements
for group_id, _ in sorted(buffer_per_group.items(), key=lambda x: len(x[1]), reverse=True):
remaining = self.batch_size - len(buffer_per_group[group_id])
......
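A toy run of the top-up strategy in that loop, with invented buffer contents:

```python
buffer_per_group = {0: [3, 7], 1: [1, 4, 9, 12], 2: [5]}

# drain the fullest buffers first, exactly like the sorted() call above
for group_id, _ in sorted(buffer_per_group.items(), key=lambda x: len(x[1]), reverse=True):
    print(group_id, buffer_per_group[group_id])
# prints group 1 first (4 elements), then group 0 (2), then group 2 (1)
```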
@@ -56,7 +56,7 @@ torchrun --nproc_per_node 1 --nnodes 1 train.py --val-dataset sintel --batch-siz
This should give an epe of about 1.3822 on the clean pass and 2.7161 on the
final pass of Sintel-train. Results may vary slightly depending on the batch
size and the number of GPUs. For the most accurate resuts use 1 GPU and
size and the number of GPUs. For the most accurate results use 1 GPU and
`--batch-size 1`:
```
......
@@ -82,7 +82,7 @@ def _evaluate(model, args, val_dataset, *, padder_mode, num_flow_updates=None, b
def inner_loop(blob):
if blob[0].dim() == 3:
# input is not batched so we add an extra dim for consistency
# input is not batched, so we add an extra dim for consistency
blob = [x[None, :, :, :] if x is not None else None for x in blob]
image1, image2, flow_gt = blob[:3]
@@ -150,7 +150,7 @@ def evaluate(model, args):
for name in val_datasets:
if name == "kitti":
# Kitti has different image sizes so we need to individually pad them, we can't batch.
# Kitti has different image sizes, so we need to individually pad them; we can't batch.
# see comment in InputPadder
if args.batch_size != 1 and (not args.distributed or args.rank == 0):
warnings.warn(
......
@@ -164,7 +164,7 @@ class RandomResizeAndCrop(torch.nn.Module):
# The reason we don't rely on RandomResizedCrop is because of a significant
# difference in the parametrization of both transforms, in particular,
# because of the way the random parameters are sampled in both transforms,
# which leads to fairly different resuts (and different epe). For more details see
# which leads to fairly different results (and different epe). For more details see
# https://github.com/pytorch/vision/pull/5026/files#r762932579
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, stretch_prob=0.8):
super().__init__()
@@ -208,7 +208,7 @@ class RandomResizeAndCrop(torch.nn.Module):
# Note: For sparse datasets (Kitti), the original code uses a "margin"
# See e.g. https://github.com/princeton-vl/RAFT/blob/master/core/utils/augmentor.py#L220:L220
# We don't, not sure it matters much
# We don't, not sure if it matters much
y0 = torch.randint(0, img1.shape[1] - self.crop_size[0], size=(1,)).item()
x0 = torch.randint(0, img1.shape[2] - self.crop_size[1], size=(1,)).item()
......
@@ -181,7 +181,7 @@ def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400)
if gamma > 1:
raise ValueError(f"Gamma should be < 1, got {gamma}.")
# exlude invalid pixels and extremely large diplacements
# exclude invalid pixels and extremely large displacements
flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)
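A self-contained sketch of this masking step, on random data, with `max_flow` as in the signature above:

```python
import torch

max_flow = 400
flow_gt = torch.randn(2, 2, 8, 8) * 300            # (N, 2, H, W) ground-truth flow
valid_flow_mask = torch.ones(2, 8, 8, dtype=torch.bool)

flow_norm = torch.sum(flow_gt ** 2, dim=1).sqrt()  # per-pixel magnitude, (N, H, W)
valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)  # drop huge displacements
```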
@@ -248,7 +248,7 @@ def setup_ddp(args):
# https://discuss.pytorch.org/t/what-is-the-difference-between-rank-and-local-rank/61940/2
if all(key in os.environ for key in ("LOCAL_RANK", "RANK", "WORLD_SIZE")):
# if we're here, the script was called with torchrun. Otherwise
# if we're here, the script was called with torchrun. Otherwise,
# these args will be set already by the run_with_submitit script
args.local_rank = int(os.environ["LOCAL_RANK"])
args.rank = int(os.environ["RANK"])
......
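For reference, a minimal standalone version of this launcher-detection logic; the fallback values are illustrative:

```python
import os

if all(key in os.environ for key in ("LOCAL_RANK", "RANK", "WORLD_SIZE")):
    # torchrun exports these, so their presence identifies the launcher
    local_rank = int(os.environ["LOCAL_RANK"])  # GPU index on this node
    rank = int(os.environ["RANK"])              # global process index
    world_size = int(os.environ["WORLD_SIZE"])  # total number of processes
else:
    # single-process fallback; run_with_submitit sets these args itself
    local_rank, rank, world_size = 0, 0, 1
```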
@@ -260,7 +260,7 @@ def get_args_parser(add_help=True):
parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
parser.add_argument("--model", default="fcn_resnet101", type=str, help="model name")
parser.add_argument("--aux-loss", action="store_true", help="auxiliar loss")
parser.add_argument("--aux-loss", action="store_true", help="auxiliary loss")
parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
parser.add_argument(
"-b", "--batch-size", default=8, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
......
@@ -48,7 +48,7 @@ class PKSampler(Sampler):
# Ensures there are enough classes to sample from
if len(self.groups) < p:
raise ValueError("There are not enought classes to sample from")
raise ValueError("There are not enough classes to sample from")
def __iter__(self):
# Shuffle samples within groups
......
@@ -76,7 +76,7 @@ Input data augmentations at validation time (with optional parameters):
5. Convert BCHW to CBHW
This translates into the following set of command-line arguments. Please note that the `--batch-size` parameter controls the
batch size per GPU. Moreover note that our default `--lr` is configured for 64 GPUs which is how many we used for the
batch size per GPU. Moreover, note that our default `--lr` is configured for 64 GPUs, which is how many we used for the
Video resnet models:
```
# number of frames per clip
......
@@ -16,7 +16,7 @@ def pytest_collection_modifyitems(items):
# This hook is called by pytest after it has collected the tests (google its name to check out its doc!)
# We can ignore some tests as we see fit here, or add marks, such as a skip mark.
#
# Typically here, we try to optimize CI time. In particular, the GPU CI instances don't need to run the
# Typically, here, we try to optimize CI time. In particular, the GPU CI instances don't need to run the
# tests that don't need CUDA, because those tests are extensively tested in the CPU CI instances already.
# This is true for both CircleCI and the fbcode internal CI.
# In the fbcode CI, we have an additional constraint: we try to avoid skipping tests. So instead of relying on
@@ -57,7 +57,7 @@ def pytest_collection_modifyitems(items):
item.add_marker(pytest.mark.skip(reason=CIRCLECI_GPU_NO_CUDA_MSG))
if item.get_closest_marker("dont_collect") is not None:
# currently, this is only used for some tests we're sure we dont want to run on fbcode
# currently, this is only used for some tests we're sure we don't want to run on fbcode
continue
out_items.append(item)
......
@@ -137,7 +137,7 @@ def test_all_configs(test):
.. note::
This will try to remove duplicate configurations. During this process it will not not preserve a potential
This will try to remove duplicate configurations. During this process it will not preserve a potential
ordering of the configurations or an inner ordering of a configuration.
"""
@@ -146,7 +146,7 @@ def test_all_configs(test):
return [dict(config_) for config_ in {tuple(sorted(config.items())) for config in configs}]
except TypeError:
# A TypeError will be raised if a value of any config is not hashable, e.g. a list. In that case duplicate
# removal would be a lot more elaborate and we simply bail out.
# removal would be a lot more elaborate, and we simply bail out.
return configs
@functools.wraps(test)
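The dedup trick in that return statement, as a standalone example:

```python
configs = [{"a": 1, "b": 2}, {"b": 2, "a": 1}, {"a": 3}]

# dicts aren't hashable, but a sorted tuple of their items is, so a set
# comprehension removes duplicates regardless of key order
unique = [dict(c) for c in {tuple(sorted(cfg.items())) for cfg in configs}]
assert len(unique) == 2

# an unhashable value, e.g. {"a": [1, 2]}, raises TypeError inside the set
# comprehension -- that is the fallback case handled above
```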
@@ -297,7 +297,7 @@ class DatasetTestCase(unittest.TestCase):
.. note::
The default behavior is only valid if the dataset to be tested has ``root`` as the only required parameter.
Otherwise you need to overwrite this method.
Otherwise, you need to overwrite this method.
Args:
tmpdir (str): Path to a temporary directory. For most cases this acts as root directory for the dataset
@@ -604,7 +604,7 @@ class ImageDatasetTestCase(DatasetTestCase):
patch_checks=patch_checks,
**kwargs,
) as (dataset, info):
# PIL.Image.open() only loads the image meta data upfront and keeps the file open until the first access
# PIL.Image.open() only loads the image metadata upfront and keeps the file open until the first access
# to the pixel data occurs. Trying to delete such a file results in a PermissionError on Windows. Thus, we
# force-load opened images.
# This problem only occurs during testing since some tests, e.g. DatasetTestCase.test_feature_types open an
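A sketch of the force-load workaround the comment describes; the path is a placeholder:

```python
import PIL.Image

img = PIL.Image.open("some_image.png")  # lazy: only metadata is read here
img.load()  # force-read the pixel data so the file handle is released
```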
@@ -786,7 +786,7 @@ def create_video_file(
fps: float = 25,
**kwargs: Any,
) -> pathlib.Path:
"""Create an video file from random data.
"""Create a video file from random data.
Args:
root (Union[str, pathlib.Path]): Root directory the video file will be placed in.
......
@@ -13,7 +13,7 @@ SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision useable?",
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
......
@@ -194,7 +194,7 @@ class TestFxFeatureExtraction:
assert n1 == n2
assert p1.equal(p2)
# And that ouputs match
# And that outputs match
with torch.no_grad():
ilg_out = ilg_model(self.inp)
fgn_out = fx_model(self.inp)
......
@@ -1026,7 +1026,7 @@ def test_raft(model_fn, scripted):
preds = model(img1, img2)
flow_pred = preds[-1]
# Tolerance is fairly high, but there are 2 * H * W outputs to check
# The .pkl were generated on the AWS cluter, on the CI it looks like the resuts are slightly different
# The .pkl were generated on the AWS cluster, on the CI it looks like the results are slightly different
_assert_expected(flow_pred.cpu(), name=model_fn.__name__, atol=1e-2, rtol=1)
......
@@ -38,7 +38,7 @@ class TestModelsDetectionUtils:
def test_resnet_fpn_backbone_frozen_layers(self, train_layers, exp_froz_params):
# we know how many initial layers and parameters of the network should
# be frozen for each trainable_backbone_layers parameter value
# i.e all 53 params are frozen if trainable_backbone_layers=0
# i.e. all 53 params are frozen if trainable_backbone_layers=0
# and first 24 params are frozen if trainable_backbone_layers=2
model = backbone_utils.resnet_fpn_backbone("resnet50", weights=None, trainable_layers=train_layers)
# boolean list that is true if the param at that index is frozen
......
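The expectation in that comment can be checked directly; a sketch mirroring the test's own call (the printed count is the test's expectation, not something we re-derive here):

```python
from torchvision.models.detection import backbone_utils

model = backbone_utils.resnet_fpn_backbone("resnet50", weights=None, trainable_layers=2)
num_frozen = sum(1 for p in model.parameters() if not p.requires_grad)
print(num_frozen)  # the comment above expects 24 for trainable_layers=2
```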