Unverified commit 7dc5e5bd authored by Philip Meier, committed by GitHub

Fix typos and grammar errors (#7065)

* fix typos throughout the code base

* fix grammar

* revert formatting changes to gallery

* revert 'an uXX'

* remove 'number of the best'
Parent: ed2a0adb
@@ -87,7 +87,7 @@ class RoIOpTester(ABC):
x_dtype = self.dtype if x_dtype is None else x_dtype
rois_dtype = self.dtype if rois_dtype is None else rois_dtype
pool_size = 5
-# n_channels % (pool_size ** 2) == 0 required for PS opeartions.
+# n_channels % (pool_size ** 2) == 0 required for PS operations.
n_channels = 2 * (pool_size**2)
x = torch.rand(2, n_channels, 10, 10, dtype=x_dtype, device=device)
if not contiguous:
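Note on the divisibility requirement above: position-sensitive (PS) RoI operations fold `pool_size ** 2` channel groups into the output grid, so the input channel count must be a multiple of `pool_size ** 2`. A minimal sketch with `torchvision.ops.ps_roi_pool`, mirroring the values in the test:

```python
import torch
from torchvision.ops import ps_roi_pool

pool_size = 5
n_channels = 2 * pool_size**2  # must be divisible by pool_size ** 2
x = torch.rand(1, n_channels, 10, 10)
rois = torch.tensor([[0.0, 0.0, 0.0, 9.0, 9.0]])  # (batch_index, x1, y1, x2, y2)
out = ps_roi_pool(x, rois, output_size=pool_size)
print(out.shape)  # torch.Size([1, 2, 5, 5]): channels collapsed by pool_size ** 2
```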
@@ -647,11 +647,11 @@ class TestNMS:
@pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 50), (3, 10)))
def test_qnms(self, iou, scale, zero_point):
# Note: we compare qnms vs nms instead of qnms vs reference implementation.
-# This is because with the int convertion, the trick used in _create_tensors_with_iou
+# This is because with the int conversion, the trick used in _create_tensors_with_iou
# doesn't really work (in fact, nms vs reference implem will also fail with ints)
err_msg = "NMS and QNMS give different results for IoU={}"
boxes, scores = self._create_tensors_with_iou(1000, iou)
-scores *= 100 # otherwise most scores would be 0 or 1 after int convertion
+scores *= 100 # otherwise most scores would be 0 or 1 after int conversion
qboxes = torch.quantize_per_tensor(boxes, scale=scale, zero_point=zero_point, dtype=torch.quint8)
qscores = torch.quantize_per_tensor(scores, scale=scale, zero_point=zero_point, dtype=torch.quint8)
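Note on the `scores *= 100` line above: quantization rounds values to multiples of `scale`, so raw scores in [0, 1] would mostly collapse into one or two bins. A minimal sketch of the effect:

```python
import torch

scores = torch.rand(4)  # values in [0, 1]
q = torch.quantize_per_tensor(scores, scale=1.0, zero_point=0, dtype=torch.quint8)
print(q.dequantize())  # every entry rounds to 0.0 or 1.0
q100 = torch.quantize_per_tensor(scores * 100, scale=1.0, zero_point=0, dtype=torch.quint8)
print(q100.dequantize())  # resolution is preserved after scaling up
```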
@@ -135,7 +135,7 @@ class TestSmoke:
def test_mixup_cutmix(self, transform, input):
transform(input)
-# add other data that should bypass and wont raise any error
+# add other data that should bypass and won't raise any error
input_copy = dict(input)
input_copy["path"] = "/path/to/somewhere"
input_copy["num"] = 1234
@@ -1818,7 +1818,7 @@ def test_random_erasing(seed):
tol = 0.05
assert 1 / 3 - tol <= aspect_ratio <= 3 + tol
-# Make sure that h > w and h < w are equaly likely (log-scale sampling)
+# Make sure that h > w and h < w are equally likely (log-scale sampling)
aspect_ratios = []
random.seed(42)
trial = 1000
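The "log-scale sampling" referenced above draws the aspect ratio log-uniformly, which makes a ratio r and its reciprocal 1/r equally likely. A minimal sketch of that sampling scheme:

```python
import math
import random

ratio = (1 / 3, 3)
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
# exponentiating a uniform draw in log-space gives P(r) == P(1/r)
aspect_ratio = math.exp(random.uniform(*log_ratio))
```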
@@ -184,7 +184,7 @@ def test_draw_no_boxes():
boxes = torch.full((0, 4), 0, dtype=torch.float)
with pytest.warns(UserWarning, match=re.escape("boxes doesn't contain any box. No box was drawn")):
res = utils.draw_bounding_boxes(img, boxes)
-# Check that the function didnt change the image
+# Check that the function didn't change the image
assert res.eq(img).all()
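For contrast with the empty-box case above, a minimal sketch of a call that does draw a box (same `utils` import as the test):

```python
import torch
from torchvision import utils

img = torch.zeros(3, 100, 100, dtype=torch.uint8)
boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0]])  # (x1, y1, x2, y2)
res = utils.draw_bounding_boxes(img, boxes)  # res now differs from img along the outline
```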
@@ -209,7 +209,7 @@ def test_draw_segmentation_masks(colors, alpha):
# For testing we enforce that there's no overlap between the masks. The
# current behaviour is that the last mask's color will take priority when
-# masks overlap, but this makes testing slightly harder so we don't really
+# masks overlap, but this makes testing slightly harder, so we don't really
# care
overlap = masks[0] & masks[1]
masks[:, overlap] = False
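A minimal sketch that satisfies the no-overlap constraint described above, so each mask's color is unambiguous:

```python
import torch
from torchvision import utils

img = torch.zeros(3, 100, 100, dtype=torch.uint8)
masks = torch.zeros(2, 100, 100, dtype=torch.bool)
masks[0, :50] = True  # top half
masks[1, 50:] = True  # bottom half; no overlap with masks[0]
res = utils.draw_segmentation_masks(img, masks, alpha=0.5, colors=["red", "blue"])
```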
@@ -283,7 +283,7 @@ def test_draw_no_segmention_mask():
masks = torch.full((0, 100, 100), 0, dtype=torch.bool)
with pytest.warns(UserWarning, match=re.escape("masks doesn't contain any mask. No mask was drawn")):
res = utils.draw_segmentation_masks(img, masks)
-# Check that the function didnt change the image
+# Check that the function didn't change the image
assert res.eq(img).all()
@@ -127,7 +127,7 @@ def _read_from_stream(container, start_pts, end_pts, stream, stream_name, buffer
ascending order. We need to decode more frames even when we meet end
pts
"""
-# seeking in the stream is imprecise. Thus, seek to an ealier PTS by a margin
+# seeking in the stream is imprecise. Thus, seek to an earlier PTS by a margin
margin = 1
seek_offset = max(start_pts - margin, 0)
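A sketch of how the margin is applied, assuming PyAV-style `container` and `stream` objects (hypothetical helper, not the module's actual function):

```python
import av  # PyAV


def seek_with_margin(container, stream, start_pts: int, margin: int = 1) -> None:
    # seeking is imprecise: aim `margin` pts earlier, then decode forward
    seek_offset = max(start_pts - margin, 0)
    container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
```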
@@ -301,7 +301,7 @@ struct DecoderMetadata {
};
/**
* Abstract class for decoding media bytes
-* It has two diffrent modes. Internal media bytes retrieval for given uri and
+* It has two different modes. Internal media bytes retrieval for given uri and
* external media bytes provider in case of memory streams
*/
class MediaDecoder {
@@ -61,7 +61,7 @@ DecoderInCallback MemoryBuffer::getCallback(
}
// seek mode
if (!timeoutMs) {
-// seek capabilty, yes - supported
+// seek capability, yes - supported
return 0;
}
return object.seek(size, whence);
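The `timeoutMs == 0` branch above is a capability probe rather than an actual seek. A schematic Python rendering of the protocol (names are illustrative, not torchvision API):

```python
def seek_callback(buffer, size, whence, timeout_ms):
    if timeout_ms == 0:
        # capability probe: return 0 for "seek supported", -1 for "not supported"
        return 0
    return buffer.seek(size, whence)
```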
@@ -368,7 +368,7 @@ TEST(SyncDecoder, TestMemoryBufferNoSeekableWithFullRead) {
}
// seek mode
if (!timeoutMs) {
-// seek capabilty, yes - no
+// seek capability, yes - no
return -1;
}
return object.seek(size, whence);
@@ -408,7 +408,7 @@ TEST(SyncDecoder, TestMemoryBufferNoSeekableWithPartialRead) {
}
// seek mode
if (!timeoutMs) {
-// seek capabilty, yes - no
+// seek capability, yes - no
return -1;
}
return object.seek(size, whence);
@@ -319,14 +319,14 @@ void Video::Seek(double ts, bool fastSeek = false) {
std::tuple<torch::Tensor, double> Video::Next() {
TORCH_CHECK(initialized, "Video object has to be initialized first");
// if failing to decode simply return a null tensor (note, should we
-// raise an exeption?)
+// raise an exception?)
double frame_pts_s;
torch::Tensor outFrame = torch::zeros({0}, torch::kByte);
// decode single frame
DecoderOutputMessage out;
int64_t res = decoder.decode(&out, decoderTimeoutMs);
-// if successfull
+// if successful
if (res == 0) {
frame_pts_s = double(double(out.header.pts) * 1e-6);
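On the Python side this decoder surfaces through `torchvision.io.VideoReader`; a minimal sketch of pulling one frame ("video.mp4" is a placeholder path):

```python
import torchvision

reader = torchvision.io.VideoReader("video.mp4", "video")
frame = next(reader)  # dict with "data" (uint8 tensor) and "pts" (seconds)
print(frame["data"].shape, frame["pts"])
```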
@@ -42,8 +42,8 @@ struct Video : torch::CustomClassHolder {
private:
bool succeeded = false; // decoder init flag
// seekTS and doSeek act as a flag - if it's not set, next function simply
-// retruns the next frame. If it's set, we look at the global seek
-// time in comination with any_frame settings
+// returns the next frame. If it's set, we look at the global seek
+// time in combination with any_frame settings
double seekTS = -1;
bool initialized = false;
@@ -60,7 +60,7 @@ void roi_align_forward_kernel_impl(
// When the grid is empty, output zeros.
const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
-// we want to precalculate indices and weights shared by all chanels,
+// we want to precalculate indices and weights shared by all channels,
// this is the key point of optimization
std::vector<detail::PreCalc<T>> pre_calc(
roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
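The optimization works because the four bilinear corner indices and weights depend only on the sampling position, not on the channel, so they are computed once and reused for every channel. A schematic Python sketch of the idea (hypothetical helper, not the C++ API):

```python
def precalc_bilinear(points, height, width):
    # one (corners, weights) entry per sampling point, shared by all channels
    table = []
    for y, x in points:
        y0, x0 = int(y), int(x)
        y1, x1 = min(y0 + 1, height - 1), min(x0 + 1, width - 1)
        ly, lx = y - y0, x - x0
        hy, hx = 1.0 - ly, 1.0 - lx
        corners = ((y0, x0), (y0, x1), (y1, x0), (y1, x1))
        weights = (hy * hx, hy * lx, ly * hx, ly * lx)
        table.append((corners, weights))
    return table
```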
@@ -164,7 +164,7 @@ void qroi_align_forward_kernel_impl(
const float count =
std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
-// we want to precalculate indices and weights shared by all chanels,
+// we want to precalculate indices and weights shared by all channels,
// this is the key point of optimization
std::vector<detail::PreCalc<float>> pre_calc(
roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
@@ -424,9 +424,9 @@ class Middlebury2014Stereo(StereoMatchingDataset):
split (string, optional): The dataset split of scenes, either "train" (default), "test", or "additional"
use_ambient_views (boolean, optional): Whether to use different expose or lightning views when possible.
The dataset samples with equal probability between ``[im1.png, im1E.png, im1L.png]``.
-calibration (string, optional): Wether or not to use the calibrated (default) or uncalibrated scenes.
+calibration (string, optional): Whether or not to use the calibrated (default) or uncalibrated scenes.
transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
-download (boolean, optional): Wether or not to download the dataset in the ``root`` directory.
+download (boolean, optional): Whether or not to download the dataset in the ``root`` directory.
"""
splits = {
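A hedged usage sketch for the docstring above ("data/" is a placeholder root; the exact sample layout depends on the configured transforms):

```python
from torchvision.datasets import Middlebury2014Stereo

dataset = Middlebury2014Stereo("data/", split="train", use_ambient_views=True, download=True)
sample = dataset[0]  # left/right images plus disparity data, per the class docs
```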
@@ -720,7 +720,7 @@ class CREStereo(StereoMatchingDataset):
class FallingThingsStereo(StereoMatchingDataset):
"""`FallingThings <https://research.nvidia.com/publication/2018-06_falling-things-synthetic-dataset-3d-object-detection-and-pose-estimation>`_ dataset.
-The dataset is expected to have the following structre: ::
+The dataset is expected to have the following structure: ::
root
FallingThings
@@ -825,7 +825,7 @@ class SceneFlowStereo(StereoMatchingDataset):
"""Dataset interface for `Scene Flow <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ datasets.
This interface provides access to the `FlyingThings3D, `Monkaa` and `Driving` datasets.
-The dataset is expected to have the following structre: ::
+The dataset is expected to have the following structure: ::
root
SceneFlow
@@ -1031,7 +1031,7 @@ class SintelStereo(StereoMatchingDataset):
disparity_map = r * 4 + g / (2**6) + b / (2**14)
# reshape into (C, H, W) format
disparity_map = np.transpose(disparity_map, (2, 0, 1))
-# find the appropiate file paths
+# find the appropriate file paths
occlued_mask_path, out_of_frame_mask_path = self._get_occlussion_mask_paths(file_path)
# occlusion masks
valid_mask = np.asarray(Image.open(occlued_mask_path)) == 0
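As a quick sanity check of the decoding formula above: a pixel with (r, g, b) = (10, 16, 0) decodes to 10 * 4 + 16 / 64 + 0 = 40.25.

```python
import numpy as np

r, g, b = np.float32(10), np.float32(16), np.float32(0)
disparity = r * 4 + g / (2**6) + b / (2**14)
print(disparity)  # 40.25
```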
@@ -1058,7 +1058,7 @@ class SintelStereo(StereoMatchingDataset):
class InStereo2k(StereoMatchingDataset):
"""`InStereo2k <https://github.com/YuhuaXu/StereoDataset>`_ dataset.
-The dataset is expected to have the following structre: ::
+The dataset is expected to have the following structure: ::
root
InStereo2k
@@ -41,7 +41,7 @@ class CelebA(VisionDataset):
"""
base_folder = "celeba"
-# There currently does not appear to be a easy way to extract 7z in python (without introducing additional
+# There currently does not appear to be an easy way to extract 7z in python (without introducing additional
# dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
# right now.
file_list = [
@@ -177,7 +177,7 @@ class Cityscapes(VisionDataset):
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
-than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
+than one item. Otherwise, target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert("RGB")
@@ -11,7 +11,7 @@ class Country211(ImageFolder):
This dataset was built by filtering the images from the YFCC100m dataset
that have GPS coordinate corresponding to a ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
-100 test images images for each country.
+100 test images for each country.
Args:
root (string): Root directory of the dataset.
@@ -102,7 +102,7 @@ class HMDB51(VisionDataset):
output_format=output_format,
)
# we bookkeep the full version of video clips because we want to be able
-# to return the meta data of full version rather than the subset version of
+# to return the metadata of full version rather than the subset version of
# video clips
self.full_video_clips = video_clips
self.fold = fold
@@ -366,7 +366,7 @@ class QMNIST(MNIST):
that takes in the target and transforms it.
train (bool,optional,compatibility): When argument 'what' is
not specified, this boolean decides whether to load the
-training set ot the testing set. Default: True.
+training set or the testing set. Default: True.
"""
subsets = {"train": "train", "test": "test", "test10k": "test", "test50k": "test", "nist": "nist"}
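A hedged usage sketch of the `what` / `train` selection described above ("data/" is a placeholder root):

```python
from torchvision.datasets import QMNIST

test10k = QMNIST("data/", what="test10k", download=True)  # explicit subset
train = QMNIST("data/", train=True, download=True)  # compatibility path
```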
@@ -15,7 +15,7 @@ class Places365(VisionDataset):
root (string): Root directory of the Places365 dataset.
split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challenge``,
``val``.
-small (bool, optional): If ``True``, uses the small images, i. e. resized to 256 x 256 pixels, instead of the
+small (bool, optional): If ``True``, uses the small images, i.e. resized to 256 x 256 pixels, instead of the
high resolution ones.
download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
downloaded archives are not downloaded again.
@@ -32,7 +32,7 @@ class Places365(VisionDataset):
targets (list): The class_index value for each image in the dataset
Raises:
-RuntimeError: If ``download is False`` and the meta files, i. e. the devkit, are not present or corrupted.
+RuntimeError: If ``download is False`` and the meta files, i.e. the devkit, are not present or corrupted.
RuntimeError: If ``download is True`` and the image archive is already extracted.
"""
_SPLITS = ("train-standard", "train-challenge", "val")
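A hedged usage sketch for the options above ("data/" is a placeholder root):

```python
from torchvision.datasets import Places365

val = Places365("data/", split="val", small=True, download=True)
img, label = val[0]
```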
@@ -15,7 +15,7 @@ class STL10(VisionDataset):
root (string): Root directory of dataset where directory
``stl10_binary`` exists.
split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
-Accordingly dataset is selected.
+Accordingly, dataset is selected.
folds (int, optional): One of {0-9} or None.
For training, loads one of the 10 pre-defined folds of 1k samples for the
standard evaluation procedure. If no value is passed, loads the 5k samples.
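A hedged usage sketch of the split/folds selection ("data/" is a placeholder root):

```python
from torchvision.datasets import STL10

fold0 = STL10("data/", split="train", folds=0, download=True)  # one 1k-sample fold
full = STL10("data/", split="train+unlabeled", download=True)
```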