Unverified Commit cbc2491f authored by Tai-Wang, committed by GitHub

Add code-spell pre-commit hook and fix typos (#995)

parent 6b1602f1
@@ -23,7 +23,7 @@ class BallQuery(Function):
             center_xyz (Tensor): (B, npoint, 3) centers of the ball query.

         Returns:
-            Tensor: (B, npoint, nsample) tensor with the indicies of
+            Tensor: (B, npoint, nsample) tensor with the indices of
                 the features that form the query balls.
         """
         assert center_xyz.is_contiguous()
...
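For readers unfamiliar with the op, here is a slow, pure-PyTorch sketch of what the CUDA kernel computes, using the shapes from the docstring above; the pad-with-first-hit behavior is an assumption based on the standard PointNet++ implementation, not taken from this diff:

```python
import torch

def ball_query_ref(max_radius, nsample, xyz, center_xyz):
    # xyz: (B, N, 3) points, center_xyz: (B, npoint, 3) query centers.
    # Returns (B, npoint, nsample) indices of points inside each ball.
    B, npoint, _ = center_xyz.shape
    idx = torch.zeros(B, npoint, nsample, dtype=torch.long)
    for b in range(B):
        for i in range(npoint):
            d = (xyz[b] - center_xyz[b, i]).norm(dim=1)       # (N,)
            hits = (d < max_radius).nonzero(as_tuple=True)[0]
            if hits.numel() == 0:
                continue                        # no neighbor: leave zeros
            take = hits[:nsample]
            idx[b, i, :take.numel()] = take
            idx[b, i, take.numel():] = take[0]  # pad with the first hit
    return idx
```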
@@ -183,7 +183,7 @@ class GroupingOperation(Function):
         Args:
             features (Tensor): (B, C, N) tensor of features to group.
-            indices (Tensor): (B, npoint, nsample) the indicies of
+            indices (Tensor): (B, npoint, nsample) the indices of
                 features to group with.

         Returns:
...
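The grouping the docstring describes can be emulated with `torch.gather`; a minimal sketch with toy shapes (all names illustrative):

```python
import torch

B, C, N, npoint, nsample = 2, 16, 1024, 128, 32
features = torch.randn(B, C, N)
indices = torch.randint(0, N, (B, npoint, nsample))

# Broadcast indices over the channel dim, gather along N, then reshape.
idx = indices.view(B, 1, npoint * nsample).expand(-1, C, -1)
grouped = features.gather(2, idx).view(B, C, npoint, nsample)
assert grouped.shape == (B, C, npoint, nsample)
```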
@@ -27,11 +27,11 @@ class KNN(Function):
             center_xyz (Tensor): (B, npoint, 3) if transposed == False,
                 else (B, 3, npoint). centers of the knn query.
             transposed (bool): whether the input tensors are transposed.
-                defaults to False. Should not expicitly use this keyword
+                defaults to False. Should not explicitly use this keyword
                 when calling knn (=KNN.apply), just add the fourth param.

         Returns:
-            Tensor: (B, k, npoint) tensor with the indicies of
+            Tensor: (B, k, npoint) tensor with the indices of
                 the features that form k-nearest neighbours.
         """
         assert k > 0
...
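A dense-distance reference for small inputs, matching the documented shapes in the non-transposed case (a sketch, not the CUDA kernel):

```python
import torch

def knn_ref(k, xyz, center_xyz):
    # xyz: (B, N, 3), center_xyz: (B, npoint, 3) -> (B, k, npoint)
    dist = torch.cdist(center_xyz, xyz)             # (B, npoint, N)
    idx = dist.topk(k, dim=-1, largest=False).indices
    return idx.transpose(1, 2).contiguous()         # k-first, per docstring
```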
@@ -26,7 +26,7 @@ class AllReduce(Function):

 @NORM_LAYERS.register_module('naiveSyncBN1d')
 class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
-    """Syncronized Batch Normalization for 3D Tensors.
+    """Synchronized Batch Normalization for 3D Tensors.

     Note:
         This implementation is modified from
@@ -37,7 +37,7 @@ class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
         when the batch size on each worker is quite different
         (e.g., when scale augmentation is used).
         In 3D detection, different workers has points of different shapes,
-        whish also cause instability.
+        which also cause instability.

         Use this implementation before `nn.SyncBatchNorm` is fixed.
         It is slower than `nn.SyncBatchNorm`.
@@ -80,7 +80,7 @@ class NaiveSyncBatchNorm1d(nn.BatchNorm1d):

 @NORM_LAYERS.register_module('naiveSyncBN2d')
 class NaiveSyncBatchNorm2d(nn.BatchNorm2d):
-    """Syncronized Batch Normalization for 4D Tensors.
+    """Synchronized Batch Normalization for 4D Tensors.

     Note:
         This implementation is modified from
...
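The "naive" synchronization referenced in the Note boils down to all-reducing the first and second moments so every worker normalizes with identical global statistics. A minimal sketch of that core step, modeled on the detectron2 implementation the docstring points to (note the plain average over workers, which itself assumes equal per-worker batch sizes):

```python
import torch
import torch.distributed as dist

def sync_batch_stats(x, eps=1e-5):
    # x: (B, C, L) activations on this worker.
    mean = x.mean(dim=[0, 2])               # per-channel mean, (C,)
    meansqr = (x * x).mean(dim=[0, 2])      # per-channel E[x^2], (C,)
    vec = torch.cat([mean, meansqr])
    dist.all_reduce(vec)                    # sum over all workers
    vec = vec / dist.get_world_size()       # naive average
    mean, meansqr = torch.split(vec, x.shape[1])
    var = (meansqr - mean * mean).clamp(min=0)
    return mean, var + eps
```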
@@ -83,7 +83,7 @@ class ScoreNet(nn.Module):
         Args:
             xyz_features (torch.Tensor): (B, C, N, K), features constructed
                 from xyz coordinates of point pairs. May contain relative
-                positions, Euclidian distance, etc.
+                positions, Euclidean distance, etc.

         Returns:
             torch.Tensor: (B, N, K, M), predicted scores for `M` kernels.
@@ -174,7 +174,7 @@ class PAConv(nn.Module):
             # (grouped_xyz - center_xyz, grouped_xyz)
             self.scorenet_in_channels = 6
         elif scorenet_input == 'w_neighbor_dist':
-            # (center_xyz, grouped_xyz - center_xyz, Euclidian distance)
+            # (center_xyz, grouped_xyz - center_xyz, Euclidean distance)
             self.scorenet_in_channels = 7
         else:
             raise NotImplementedError(
...
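The channel counts being documented follow directly from concatenation: 3 (center) + 3 (relative position) + 1 (distance) = 7. A toy sketch of the 'w_neighbor_dist' case (tensor shapes are assumptions for illustration):

```python
import torch

B, npoint, K = 2, 128, 16
center_xyz = torch.randn(B, 3, npoint, K)    # center repeated per neighbor
grouped_xyz = torch.randn(B, 3, npoint, K)   # the K grouped neighbors

diff = grouped_xyz - center_xyz                        # relative position
dist = diff.norm(dim=1, keepdim=True)                  # (B, 1, npoint, K)
xyz_features = torch.cat([center_xyz, diff, dist], 1)  # (B, 7, npoint, K)
assert xyz_features.shape[1] == 7   # matches scorenet_in_channels above
```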
@@ -2,14 +2,14 @@ import torch


 def calc_euclidian_dist(xyz1, xyz2):
-    """Calculate the Euclidian distance between two sets of points.
+    """Calculate the Euclidean distance between two sets of points.

     Args:
         xyz1 (torch.Tensor): (N, 3), the first set of points.
         xyz2 (torch.Tensor): (N, 3), the second set of points.

     Returns:
-        torch.Tensor: (N, ), the Euclidian distance between each point pair.
+        torch.Tensor: (N, ), the Euclidean distance between each point pair.
     """
     assert xyz1.shape[0] == xyz2.shape[0], 'number of points are not the same'
     assert xyz1.shape[1] == xyz2.shape[1] == 3, \
...
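The documented behavior reduces to a row-wise norm over corresponding points; an equivalent one-liner for reference:

```python
import torch

def calc_euclidean_dist_ref(xyz1, xyz2):
    # (N, 3) x (N, 3) -> (N,) distance between corresponding rows
    return torch.norm(xyz1 - xyz2, dim=-1)
```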
@@ -28,7 +28,7 @@ class PAConvSAModuleMSG(BasePointSAModule):
             - 'w_neighbor': Use xyz coordinates and the difference with center
                 points as input.
             - 'w_neighbor_dist': Use xyz coordinates, the difference with
-                center points and the Euclidian distance as input.
+                center points and the Euclidean distance as input.
         scorenet_cfg (dict, optional): Config of the ScoreNet module, which
             may contain the following keys and values:
...
@@ -143,16 +143,16 @@ class SparseConvolution(SparseModule):
             out_tensor.indice_dict = input.indice_dict
             out_tensor.grid = input.grid
             return out_tensor
-        datas = input.find_indice_pair(self.indice_key)
+        data = input.find_indice_pair(self.indice_key)
         if self.inverse:
-            assert datas is not None and self.indice_key is not None
-            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = datas
+            assert data is not None and self.indice_key is not None
+            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = data
             assert indice_pairs.shape[0] == np.prod(
                 self.kernel_size
             ), 'inverse conv must have same kernel size as its couple conv'
         else:
-            if self.indice_key is not None and datas is not None:
-                outids, _, indice_pairs, indice_pair_num, _ = datas
+            if self.indice_key is not None and data is not None:
+                outids, _, indice_pairs, indice_pair_num, _ = data
             else:
                 outids, indice_pairs, indice_pair_num = ops.get_indice_pairs(
                     indices,
...
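The `indice_key` logic above caches indice pairs on the sparse tensor so later layers can reuse them; in the spconv-style API this code follows, an inverse conv is declared as the "couple" of a forward conv by sharing the key. A hedged usage sketch (layer arguments are illustrative, not from this diff):

```python
import spconv

# The downsampling conv computes and caches indice pairs under 'cp1';
# the inverse conv with the same key and the same kernel size restores
# the original sparsity pattern by reusing those cached pairs.
net = spconv.SparseSequential(
    spconv.SparseConv3d(16, 32, kernel_size=3, stride=2, indice_key='cp1'),
    spconv.SparseInverseConv3d(32, 16, kernel_size=3, indice_key='cp1'),
)
```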
@@ -93,7 +93,7 @@ struct delimiters {
 };

 // Functor to print containers. You can use this directly if you want
-// to specificy a non-default delimiters type. The printing logic can
+// to specify a non-default delimiters type. The printing logic can
 // be customized by specializing the nested template.

 template <typename T, typename TChar = char,
...
@@ -73,7 +73,7 @@ void sstream_print(SStream &ss, T val, TArgs... args) {
   if (!(expr)) {                                      \
     std::stringstream __macro_s;                      \
     __macro_s << __FILE__ << " " << __LINE__ << "\n"; \
-    __macro_s << #expr << " assert faild. ";          \
+    __macro_s << #expr << " assert failed. ";         \
     tv::sstream_print(__macro_s, __VA_ARGS__);        \
     throw std::runtime_error(__macro_s.str());        \
   }                                                   \
@@ -84,7 +84,7 @@ void sstream_print(SStream &ss, T val, TArgs... args) {
   if (!(expr)) {                                      \
     std::stringstream __macro_s;                      \
     __macro_s << __FILE__ << " " << __LINE__ << "\n"; \
-    __macro_s << #expr << " assert faild. ";          \
+    __macro_s << #expr << " assert failed. ";         \
     tv::sstream_print(__macro_s, __VA_ARGS__);        \
     throw std::invalid_argument(__macro_s.str());     \
   }                                                   \
...
@@ -305,7 +305,7 @@ int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels,
   cudaDeviceSynchronize();
   AT_CUDA_CHECK(cudaGetLastError());

-  // 3. determin voxel num and voxel's coor index
+  // 3. determine voxel num and voxel's coor index
   // make the logic in the CUDA device could accelerate about 10 times
   auto coor_to_voxelidx = -at::ones(
       {
@@ -316,7 +316,7 @@ int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels,
       {
         1,
       },
-      points.options().dtype(at::kInt));  // must be zero from the begining
+      points.options().dtype(at::kInt));  // must be zero from the beginning

   AT_DISPATCH_ALL_TYPES(
       temp_coors.scalar_type(), "determin_duplicate", ([&] {
...
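Step 3 above ("determine voxel num and voxel's coor index") is essentially a deduplication of per-point voxel coordinates. A CPU sketch of the idea (note `torch.unique` sorts rather than preserving first-encounter order, so this is illustrative, not equivalent to the CUDA kernel):

```python
import torch

def determine_voxel_idx(temp_coors, max_voxels):
    # temp_coors: (N, 3) integer voxel coordinate of each point,
    # with negative values marking out-of-range points.
    valid = (temp_coors >= 0).all(dim=1)
    uniq, inverse = torch.unique(
        temp_coors[valid], dim=0, return_inverse=True)
    point_to_voxelidx = torch.full(
        (temp_coors.shape[0],), -1, dtype=torch.long)
    point_to_voxelidx[valid] = inverse
    point_to_voxelidx[point_to_voxelidx >= max_voxels] = -1  # overflow
    voxel_num = min(uniq.shape[0], max_voxels)
    return voxel_num, point_to_voxelidx
```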
 lyft_dataset_sdk
 networkx>=2.2,<2.3
-# we may unlock the verion of numba in the future
+# we may unlock the version of numba in the future
 numba==0.48.0
 numpy<1.20.0
 nuscenes-devkit
...
@@ -11,3 +11,6 @@ known_first_party = mmdet,mmseg,mmdet3d
 known_third_party = cv2,imageio,indoor3d_util,load_scannet_data,lyft_dataset_sdk,m2r,matplotlib,mmcv,nuimages,numba,numpy,nuscenes,pandas,plyfile,pycocotools,pyquaternion,pytest,pytorch_sphinx_theme,recommonmark,scannet_utils,scipy,seaborn,shapely,skimage,tensorflow,terminaltables,torch,trimesh,waymo_open_dataset
 no_lines_before = STDLIB,LOCALFOLDER
 default_section = THIRDPARTY
+
+[codespell]
+ignore-words-list = ans,refridgerator,crate,hist,formating,dout,wan,nd,fo
{"images": [{"file_name": "training/image_2/000007.png", "id": 7, "Tri2v": [[0.9999976, 0.0007553071, -0.002035826, -0.8086759], [-0.0007854027, 0.9998898, -0.01482298, 0.3195559], [0.002024406, 0.01482454, 0.9998881, -0.7997231], [0.0, 0.0, 0.0, 1.0]], "Trv2c": [[0.007533745, -0.9999714, -0.000616602, -0.004069766], [0.01480249, 0.0007280733, -0.9998902, -0.07631618], [0.9998621, 0.00752379, 0.01480755, -0.2717806], [0.0, 0.0, 0.0, 1.0]], "rect": [[0.9999239, 0.00983776, -0.007445048, 0.0], [-0.009869795, 0.9999421, -0.004278459, 0.0], [0.007402527, 0.004351614, 0.9999631, 0.0], [0.0, 0.0, 0.0, 1.0]], "cam_intrinsic": [[721.5377, 0.0, 609.5593, 44.85728], [0.0, 721.5377, 172.854, 0.2163791], [0.0, 0.0, 1.0, 0.002745884], [0.0, 0.0, 0.0, 1.0]], "width": 1242, "height": 375}], "annotations": [{"file_name": "training/image_2/000007.png", "image_id": 7, "area": 2556.023616260146, "category_name": "Car", "category_id": 2, "bbox": [565.4822720402807, 175.01202566042497, 51.17323679197273, 49.94844525177848], "iscrowd": 0, "bbox_cam3d": [-0.627830982208252, 0.8849999904632568, 25.010000228881836, 3.200000047683716, 1.6100000143051147, 1.659999966621399, -1.590000033378601], "velo_cam3d": -1, "center2d": [591.3814672167642, 198.3730937263457, 25.012745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 2}, {"file_name": "training/image_2/000007.png", "image_id": 7, "area": 693.1538564468428, "category_name": "Car", "category_id": 2, "bbox": [481.8496708488522, 179.85710612050596, 30.55976691329198, 22.681909139344754], "iscrowd": 0, "bbox_cam3d": [-7.367831230163574, 1.1799999475479126, 47.54999923706055, 3.700000047683716, 1.399999976158142, 1.5099999904632568, 1.5499999523162842], "velo_cam3d": -1, "center2d": [497.72892067550754, 190.75320250122618, 47.552745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 3}, {"file_name": "training/image_2/000007.png", "image_id": 7, "area": 419.21693566410073, "category_name": "Car", "category_id": 2, "bbox": [542.2247151650495, 175.73341152322814, 23.019633917835904, 18.211277258379255], "iscrowd": 0, "bbox_cam3d": [-4.647830963134766, 0.9800000190734863, 60.52000045776367, 4.050000190734863, 1.4600000381469727, 1.659999966621399, 1.559999942779541], "velo_cam3d": -1, "center2d": [554.1213152040074, 184.53305847203026, 60.522745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 4}, {"file_name": "training/image_2/000007.png", "image_id": 7, "area": 928.9555081918186, "category_name": "Cyclist", "category_id": 1, "bbox": [330.84191493374504, 176.13804311926262, 24.65593879860404, 37.67674456769879], "iscrowd": 0, "bbox_cam3d": [-12.567831039428711, 1.0199999809265137, 34.09000015258789, 1.9500000476837158, 1.7200000286102295, 0.5, 1.5399999618530273], "velo_cam3d": -1, "center2d": [343.52506265845847, 194.43366972124528, 34.092745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 5}], "categories": [{"id": 0, "name": "Pedestrian"}, {"id": 1, "name": "Cyclist"}, {"id": 2, "name": "Car"}]} {"images": [{"file_name": "training/image_2/000007.png", "id": 7, "Tri2v": [[0.9999976, 0.0007553071, -0.002035826, -0.8086759], [-0.0007854027, 0.9998898, -0.01482298, 0.3195559], [0.002024406, 0.01482454, 0.9998881, -0.7997231], [0.0, 0.0, 0.0, 1.0]], "Trv2c": [[0.007533745, -0.9999714, -0.000616602, -0.004069766], [0.01480249, 0.0007280733, -0.9998902, -0.07631618], [0.9998621, 0.00752379, 0.01480755, -0.2717806], [0.0, 0.0, 0.0, 1.0]], "rect": [[0.9999239, 0.00983776, 
-0.007445048, 0.0], [-0.009869795, 0.9999421, -0.004278459, 0.0], [0.007402527, 0.004351614, 0.9999631, 0.0], [0.0, 0.0, 0.0, 1.0]], "cam_intrinsic": [[721.5377, 0.0, 609.5593, 44.85728], [0.0, 721.5377, 172.854, 0.2163791], [0.0, 0.0, 1.0, 0.002745884], [0.0, 0.0, 0.0, 1.0]], "width": 1242, "height": 375}], "annotations": [{"file_name": "training/image_2/000007.png", "image_id": 7, "area": 2556.023616260146, "category_name": "Car", "category_id": 2, "bbox": [565.4822720402807, 175.01202566042497, 51.17323679197273, 49.94844525177848], "iscrowd": 0, "bbox_cam3d": [-0.627830982208252, 0.8849999904632568, 25.010000228881836, 3.200000047683716, 1.6100000143051147, 1.659999966621399, -1.590000033378601], "velo_cam3d": -1, "center2d": [591.3814672167642, 198.3730937263457, 25.012745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 2}, {"file_name": "training/image_2/000007.png", "image_id": 7, "area": 693.1538564468428, "category_name": "Car", "category_id": 2, "bbox": [481.8496708488522, 179.85710612050596, 30.55976691329198, 22.681909139344754], "iscrowd": 0, "bbox_cam3d": [-7.367831230163574, 1.1799999475479126, 47.54999923706055, 3.700000047683716, 1.399999976158142, 1.5099999904632568, 1.5499999523162842], "velo_cam3d": -1, "center2d": [497.72892067550754, 190.75320250122618, 47.552745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 3}, {"file_name": "training/image_2/000007.png", "image_id": 7, "area": 419.21693566410073, "category_name": "Car", "category_id": 2, "bbox": [542.2247151650495, 175.73341152322814, 23.019633917835904, 18.211277258379255], "iscrowd": 0, "bbox_cam3d": [-4.647830963134766, 0.9800000190734863, 60.52000045776367, 4.050000190734863, 1.4600000381469727, 1.659999966621399, 1.559999942779541], "velo_cam3d": -1, "center2d": [554.1213152040074, 184.53305847203026, 60.522745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 4}, {"file_name": "training/image_2/000007.png", "image_id": 7, "area": 928.9555081918186, "category_name": "Cyclist", "category_id": 1, "bbox": [330.84191493374504, 176.13804311926262, 24.65593879860404, 37.67674456769879], "iscrowd": 0, "bbox_cam3d": [-12.567831039428711, 1.0199999809265137, 34.09000015258789, 1.9500000476837158, 1.7200000286102295, 0.5, 1.5399999618530273], "velo_cam3d": -1, "center2d": [343.52506265845847, 194.43366972124528, 34.092745884], "attribute_name": -1, "attribute_id": -1, "segmentation": [], "id": 5}], "categories": [{"id": 0, "name": "Pedestrian"}, {"id": 1, "name": "Cyclist"}, {"id": 2, "name": "Car"}]}
\ No newline at end of file
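For orientation, the fixture above is a COCO-style mono-3D annotation file: `bbox` is (x, y, w, h) in pixels, and `area` is simply w * h. A quick check (the file path is illustrative):

```python
import json

with open('kitti_infos_mono3d.coco.json') as f:   # hypothetical path
    info = json.load(f)

ann = info['annotations'][0]
x, y, w, h = ann['bbox']
assert abs(ann['area'] - w * h) < 1e-3   # area is stored as w * h
```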
@@ -95,7 +95,7 @@ def test_paconv_regularization_loss():
     set_random_seed(0, True)
     model = ToyModel()

-    # reduction shoule be in ['none', 'mean', 'sum']
+    # reduction should be in ['none', 'mean', 'sum']
     with pytest.raises(AssertionError):
         paconv_corr_loss = PAConvRegularizationLoss(reduction='l2')
@@ -116,7 +116,7 @@ def test_paconv_regularization_loss():
 def test_uncertain_smooth_l1_loss():
     from mmdet3d.models.losses import UncertainL1Loss, UncertainSmoothL1Loss

-    # reduction shoule be in ['none', 'mean', 'sum']
+    # reduction should be in ['none', 'mean', 'sum']
     with pytest.raises(AssertionError):
         uncertain_l1_loss = UncertainL1Loss(reduction='l2')
     with pytest.raises(AssertionError):
...
@@ -656,7 +656,7 @@ def test_boxes_conversion():
                               dtype=torch.float32)
     rt_mat = rect @ Trv2c

-    # test coversion with Box type
+    # test conversion with Box type
     cam_to_lidar_box = Box3DMode.convert(camera_boxes, Box3DMode.CAM,
                                          Box3DMode.LIDAR, rt_mat.inverse())
     assert torch.allclose(cam_to_lidar_box.tensor, expected_tensor)
...
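The `rt_mat` used above follows the KITTI convention: `rect @ Trv2c` takes homogeneous LiDAR coordinates into the rectified camera frame, so its inverse is what converts camera boxes back to LiDAR. A toy sketch with identity extrinsics, purely to show the direction of the transform:

```python
import torch

rect = torch.eye(4)     # R0_rect: camera rectification (toy identity)
Trv2c = torch.eye(4)    # Tr_velo_to_cam: LiDAR -> camera (toy identity)
rt_mat = rect @ Trv2c   # LiDAR -> rectified camera

pt_lidar = torch.tensor([10.0, 2.0, -1.0, 1.0])  # homogeneous LiDAR point
pt_cam = rt_mat @ pt_lidar                       # into the camera frame
assert torch.allclose(rt_mat.inverse() @ pt_cam, pt_lidar)
```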
@@ -493,7 +493,7 @@ def get_2d_boxes(info, occluded, mono3d=True):

 def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename):
-    """Generate one 2D annotation record given various informations on top of
+    """Generate one 2D annotation record given various information on top of
     the 2D bounding box coordinates.

     Args:
@@ -508,7 +508,7 @@ def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename):
     Returns:
         dict: A sample 2D annotation record.

-            - file_name (str): flie name
+            - file_name (str): file name
             - image_id (str): sample data token
             - area (float): 2d box area
             - category_name (str): category name
...
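The record structure listed in the Returns section can be pictured as follows, with toy values and only the fields named in the docstring, built from the box corners the function receives:

```python
from collections import OrderedDict

x1, y1, x2, y2 = 565.5, 175.0, 616.7, 225.0   # toy 2D box corners
rec = OrderedDict(
    file_name='training/image_2/000007.png',
    image_id=7,
    area=(x2 - x1) * (y2 - y1),        # 2d box area from the corners
    category_name='Car',
    bbox=[x1, y1, x2 - x1, y2 - y1],   # (x, y, w, h) COCO convention
)
```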
@@ -565,7 +565,7 @@ def post_process_coords(

 def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float,
                     sample_data_token: str, filename: str) -> OrderedDict:
-    """Generate one 2D annotation record given various informations on top of
+    """Generate one 2D annotation record given various information on top of
     the 2D bounding box coordinates.

     Args:
@@ -580,7 +580,7 @@ def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float,
     Returns:
         dict: A sample 2D annotation record.

-            - file_name (str): flie name
+            - file_name (str): file name
             - image_id (str): sample data token
             - area (float): 2d box area
             - category_name (str): category name
...