Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Feature] Support DSVT training #2738

Merged
merged 28 commits into from
Dec 28, 2023
Merged
Show file tree
Hide file tree
Changes from 22 commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -134,3 +134,4 @@ data/sunrgbd/OFFICIAL_SUNRGBD/
# Waymo evaluation
mmdet3d/evaluation/functional/waymo_utils/compute_detection_metrics_main
mmdet3d/evaluation/functional/waymo_utils/compute_detection_let_metrics_main
mmdet3d/evaluation/functional/waymo_utils/compute_segmentation_metrics_main
16 changes: 9 additions & 7 deletions mmdet3d/datasets/convert_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from shapely.geometry import MultiPoint, box
from shapely.geometry.polygon import Polygon

from mmdet3d.structures import Box3DMode, CameraInstance3DBoxes, points_cam2img
from mmdet3d.structures import Box3DMode, LiDARInstance3DBoxes, points_cam2img
from mmdet3d.structures.ops import box_np_ops

kitti_categories = ('Pedestrian', 'Cyclist', 'Car', 'Van', 'Truck',
Expand Down Expand Up @@ -318,21 +318,23 @@ def get_kitti_style_2d_boxes(info: dict,
def convert_annos(info: dict, cam_idx: int) -> dict:
"""Convert front-cam anns to i-th camera (KITTI-style info)."""
rect = info['calib']['R0_rect'].astype(np.float32)
lidar2cam0 = info['calib']['Tr_velo_to_cam'].astype(np.float32)
lidar2cami = info['calib'][f'Tr_velo_to_cam{cam_idx}'].astype(np.float32)
if cam_idx == 0:
lidar2cami = info['calib']['Tr_velo_to_cam'].astype(np.float32)
else:
lidar2cami = info['calib'][f'Tr_velo_to_cam{cam_idx}'].astype(
np.float32)
annos = info['annos']
converted_annos = copy.deepcopy(annos)
loc = annos['location']
dims = annos['dimensions']
rots = annos['rotation_y']
gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1).astype(np.float32)
# convert gt_bboxes_3d to velodyne coordinates
gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
Box3DMode.LIDAR, np.linalg.inv(rect @ lidar2cam0), correct_yaw=True)
# BC-breaking: gt_bboxes_3d is already in lidar coordinates
# convert gt_bboxes_3d to cam coordinates
gt_bboxes_3d = gt_bboxes_3d.convert_to(
gt_bboxes_3d = LiDARInstance3DBoxes(gt_bboxes_3d).convert_to(
Box3DMode.CAM, rect @ lidar2cami, correct_yaw=True).numpy()

converted_annos['location'] = gt_bboxes_3d[:, :3]
converted_annos['dimensions'] = gt_bboxes_3d[:, 3:6]
converted_annos['rotation_y'] = gt_bboxes_3d[:, 6]
Expand Down
17 changes: 8 additions & 9 deletions mmdet3d/datasets/det3d_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,18 +113,15 @@ def __init__(self,
ori_label = self.METAINFO['classes'].index(name)
self.label_mapping[ori_label] = label_idx

self.num_ins_per_cat = {name: 0 for name in metainfo['classes']}
self.num_ins_per_cat = [0] * len(metainfo['classes'])
else:
self.label_mapping = {
i: i
for i in range(len(self.METAINFO['classes']))
}
self.label_mapping[-1] = -1

self.num_ins_per_cat = {
name: 0
for name in self.METAINFO['classes']
}
self.num_ins_per_cat = [0] * len(self.METAINFO['classes'])

super().__init__(
ann_file=ann_file,
Expand All @@ -146,9 +143,12 @@ def __init__(self,

# show statistics of this dataset
print_log('-' * 30, 'current')
print_log(f'The length of the dataset: {len(self)}', 'current')
print_log(
f'The length of {"test" if self.test_mode else "training"} dataset: {len(self)}', # noqa: E501
'current')
content_show = [['category', 'number']]
for cat_name, num in self.num_ins_per_cat.items():
for label, num in enumerate(self.num_ins_per_cat):
cat_name = self.metainfo['classes'][label]
content_show.append([cat_name, num])
table = AsciiTable(content_show)
print_log(
Expand Down Expand Up @@ -256,8 +256,7 @@ def parse_ann_info(self, info: dict) -> Union[dict, None]:

for label in ann_info['gt_labels_3d']:
if label != -1:
cat_name = self.metainfo['classes'][label]
self.num_ins_per_cat[cat_name] += 1
self.num_ins_per_cat[label] += 1

return ann_info

Expand Down
67 changes: 56 additions & 11 deletions mmdet3d/datasets/waymo_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,11 @@
from typing import Callable, List, Union

import numpy as np
from mmengine import print_log
from mmengine.fileio import load

from mmdet3d.registry import DATASETS
from mmdet3d.structures import CameraInstance3DBoxes
from mmdet3d.structures import LiDARInstance3DBoxes
from .det3d_dataset import Det3DDataset
from .kitti_dataset import KittiDataset

Expand Down Expand Up @@ -163,13 +165,7 @@ def parse_ann_info(self, info: dict) -> dict:
centers_2d = np.zeros((0, 2), dtype=np.float32)
depths = np.zeros((0), dtype=np.float32)

# in waymo, lidar2cam = R0_rect @ Tr_velo_to_cam
# convert gt_bboxes_3d to velodyne coordinates with `lidar2cam`
lidar2cam = np.array(info['images'][self.default_cam_key]['lidar2cam'])
gt_bboxes_3d = CameraInstance3DBoxes(
ann_info['gt_bboxes_3d']).convert_to(self.box_mode_3d,
np.linalg.inv(lidar2cam))
ann_info['gt_bboxes_3d'] = gt_bboxes_3d
gt_bboxes_3d = LiDARInstance3DBoxes(ann_info['gt_bboxes_3d'])

anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
Expand All @@ -182,9 +178,58 @@ def parse_ann_info(self, info: dict) -> dict:
return anns_results

def load_data_list(self) -> List[dict]:
"""Add the load interval."""
data_list = super().load_data_list()
data_list = data_list[::self.load_interval]
"""Add the load interval.

Returns:
list[dict]: A list of annotations.
""" # noqa: E501
# `self.ann_file` denotes the absolute annotation file path if
# `self.root=None` or relative path if `self.root=/path/to/data/`.
annotations = load(self.ann_file)
if not isinstance(annotations, dict):
raise TypeError(f'The annotations loaded from annotation file '
f'should be a dict, but got {type(annotations)}!')
if 'data_list' not in annotations or 'metainfo' not in annotations:
raise ValueError('Annotation must have data_list and metainfo '
'keys')
metainfo = annotations['metainfo']
raw_data_list = annotations['data_list']
raw_data_list = raw_data_list[::self.load_interval]
if self.load_interval > 1:
print_log(
f'Sample size will be reduced to 1/{self.load_interval} of'
'the original data sample',
logger='current')

# Meta information loaded from the annotation file will not override the
# existing meta information loaded from `BaseDataset.METAINFO` or the
# `metainfo` argument passed to the constructor.
for k, v in metainfo.items():
self._metainfo.setdefault(k, v)

# load and parse data_infos.
data_list = []
for raw_data_info in raw_data_list:
# parse raw data information to target format
data_info = self.parse_data_info(raw_data_info)
if isinstance(data_info, dict):
# For image tasks, `data_info` should contain information of a single
# image, such as dict(img_path='xxx', width=360, ...)
data_list.append(data_info)
elif isinstance(data_info, list):
# For video tasks, `data_info` could contain image
# information of multiple frames, such as
# [dict(video_path='xxx', timestamps=...),
# dict(video_path='xxx', timestamps=...)]
for item in data_info:
if not isinstance(item, dict):
raise TypeError('data_info must be list of dict, but '
f'got {type(item)}')
data_list.extend(data_info)
else:
raise TypeError('data_info should be a dict or list of dict, '
f'but got {type(data_info)}')

return data_list

def parse_data_info(self, info: dict) -> Union[dict, List[dict]]:
Expand Down
4 changes: 2 additions & 2 deletions mmdet3d/engine/hooks/visualization_hook.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,11 +78,11 @@ def __init__(self,
'needs to be excluded.')
self.vis_task = vis_task

if wait_time == -1:
if show and wait_time == -1:
print_log(
'Manual control mode, press [Right] to next sample.',
logger='current')
else:
elif show:
print_log(
'Autoplay mode, press [SPACE] to pause.', logger='current')
self.wait_time = wait_time
Expand Down
Loading