Exported source
class SemanticKITTIDataset(Dataset):
    """Load the SemanticKITTI data in a pytorch Dataset object.

    Expects the standard SemanticKITTI layout under ``data_path``:
    ``semantic-kitti.yaml`` (metadata: splits, labels, class content,
    learning maps, color map) plus the ``data_odometry_velodyne`` and
    ``data_odometry_labels`` sequence folders.

    Parameters
    ----------
    data_path : str or Path
        Root directory containing ``semantic-kitti.yaml`` and the
        velodyne/label sequence trees.
    split : str
        One of the split names declared in the yaml file
        (e.g. ``'train'``, ``'valid'``, ``'test'``).
    transform : callable, optional
        Applied to the ``{'frame', 'label', 'mask'}`` item dict in
        ``__getitem__``.
    """

    def __init__(self, data_path, split='train', transform=None):
        data_path = Path(data_path)
        yaml_path = data_path/'semantic-kitti.yaml'
        self.velodyne_path = data_path/'data_odometry_velodyne/dataset/sequences'
        self.labels_path = data_path/'data_odometry_labels/dataset/sequences'
        # NOTE: yaml.safe_load only — the metadata file is trusted but safe_load
        # avoids arbitrary object construction.
        with open(yaml_path, 'r') as file:
            metadata = yaml.safe_load(file)

        # Collect every velodyne scan belonging to the sequences of this split.
        # Sequence ids are zero-padded to two digits (e.g. 8 -> '08').
        sequences = metadata['split'][split]
        velodyne_fns = []
        for seq in sequences:
            velodyne_fns += list(self.velodyne_path.rglob(f'*{seq:02}/velodyne/*.bin'))

        # frame id = file stem ('000123'); sequence = 3rd path part from the end
        # (…/<seq>/velodyne/<frame>.bin).
        self.frame_ids = [fn.stem for fn in velodyne_fns]
        self.frame_sequences = [fn.parts[-3] for fn in velodyne_fns]

        # Raw label-id -> human-readable name mapping.
        self.labels_dict = metadata['labels']

        # Per-class frequency ('content') as a dense numpy lookup table indexed
        # by raw label id. max_key sizes the table to the largest raw id.
        self.content = metadata['content']
        max_key = sorted(self.content.keys())[-1]
        self.content_np = np.zeros((max_key+1,), dtype=np.float32)
        for k, v in self.content.items():
            self.content_np[k] = v

        # Raw label id -> training class id, as a dense lookup table so remapping
        # a whole label array is a single fancy-indexing operation.
        self.learning_map = metadata['learning_map']
        self.learning_map_np = np.zeros((max_key+1,), dtype=np.uint32)
        for k, v in self.learning_map.items():
            self.learning_map_np[k] = v

        # Inverse map (training class id -> a representative raw id) and
        # per-training-class content sums, used for inverse-frequency weights.
        self.learning_map_inv = metadata['learning_map_inv']
        self.learning_map_inv_np = np.zeros((len(self.learning_map_inv),), dtype=np.uint32)
        content_sum_np = np.zeros_like(self.learning_map_inv_np, dtype=np.float32)
        for k, v in self.learning_map_inv.items():
            self.learning_map_inv_np[k] = v
            content_sum_np[k] = self.content_np[self.learning_map_np == k].sum()
        self.content_weights = 1./content_sum_np

        # The yaml stores colors as BGR; reverse each triple to get RGB.
        self.color_map_bgr = metadata['color_map']
        self.color_map_rgb_np = np.zeros((max_key+1, 3), dtype=np.float32)
        for k, v in self.color_map_bgr.items():
            self.color_map_rgb_np[k] = np.array(v[::-1], np.float32)

        self.transform = transform
        # The test split ships no label files, so __getitem__ skips labels.
        self.is_test = (split == 'test')

    def learning_remap(self, remapping_rules):
        """Collapse/remap training classes in place.

        ``remapping_rules`` maps current training class ids to new ones.
        Rebuilds ``learning_map_np`` (raw id -> new class),
        ``learning_map_inv_np`` (new class -> representative raw id; the first
        rule targeting a new class wins) and ``content_weights``
        (inverse of summed class content).
        """
        new_map_np = np.zeros_like(self.learning_map_np, dtype=np.uint32)
        max_key = sorted(remapping_rules.values())[-1]
        new_map_inv_np = np.zeros((max_key+1,), dtype=np.uint32)
        for k, v in remapping_rules.items():
            # All raw ids currently mapping to class k now map to class v.
            new_map_np[self.learning_map_np == k] = v
            # Keep the first representative raw id assigned to class v.
            if new_map_inv_np[v] == 0:
                new_map_inv_np[v] = self.learning_map_inv_np[k]

        # Recompute per-class content sums against the new mapping.
        new_content_sum_np = np.zeros_like(new_map_inv_np, dtype=np.float32)
        for k in range(len(new_map_inv_np)):
            new_content_sum_np[k] = self.content_np[new_map_np == k].sum()

        self.learning_map_np = new_map_np
        self.learning_map_inv_np = new_map_inv_np
        self.content_weights = 1./new_content_sum_np

    def set_transform(self, transform):
        """Replace the item transform applied in ``__getitem__``."""
        self.transform = transform

    def __len__(self):
        return len(self.frame_ids)

    def __getitem__(self, idx):
        """Return ``{'frame', 'label', 'mask'}`` for scan ``idx``.

        ``frame`` is the (N, 4) float32 point cloud (x, y, z, reflectance —
        per the KITTI .bin layout). For non-test splits, ``label`` is the
        per-point training class (lower 16 bits of the raw uint32 label,
        remapped) and ``mask`` marks points whose class is not 0 (ignore).
        """
        frame_id = self.frame_ids[idx]
        frame_sequence = self.frame_sequences[idx]

        frame_path = self.velodyne_path/frame_sequence/'velodyne'/(frame_id + '.bin')
        with open(frame_path, 'rb') as f:
            frame = np.fromfile(f, dtype=np.float32).reshape(-1, 4)

        label = None
        mask = None
        if not self.is_test:
            label_path = self.labels_path/frame_sequence/'labels'/(frame_id + '.label')
            with open(label_path, 'rb') as f:
                label = np.fromfile(f, dtype=np.uint32)
            # Lower 16 bits hold the semantic label (upper 16 are instance id).
            label = label & 0xFFFF
            label = self.learning_map_np[label]
            mask = label != 0  # see the field *learning_ignore* in the yaml file

        item = {
            'frame': frame,
            'label': label,
            'mask': mask
        }
        if self.transform:
            item = self.transform(item)

        return item