@inproceedings{Gaehlert19_VGN,
  title     = {Visibility Guided {NMS}: Efficient Boosting of Amodal Object Detection in Crowded Traffic Scenes},
  author    = {G{\"a}hlert, Nils and Hanselmann, Niklas and Franke, Uwe and Denzler, Joachim},
  booktitle = {Machine Learning for Autonomous Driving Workshop at NeurIPS ({NeurIPS-WS})},
  year      = {2019},
  address   = {Vancouver, Canada},
  abstract  = {Object detection is an important task in environment perception for autonomous driving. Modern 2D object detection frameworks such as Yolo, SSD or Faster R-CNN predict multiple bounding boxes per object that are refined using Non-Maximum Suppression (NMS) to suppress all but one bounding box. While object detection itself is fully end-to-end learnable and does not require any manual parameter selection, standard NMS is parametrized by an overlap threshold that has to be chosen by hand. In practice, this often leads to an inability of standard NMS strategies to distinguish different objects in crowded scenes in the presence of high mutual occlusion, e.g. for parked cars or crowds of pedestrians. Our novel Visibility Guided NMS (vg-NMS) leverages both pixel-based as well as amodal object detection paradigms and improves the detection performance especially for highly occluded objects with little computational overhead. We evaluate vg-NMS using KITTI, VIPER as well as the Synscapes dataset and show that it outperforms current state-of-the-art NMS.},
}