@inproceedings{Koerschens22:ORIC,
  title        = {Occlusion-Robustness of Convolutional Neural Networks via Inverted Cutout},
  author       = {Matthias Körschens and Paul Bodesheim and Joachim Denzler},
  booktitle    = {International Conference on Pattern Recognition (ICPR)},
  year         = {2022},
  pages        = {2829--2835},
  doi          = {10.1109/ICPR56361.2022.9956044},
  organization = {IEEE},
  abstract     = {Convolutional Neural Networks (CNNs) are able to reliably classify objects in images if they are clearly visible and only slightly affected by small occlusions. However, heavy occlusions can strongly deteriorate the performance of CNNs, which is critical for tasks where correct identification is paramount. For many real-world applications, images are taken in unconstrained environments under suboptimal conditions, where occluded objects are inevitable. We propose a novel data augmentation method called Inverted Cutout, which can be used for training a CNN by showing only small patches of the images. Together with this augmentation method, we present several ways of making the network robust against occlusion. On the one hand, we utilize a spatial aggregation module without modifying the base network, and on the other hand, we achieve occlusion-robustness with appropriate fine-tuning in conjunction with Inverted Cutout. In our experiments, we compare two different aggregation modules and two loss functions on the Occluded-Vehicles and Occluded-COCO-Vehicles datasets, showing that our approach outperforms existing state-of-the-art methods for object categorization under varying levels of occlusion.},
  groups       = {data_augmentation},
}
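
The entry above describes Inverted Cutout as masking everything in a training image except a small visible patch (the inverse of standard Cutout). The plain-text note below, which BibTeX ignores, gives a minimal Python sketch of that idea, assuming HxWxC numpy arrays; the function name inverted_cutout and the parameters patch_size and fill_value are illustrative assumptions, not the authors' exact implementation or settings.

import numpy as np

def inverted_cutout(image, patch_size=64, fill_value=0, rng=None):
    """Keep one random square patch of `image` visible; mask everything else.

    Sketch of the Inverted Cutout idea from the abstract. Patch size,
    fill value, and the uniform sampling of the patch location are
    illustrative assumptions, not the paper's exact settings.
    """
    rng = np.random.default_rng() if rng is None else rng
    h, w = image.shape[:2]
    # Sample the top-left corner so the patch lies fully inside the image.
    top = rng.integers(0, max(h - patch_size, 0) + 1)
    left = rng.integers(0, max(w - patch_size, 0) + 1)
    # Start from a fully masked image, then copy the visible patch back in.
    out = np.full_like(image, fill_value)
    out[top:top + patch_size, left:left + patch_size] = \
        image[top:top + patch_size, left:left + patch_size]
    return out

In training, such a transform would typically be applied per sample before normalization; the paper's actual patch-sampling strategy and mask fill may differ.
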