@inproceedings{Theiss2022TUBM,
  author    = {Thei{\ss}, Christoph and Denzler, Joachim},
  title     = {Towards a Unified Benchmark for Monocular Radial Distortion Correction and the Importance of Testing on Real World Data},
  booktitle = {International Conference on Pattern Recognition and Artificial Intelligence ({ICPRAI})},
  series    = {Lecture Notes in Computer Science},
  volume    = {13363},
  pages     = {59--71},
  year      = {2022},
  abstract  = {Radial distortion correction for a single image is an often overlooked problem in computer vision. It is possible to rectify images accurately when the camera and lens are known or physically available to take additional images with a calibration pattern. However, sometimes it is impossible to identify the type of camera or lens of an image, e.g. crowd-sourced datasets. Nonetheless, it is still important to correct that image for radial distortion in these cases. Especially in the last few years, solving the radial distortion correction problem from a single image with a deep neural network approach increased in popularity. This paper shows that these approaches tend to overfit completely on the synthetic data generation process used to train such networks. Additionally, we investigate which parts of this process are responsible for overfitting, and apply an explainability tool to further investigate the behavior of the trained models. Furthermore, we introduce a new dataset based on the popular ImageNet dataset as a new benchmark for comparison. Lastly, we propose an efficient solution to the overfitting problem by feeding edge images to the neural networks instead of the images. Source code, data, and models are publicly available at https://github.com/cvjena/deeprect.},
  doi       = {10.1007/978-3-031-09037-0_6},
  url       = {https://link.springer.com/chapter/10.1007/978-3-031-09037-0_6},
}