@inproceedings{venkataramanan2023gaussian,
  author        = {Aishwarya Venkataramanan and Assia Benbihi and Martin Laviale and Cédric Pradalier},
  title         = {Gaussian Latent Representations for Uncertainty Estimation using Mahalanobis Distance in Deep Classifiers},
  booktitle     = {ICCV Workshop on Uncertainty Quantification for Computer Vision (ICCV-WS)},
  year          = {2023},
  eprint        = {2305.13849},
  archiveprefix = {arXiv},
  primaryclass  = {cs.LG},
  url           = {https://arxiv.org/abs/2305.13849},
  abstract      = {Recent works show that the data distribution in a network's latent space is useful for estimating classification uncertainty and detecting out-of-distribution (OOD) samples. To obtain a well-regularized latent space conducive to uncertainty estimation, existing methods introduce significant changes to model architectures and training procedures. In this paper, we present a lightweight, fast, and high-performance regularization method for Mahalanobis distance-based uncertainty prediction that requires minimal changes to the network's architecture. To derive Gaussian latent representations favourable for Mahalanobis distance calculation, we introduce a self-supervised representation learning method that separates in-class representations into multiple Gaussians. Classes with non-Gaussian representations are automatically identified and dynamically clustered into multiple new classes that are approximately Gaussian. Evaluation on standard OOD benchmarks shows that our method achieves state-of-the-art results on OOD detection with minimal inference time, and is very competitive on predictive probability calibration. Finally, we show the applicability of our method to a real-life computer vision use case on microorganism classification.},
}