@inproceedings{Reimers21:CAD,
  title     = {Conditional Adversarial Debiasing: Towards Learning Unbiased Classifiers from Biased Data},
  author    = {Christian Reimers and Paul Bodesheim and Jakob Runge and Joachim Denzler},
  booktitle = {DAGM German Conference on Pattern Recognition (DAGM-GCPR)},
  year      = {2021},
  pages     = {48--62},
  groups    = {debiasing,understanding-dl},
  abstract  = {Bias in classifiers is a severe issue for modern deep learning methods, especially for their application in safety- and security-critical areas. Often, the bias of a classifier is a direct consequence of a bias in the training set, frequently caused by the co-occurrence of relevant features and irrelevant ones. To mitigate this issue, we require learning algorithms that prevent the propagation of known bias from the dataset into the classifier. We present a novel adversarial debiasing method that targets a feature known to be spuriously connected to the labels of the training images but statistically independent of the labels of the test images. The debiasing stops the classifier from falsely identifying this irrelevant feature as important. Irrelevant features co-occur with important features in a wide range of bias-related problems for many computer vision tasks, such as automatic skin cancer detection or driver assistance. We argue by a mathematical proof that our approach is superior to existing techniques for the above-mentioned type of bias. Our experiments show that our approach performs better than the state-of-the-art on a well-known benchmark dataset with real-world images of cats and dogs.},
  doi       = {10.1007/978-3-030-92659-5_4},
}