@inproceedings{penzel2024reducing,
  title        = {Reducing Bias in Pre-trained Models by Tuning while Penalizing Change},
  booktitle    = {International Conference on Computer Vision Theory and Applications (VISAPP)},
  author       = {Niklas Penzel and Gideon Stein and Joachim Denzler},
  year         = {2024},
  pages        = {90--101},
  organization = {INSTICC},
  publisher    = {SciTePress},
  abstract     = {Deep models trained on large amounts of data often incorporate implicit biases present during training time. If later such a bias is discovered during inference or deployment, it is often necessary to acquire new data and retrain the model. This behavior is especially problematic in critical areas such as autonomous driving or medical decision-making. In these scenarios, new data is often expensive and hard to come by. In this work, we present a method based on change penalization that takes a pre-trained model and adapts the weights to mitigate a previously detected bias. We achieve this by tuning a zero-initialized copy of a frozen pre-trained network. Our method needs very few examples that contradict the bias, in extreme cases only a single one, to increase performance. Additionally, we propose an early stopping criterion to modify baselines and reduce overfitting. We evaluate our approach on a well-known bias in skin lesion classification and three other datasets from the domain shift literature. We find that our approach works especially well with very few images. Simple fine-tuning combined with our early stopping also leads to performance benefits for a larger number of tuning samples.},
  groups       = {debiasing, understanding-dl},
  doi          = {10.5220/0012345800003660},
  url          = {https://www.scitepress.org/PublicationsDetail.aspx?ID=cHH94DJt+3g=&t=1},
  isbn         = {978-989-758-679-8},
  issn         = {2184-4321},
}
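The abstract's core mechanism, tuning a zero-initialized, trainable copy of a frozen pre-trained network while penalizing the change, can be sketched roughly as below. This is a minimal PyTorch illustration under my own assumptions, not the authors' implementation; the names DeltaLinear, tuning_loss, and lambda_penalty are invented for the example.

# Hedged sketch: frozen base weights plus a zero-initialized trainable delta,
# trained on a few bias-contradicting examples with an L2 penalty on the change.
import torch
import torch.nn as nn
import torch.nn.functional as F

class DeltaLinear(nn.Module):
    """A frozen pre-trained linear layer plus a zero-initialized, trainable delta."""
    def __init__(self, pretrained: nn.Linear):
        super().__init__()
        self.base = pretrained
        for p in self.base.parameters():
            p.requires_grad = False  # keep the pre-trained weights fixed
        self.delta_w = nn.Parameter(torch.zeros_like(pretrained.weight))
        self.delta_b = nn.Parameter(torch.zeros_like(pretrained.bias))

    def forward(self, x):
        # Effective weights are the frozen base plus the learned change.
        return F.linear(x, self.base.weight + self.delta_w, self.base.bias + self.delta_b)

def tuning_loss(logits, targets, deltas, lambda_penalty=1e-2):
    # Task loss on the bias-contradicting examples plus a penalty on how far
    # the effective weights have moved away from the pre-trained ones.
    task = F.cross_entropy(logits, targets)
    change = sum((d ** 2).sum() for d in deltas)
    return task + lambda_penalty * change

# Minimal usage on dummy data.
layer = DeltaLinear(nn.Linear(16, 3))
opt = torch.optim.Adam([layer.delta_w, layer.delta_b], lr=1e-3)
x, y = torch.randn(4, 16), torch.tensor([0, 1, 2, 0])
loss = tuning_loss(layer(x), y, [layer.delta_w, layer.delta_b])
loss.backward()
opt.step()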