@article{penzel2024change,
  title     = {Change Penalized Tuning to Reduce Pre-trained Biases},
  author    = {Niklas Penzel and Gideon Stein and Joachim Denzler},
  journal   = {Communications in Computer and Information Science},
  year      = {2025},
  publisher = {Springer International Publishing},
  abstract  = {Due to the data-centric approach of modern machine learning, biases present in the training data are frequently learned by deep models. Remedying these issues often requires collecting new data and retraining the models from scratch, which can be expensive in critical areas such as medicine. We investigate whether pre-trained model behavior can be fixed using very few unbiased examples. We show that we can improve performance by tuning the models while penalizing parameter changes, thereby keeping pre-trained knowledge while simultaneously correcting the harmful behavior. Toward this goal, we first tune a zero-initialized copy of the frozen pre-trained network under strong parameter norms. Second, we introduce an early stopping scheme to modify baselines and reduce overfitting. Our approaches lead to improvements on four datasets common in the debiasing and domain shift literature. We especially see benefits in an iterative setting, where new samples are added continuously. Hence, we demonstrate the effectiveness of tuning while penalizing change to fix pre-trained models without retraining from scratch.},
  note      = {(in press)},
}
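
The abstract describes the approach only at a high level. Below is a minimal PyTorch sketch of one plausible reading, assuming "tuning a zero-initialized copy of the frozen pre-trained network using strong parameter norms" means adding a trainable, zero-initialized delta to each frozen pre-trained weight and applying an L2 penalty on that delta. The names DeltaLinear, change_penalty, and lam are illustrative assumptions, not identifiers from the paper.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class DeltaLinear(nn.Module):
        # Wraps a frozen pre-trained linear layer and adds a trainable,
        # zero-initialized delta to its weights (one reading of the
        # abstract's "zero-initialized copy of the frozen network").
        def __init__(self, pretrained: nn.Linear):
            super().__init__()
            self.weight = nn.Parameter(pretrained.weight.detach().clone(),
                                       requires_grad=False)  # frozen
            self.bias = (nn.Parameter(pretrained.bias.detach().clone(),
                                      requires_grad=False)
                         if pretrained.bias is not None else None)
            # Trainable change, initialized to zero so tuning starts
            # exactly at the pre-trained model.
            self.delta = nn.Parameter(torch.zeros_like(self.weight))

        def forward(self, x):
            # Effective weights: frozen pre-trained weights plus the
            # learned change.
            return F.linear(x, self.weight + self.delta, self.bias)

    def change_penalty(model: nn.Module, lam: float = 1e-2):
        # Strong L2 norm on the deltas penalizes deviation from the
        # pre-trained weights; lam is a hypothetical strength
        # hyperparameter.
        return lam * sum(p.pow(2).sum()
                         for name, p in model.named_parameters()
                         if name.endswith("delta"))

    # Usage during tuning on the few unbiased examples:
    #   total_loss = task_loss(model(x), y) + change_penalty(model)

The early stopping scheme mentioned in the abstract would sit in the training loop around this objective; its criterion is not specified in the abstract, so it is omitted here.
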