@misc{asaad2025gradient,
  author        = {Asaad, Ihab and Shadaydeh, Maha and Denzler, Joachim},
  title         = {Gradient Extrapolation for Debiased Representation Learning},
  year          = {2025},
  eprint        = {2503.13236},
  archiveprefix = {arXiv},
  doi           = {10.48550/arXiv.2503.13236},
  url           = {https://arxiv.org/abs/2503.13236},
  arxiv         = {https://arxiv.org/abs/2503.13236},
  langid        = {english},
  abstract      = {Machine learning classification models trained with empirical risk minimization (ERM) often inadvertently rely on spurious correlations. When absent in the test data, these unintended associations between non-target attributes and target labels lead to poor generalization. This paper addresses this problem from a model optimization perspective and proposes a novel method, Gradient Extrapolation for Debiased Representation Learning (GERNE), designed to learn debiased representations in both known and unknown attribute training cases. GERNE uses two distinct batches with different amounts of spurious correlations to define the target gradient as the linear extrapolation of two gradients computed from each batch's loss. It is demonstrated that the extrapolated gradient, if directed toward the gradient of the batch with fewer amount of spurious correlation, can guide the training process toward learning a debiased model. GERNE can serve as a general framework for debiasing with methods, such as ERM, reweighting, and resampling, being shown as special cases. The theoretical upper and lower bounds of the extrapolation factor are derived to ensure convergence. By adjusting this factor, GERNE can be adapted to maximize the Group-Balanced Accuracy (GBA) or the Worst-Group Accuracy. The proposed approach is validated on five vision and one NLP benchmarks, demonstrating competitive and often superior performance compared to state-of-the-art baseline methods.},
}