@inproceedings{Blunk23:FS,
  author    = {Blunk, Jan and Penzel, Niklas and Bodesheim, Paul and Denzler, Joachim},
  title     = {Beyond Debiasing: Actively Steering Feature Selection via Loss Regularization},
  booktitle = {{DAGM} German Conference on Pattern Recognition ({DAGM-GCPR})},
  year      = {2023},
  abstract  = {It is common for domain experts like physicians in medical studies to examine features for their reliability with respect to a specific domain task. When introducing machine learning, a common expectation is that machine learning models use the same features as human experts to solve a task but that is not always the case. Moreover, datasets often contain features that are known from domain knowledge to generalize badly to the real world, referred to as biases. Current debiasing methods only remove such influences. To additionally integrate the domain knowledge about well-established features into the training of a model, their relevance should be increased. We present a method that permits the manipulation of the relevance of features by actively steering the model's feature selection during the training process. That is, it allows both the discouragement of biases and encouragement of well-established features to incorporate domain knowledge about the feature reliability. We model our objectives for actively steering the feature selection process as a constrained optimization problem, which we implement via a loss regularization that is based on batch-wise feature attributions. We evaluate our approach on a novel synthetic regression dataset and a computer vision dataset. We observe that it successfully steers the features a model selects during the training process. This is a strong indicator that our method can be used to integrate domain knowledge about well-established features into a model.},
  groups    = {debiasing,understanding-dl},
}