@inproceedings{stein_24_embracing,
  title         = {Embracing the Black Box: Heading Towards Foundation Models for Causal Discovery from Time Series Data},
  booktitle     = {AAAI Workshop on AI for Time-series (AAAI-WS)},
  author        = {Gideon Stein and Maha Shadaydeh and Joachim Denzler},
  year          = {2024},
  eprint        = {2402.09305},
  archiveprefix = {arXiv},
  primaryclass  = {cs.LG},
  note          = {(accepted)},
  url           = {https://arxiv.org/abs/2402.09305},
  abstract      = {Causal discovery from time series data encompasses many existing solutions, including those based on deep learning techniques. However, these methods typically do not endorse one of the most prevalent paradigms in deep learning: end-to-end learning. To address this gap, we explore what we call Causal Pretraining, a methodology that aims to learn a direct mapping from multivariate time series to the underlying causal graphs in a supervised manner. Our empirical findings suggest that supervised causal discovery is possible, assuming that the training and test time series samples share most of their dynamics. More importantly, we found evidence that the performance of Causal Pretraining can increase with data and model size, even if the additional data do not share the same dynamics. Further, we provide examples where causal discovery on real-world data with causally pretrained neural networks is possible within limits. We argue that this hints at the possibility of a foundation model for causal discovery.},
}
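
The abstract's central idea, a direct supervised mapping from multivariate time series to a causal adjacency matrix, can be illustrated with a minimal sketch. Everything below is an assumption for illustration only: the linear VAR(1) data generator, the small MLP named GraphPredictor, and the binary cross-entropy edge loss are stand-ins, not the paper's actual architecture or training setup.

# Minimal sketch of "Causal Pretraining" as described in the abstract:
# learn a supervised mapping from a multivariate time series to the
# underlying causal graph. All names and the toy model are illustrative
# assumptions, not the authors' implementation.
import numpy as np
import torch
import torch.nn as nn

def sample_var_system(n_vars=3, T=200, rng=None):
    """Sample a random causal graph and a linear VAR(1) series that follows it."""
    rng = rng or np.random.default_rng()
    adj = (rng.random((n_vars, n_vars)) < 0.3).astype(np.float32)  # ground-truth edges
    weights = adj * rng.uniform(-0.4, 0.4, size=adj.shape)
    x = np.zeros((T, n_vars), dtype=np.float32)
    for t in range(1, T):
        x[t] = weights @ x[t - 1] + 0.1 * rng.standard_normal(n_vars)
    return x, adj

class GraphPredictor(nn.Module):
    """Maps a whole multivariate series to edge logits for the causal graph."""
    def __init__(self, n_vars, T, hidden=64):
        super().__init__()
        self.n_vars = n_vars
        self.net = nn.Sequential(
            nn.Flatten(),
            nn.Linear(n_vars * T, hidden), nn.ReLU(),
            nn.Linear(hidden, n_vars * n_vars),
        )
    def forward(self, x):
        return self.net(x).view(-1, self.n_vars, self.n_vars)

n_vars, T = 3, 200
model = GraphPredictor(n_vars, T)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.BCEWithLogitsLoss()  # each potential edge is a binary target

for step in range(500):  # supervised pretraining on freshly sampled systems
    xs, adjs = zip(*(sample_var_system(n_vars, T) for _ in range(16)))
    x = torch.from_numpy(np.stack(xs))
    y = torch.from_numpy(np.stack(adjs))
    loss = loss_fn(model(x), y)
    opt.zero_grad()
    loss.backward()
    opt.step()

# At test time, the trained network predicts a graph for an unseen series
# in a single forward pass, the end-to-end paradigm the abstract argues for.
x_test, adj_test = sample_var_system(n_vars, T)
pred = torch.sigmoid(model(torch.from_numpy(x_test)[None])) > 0.5

Trained this way, the network outputs a graph estimate in one forward pass; whether such a predictor transfers to dynamics unseen during training is exactly the question the paper studies.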