% NOTE(review): the only DOI present is the arXiv DataCite DOI and booktitle
% was empty, so this work is recorded as an arXiv preprint (@misc + eprint
% fields). If/when it appears in a venue, switch back to @inproceedings and
% fill booktitle/pages/publisher. Citation key is unchanged.
@misc{vemuri2025finr,
  author     = {Vemuri, Sai Karthikeya and Büchner, Tim and Denzler, Joachim},
  title      = {{F-INR}: Functional Tensor Decomposition for Implicit Neural Representations},
  year       = {2025},
  eprint     = {2503.21507},
  eprinttype = {arXiv},
  doi        = {10.48550/arXiv.2503.21507},
  url        = {https://arxiv.org/abs/2503.21507},
  langid     = {english},
  abstract   = {Implicit Neural Representation (INR) has emerged as a powerful tool for encoding discrete signals into continuous, differentiable functions using neural networks. However, these models often have an unfortunate reliance on monolithic architectures to represent high-dimensional data, leading to prohibitive computational costs as dimensionality grows. We propose F-INR, a framework that reformulates INR learning through functional tensor decomposition, breaking down high-dimensional tasks into lightweight, axis-specific sub-networks. Each sub-network learns a low-dimensional data component (e.g., spatial or temporal). Then, we combine these components via tensor operations, reducing forward pass complexity while improving accuracy through specialized learning. F-INR is modular and, therefore, architecture-agnostic, compatible with MLPs, SIREN, WIRE, or other state-of-the-art INR architecture. It is also decomposition-agnostic, supporting CP, TT, and Tucker modes with user-defined rank for speed-accuracy control. In our experiments, F-INR trains 100× faster than existing approaches on video tasks while achieving higher fidelity (+3.4 dB PSNR). Similar gains hold for image compression, physics simulations, and 3D geometry reconstruction. Through this, F-INR offers a new scalable, flexible solution for high-dimensional signal modeling.},
}