@inproceedings{kowsher-etal-2025-llm,
    title = "{LLM}-Mixer: Multiscale Mixing in {LLM}s for Time Series Forecasting",
    author = "Kowsher, Md and
      Sobuj, Md. Shohanur Islam and
      Prottasha, Nusrat Jahan and
      Alanis, E. Alejandro and
      Garibay, Ozlem and
      Yousefi, Niloofar",
    editor = "Chang, Shuaichen and
      Hulsebos, Madelon and
      Liu, Qian and
      Chen, Wenhu and
      Sun, Huan",
    booktitle = "Proceedings of the 4th Table Representation Learning Workshop",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.trl-1.12/",
    doi = "10.18653/v1/2025.trl-1.12",
    pages = "156--165",
    isbn = "979-8-89176-268-8",
    abstract = "Time series forecasting is a challenging task, especially when dealing with data that contains both short-term variations and long-term trends. In this study, we introduce LLM-Mixer, a novel framework that combines multiscale time-series decomposition with the power of pre-trained Large Language Models (LLMs). LLM-Mixer breaks down time-series data into multiple temporal resolutions using downsampling and processes these multiscale representations with a frozen LLM, guided by a carefully designed text prompt that encodes information about the dataset{'}s features and structure. To understand the role of downsampling, we conduct a detailed analysis using Neural Tangent Kernel (NTK) distance, showing that incorporating multiple scales improves the model{'}s learning dynamics. We evaluate LLM-Mixer across a diverse set of forecasting tasks, including long-term multivariate, short-term multivariate, and long-term univariate scenarios. Experimental results demonstrate that LLM-Mixer achieves competitive performance compared to recent state-of-the-art models across various forecasting horizons."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kowsher-etal-2025-llm">
<titleInfo>
<title>LLM-Mixer: Multiscale Mixing in LLMs for Time Series Forecasting</title>
</titleInfo>
<name type="personal">
<namePart type="given">Md</namePart>
<namePart type="family">Kowsher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Md.</namePart>
<namePart type="given">Shohanur</namePart>
<namePart type="given">Islam</namePart>
<namePart type="family">Sobuj</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nusrat</namePart>
<namePart type="given">Jahan</namePart>
<namePart type="family">Prottasha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">E</namePart>
<namePart type="given">Alejandro</namePart>
<namePart type="family">Alanis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ozlem</namePart>
<namePart type="family">Garibay</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Niloofar</namePart>
<namePart type="family">Yousefi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Table Representation Learning Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shuaichen</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Madelon</namePart>
<namePart type="family">Hulsebos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qian</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wenhu</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Huan</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-268-8</identifier>
</relatedItem>
<abstract>Time series forecasting is a challenging task, especially when dealing with data that contains both short-term variations and long-term trends. In this study, we introduce LLM-Mixer, a novel framework that combines multiscale time-series decomposition with the power of pre-trained Large Language Models (LLMs). LLM-Mixer breaks down time-series data into multiple temporal resolutions using downsampling and processes these multiscale representations with a frozen LLM, guided by a carefully designed text prompt that encodes information about the dataset’s features and structure. To understand the role of downsampling, we conduct a detailed analysis using Neural Tangent Kernel (NTK) distance, showing that incorporating multiple scales improves the model’s learning dynamics. We evaluate LLM-Mixer across a diverse set of forecasting tasks, including long-term multivariate, short-term multivariate, and long-term univariate scenarios. Experimental results demonstrate that LLM-Mixer achieves competitive performance compared to recent state-of-the-art models across various forecasting horizons.</abstract>
<identifier type="citekey">kowsher-etal-2025-llm</identifier>
<identifier type="doi">10.18653/v1/2025.trl-1.12</identifier>
<location>
<url>https://aclanthology.org/2025.trl-1.12/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>156</start>
<end>165</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LLM-Mixer: Multiscale Mixing in LLMs for Time Series Forecasting
%A Kowsher, Md
%A Sobuj, Md. Shohanur Islam
%A Prottasha, Nusrat Jahan
%A Alanis, E. Alejandro
%A Garibay, Ozlem
%A Yousefi, Niloofar
%Y Chang, Shuaichen
%Y Hulsebos, Madelon
%Y Liu, Qian
%Y Chen, Wenhu
%Y Sun, Huan
%S Proceedings of the 4th Table Representation Learning Workshop
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-268-8
%F kowsher-etal-2025-llm
%X Time series forecasting is a challenging task, especially when dealing with data that contains both short-term variations and long-term trends. In this study, we introduce LLM-Mixer, a novel framework that combines multiscale time-series decomposition with the power of pre-trained Large Language Models (LLMs). LLM-Mixer breaks down time-series data into multiple temporal resolutions using downsampling and processes these multiscale representations with a frozen LLM, guided by a carefully designed text prompt that encodes information about the dataset’s features and structure. To understand the role of downsampling, we conduct a detailed analysis using Neural Tangent Kernel (NTK) distance, showing that incorporating multiple scales improves the model’s learning dynamics. We evaluate LLM-Mixer across a diverse set of forecasting tasks, including long-term multivariate, short-term multivariate, and long-term univariate scenarios. Experimental results demonstrate that LLM-Mixer achieves competitive performance compared to recent state-of-the-art models across various forecasting horizons.
%R 10.18653/v1/2025.trl-1.12
%U https://aclanthology.org/2025.trl-1.12/
%U https://doi.org/10.18653/v1/2025.trl-1.12
%P 156-165
Markdown (Informal)
[LLM-Mixer: Multiscale Mixing in LLMs for Time Series Forecasting](https://aclanthology.org/2025.trl-1.12/) (Kowsher et al., TRL 2025)
ACL
- Md Kowsher, Md. Shohanur Islam Sobuj, Nusrat Jahan Prottasha, E. Alejandro Alanis, Ozlem Garibay, and Niloofar Yousefi. 2025. LLM-Mixer: Multiscale Mixing in LLMs for Time Series Forecasting. In Proceedings of the 4th Table Representation Learning Workshop, pages 156–165, Vienna, Austria. Association for Computational Linguistics.