BibTeX
@inproceedings{liu-etal-2024-llms-learn,
title = "{LLM}s learn governing principles of dynamical systems, revealing an in-context neural scaling law",
author = {Liu, Toni J.B. and
Boull{\'e}, Nicolas and
Sarfati, Rapha{\"e}l and
Earls, Christopher},
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.emnlp-main.842/",
doi = "10.18653/v1/2024.emnlp-main.842",
pages = "15097--15117",
abstract = "We study LLMs' ability to extrapolate the behavior of various dynamical systems, including stochastic, chaotic, continuous, and discrete systems, whose evolution is governed by principles of physical interest. Our results show that LLaMA-2, a language model trained on text, achieves accurate predictions of dynamical system time series without fine-tuning or prompt engineering. Moreover, the accuracy of the learned physical rules increases with the length of the input context window, revealing an in-context version of a neural scaling law. Along the way, we present a flexible and efficient algorithm for extracting probability density functions of multi-digit numbers directly from LLMs."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-etal-2024-llms-learn">
<titleInfo>
<title>LLMs learn governing principles of dynamical systems, revealing an in-context neural scaling law</title>
</titleInfo>
<name type="personal">
<namePart type="given">Toni</namePart>
<namePart type="given">J.b.</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nicolas</namePart>
<namePart type="family">Boulle</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Raphaël</namePart>
<namePart type="family">Sarfati</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christopher</namePart>
<namePart type="family">Earls</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We study LLMs’ ability to extrapolate the behavior of various dynamical systems, including stochastic, chaotic, continuous, and discrete systems, whose evolution is governed by principles of physical interest. Our results show that LLaMA-2, a language model trained on text, achieves accurate predictions of dynamical system time series without fine-tuning or prompt engineering. Moreover, the accuracy of the learned physical rules increases with the length of the input context window, revealing an in-context version of a neural scaling law. Along the way, we present a flexible and efficient algorithm for extracting probability density functions of multi-digit numbers directly from LLMs.</abstract>
<identifier type="citekey">liu-etal-2024-llms-learn</identifier>
<identifier type="doi">10.18653/v1/2024.emnlp-main.842</identifier>
<location>
<url>https://aclanthology.org/2024.emnlp-main.842/</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>15097</start>
<end>15117</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T LLMs learn governing principles of dynamical systems, revealing an in-context neural scaling law
%A Liu, Toni J.B.
%A Boullé, Nicolas
%A Sarfati, Raphaël
%A Earls, Christopher
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F liu-etal-2024-llms-learn
%X We study LLMs’ ability to extrapolate the behavior of various dynamical systems, including stochastic, chaotic, continuous, and discrete systems, whose evolution is governed by principles of physical interest. Our results show that LLaMA-2, a language model trained on text, achieves accurate predictions of dynamical system time series without fine-tuning or prompt engineering. Moreover, the accuracy of the learned physical rules increases with the length of the input context window, revealing an in-context version of a neural scaling law. Along the way, we present a flexible and efficient algorithm for extracting probability density functions of multi-digit numbers directly from LLMs.
%R 10.18653/v1/2024.emnlp-main.842
%U https://aclanthology.org/2024.emnlp-main.842/
%U https://doi.org/10.18653/v1/2024.emnlp-main.842
%P 15097-15117
Markdown (Informal)
[LLMs learn governing principles of dynamical systems, revealing an in-context neural scaling law](https://aclanthology.org/2024.emnlp-main.842/) (Liu et al., EMNLP 2024)
ACL
Toni J.B. Liu, Nicolas Boullé, Raphaël Sarfati, and Christopher Earls. 2024. LLMs learn governing principles of dynamical systems, revealing an in-context neural scaling law. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15097–15117, Miami, Florida, USA. Association for Computational Linguistics.
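
As a rough illustration of the abstract's last point (reading a probability density over multi-digit numbers out of an LLM digit by digit), here is a minimal sketch. It is not the paper's algorithm or its LLaMA-2 interface: `digit_probs` is a hypothetical stand-in for a model's next-token distribution over the ten digit tokens, and the brute-force enumeration is only feasible for a few digits, whereas the paper describes a more efficient extraction procedure.

```python
from typing import Dict

DIGITS = "0123456789"

def digit_probs(prefix: str) -> Dict[str, float]:
    # Hypothetical stand-in for an LLM's next-token distribution over digit
    # tokens given `prefix`. A real implementation would softmax the model's
    # logits restricted to the ten digit tokens; here we return a fixed,
    # mildly non-uniform distribution so the sketch runs without any model.
    raw = [10 - i for i in range(10)]          # mock weights: 10, 9, ..., 1
    total = float(sum(raw))
    return {d: w / total for d, w in zip(DIGITS, raw)}

def number_pmf(prefix: str, n_digits: int) -> Dict[str, float]:
    # Chain rule: P(d1 d2 ... dn | prefix) = prod_k P(dk | prefix, d1..d(k-1)).
    # Brute force over all 10**n_digits strings, so keep n_digits small.
    pmf = {"": 1.0}
    for _ in range(n_digits):
        nxt: Dict[str, float] = {}
        for s, p in pmf.items():
            for d, q in digit_probs(prefix + s).items():
                nxt[s + d] = p * q
        pmf = nxt
    return pmf

if __name__ == "__main__":
    pmf = number_pmf(prefix="0.", n_digits=2)    # mass over "00" .. "99"
    assert abs(sum(pmf.values()) - 1.0) < 1e-9   # sums to 1 by construction
    print(sorted(pmf.items(), key=lambda kv: -kv[1])[:3])
```

The chain-rule step is the core idea: the probability of a numeric string is the product of per-digit conditionals, which is what allows token-level model outputs to be reassembled into a density over multi-digit numbers.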