@inproceedings{bueno-etal-2022-induced,
title = "Induced Natural Language Rationales and Interleaved Markup Tokens Enable Extrapolation in Large Language Models",
author = "Bueno, Mirelle Candida and
Gemmell, Carlos and
Dalton, Jeff and
Lotufo, Roberto and
Nogueira, Rodrigo",
editor = "Ferreira, Deborah and
Valentino, Marco and
Freitas, Andre and
Welleck, Sean and
Schubotz, Moritz",
booktitle = "Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.mathnlp-1.3",
doi = "10.18653/v1/2022.mathnlp-1.3",
pages = "17--24",
abstract = "The ability to extrapolate, i.e., to make predictions on sequences that are longer than those presented as training examples, is a challenging problem for current deep learning models. Recent work shows that this limitation persists in state-of-the-art Transformer-based models. Most solutions to this problem use specific architectures or training methods that do not generalize to other tasks. We demonstrate that large language models can succeed in extrapolation without modifying their architecture or training procedure. Our experimental results show that generating step-by-step rationales and introducing marker tokens are both required for effective extrapolation. First, we induce a language model to produce step-by-step rationales before outputting the answer to effectively communicate the task to the model. However, as sequences become longer, we find that current models struggle to keep track of token positions. To address this issue, we interleave output tokens with markup tokens that act as explicit positional and counting symbols. Our findings show how these two complementary approaches enable remarkable sequence extrapolation and highlight a limitation of current architectures to effectively generalize without explicit surface form guidance. Code available at \url{https://anonymous.4open.science/r/induced-rationales-markup-tokens-0650/README.md}",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="bueno-etal-2022-induced">
<titleInfo>
<title>Induced Natural Language Rationales and Interleaved Markup Tokens Enable Extrapolation in Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mirelle</namePart>
<namePart type="given">Candida</namePart>
<namePart type="family">Bueno</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carlos</namePart>
<namePart type="family">Gemmell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jeff</namePart>
<namePart type="family">Dalton</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roberto</namePart>
<namePart type="family">Lotufo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rodrigo</namePart>
<namePart type="family">Nogueira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Deborah</namePart>
<namePart type="family">Ferreira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marco</namePart>
<namePart type="family">Valentino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andre</namePart>
<namePart type="family">Freitas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sean</namePart>
<namePart type="family">Welleck</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Moritz</namePart>
<namePart type="family">Schubotz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The ability to extrapolate, i.e., to make predictions on sequences that are longer than those presented as training examples, is a challenging problem for current deep learning models. Recent work shows that this limitation persists in state-of-the-art Transformer-based models. Most solutions to this problem use specific architectures or training methods that do not generalize to other tasks. We demonstrate that large language models can succeed in extrapolation without modifying their architecture or training procedure. Our experimental results show that generating step-by-step rationales and introducing marker tokens are both required for effective extrapolation. First, we induce a language model to produce step-by-step rationales before outputting the answer to effectively communicate the task to the model. However, as sequences become longer, we find that current models struggle to keep track of token positions. To address this issue, we interleave output tokens with markup tokens that act as explicit positional and counting symbols. Our findings show how these two complementary approaches enable remarkable sequence extrapolation and highlight a limitation of current architectures to effectively generalize without explicit surface form guidance. Code available at https://anonymous.4open.science/r/induced-rationales-markup-tokens-0650/README.md</abstract>
<identifier type="citekey">bueno-etal-2022-induced</identifier>
<identifier type="doi">10.18653/v1/2022.mathnlp-1.3</identifier>
<location>
<url>https://aclanthology.org/2022.mathnlp-1.3</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>17</start>
<end>24</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Induced Natural Language Rationales and Interleaved Markup Tokens Enable Extrapolation in Large Language Models
%A Bueno, Mirelle Candida
%A Gemmell, Carlos
%A Dalton, Jeff
%A Lotufo, Roberto
%A Nogueira, Rodrigo
%Y Ferreira, Deborah
%Y Valentino, Marco
%Y Freitas, Andre
%Y Welleck, Sean
%Y Schubotz, Moritz
%S Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F bueno-etal-2022-induced
%X The ability to extrapolate, i.e., to make predictions on sequences that are longer than those presented as training examples, is a challenging problem for current deep learning models. Recent work shows that this limitation persists in state-of-the-art Transformer-based models. Most solutions to this problem use specific architectures or training methods that do not generalize to other tasks. We demonstrate that large language models can succeed in extrapolation without modifying their architecture or training procedure. Our experimental results show that generating step-by-step rationales and introducing marker tokens are both required for effective extrapolation. First, we induce a language model to produce step-by-step rationales before outputting the answer to effectively communicate the task to the model. However, as sequences become longer, we find that current models struggle to keep track of token positions. To address this issue, we interleave output tokens with markup tokens that act as explicit positional and counting symbols. Our findings show how these two complementary approaches enable remarkable sequence extrapolation and highlight a limitation of current architectures to effectively generalize without explicit surface form guidance. Code available at https://anonymous.4open.science/r/induced-rationales-markup-tokens-0650/README.md
%R 10.18653/v1/2022.mathnlp-1.3
%U https://aclanthology.org/2022.mathnlp-1.3
%U https://doi.org/10.18653/v1/2022.mathnlp-1.3
%P 17-24
Markdown (Informal)
[Induced Natural Language Rationales and Interleaved Markup Tokens Enable Extrapolation in Large Language Models](https://aclanthology.org/2022.mathnlp-1.3) (Bueno et al., MathNLP 2022)
ACL
Mirelle Candida Bueno, Carlos Gemmell, Jeff Dalton, Roberto Lotufo, and Rodrigo Nogueira. 2022. Induced Natural Language Rationales and Interleaved Markup Tokens Enable Extrapolation in Large Language Models. In Proceedings of the 1st Workshop on Mathematical Natural Language Processing (MathNLP), pages 17–24, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.
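
A minimal illustrative sketch of the markup-token interleaving idea described in the abstract (this is not the authors' code; the `<i>` tag format and the `interleave_markup` helper are assumptions made for illustration):

```python
# Sketch: interleave explicit positional markup tokens with the items of a
# sequence, so a model producing a step-by-step rationale can refer to
# positions and counts by surface form rather than tracking them implicitly.
# The "<i>" tag format is an assumption for this example.

def interleave_markup(items):
    """Prefix each item with a positional markup token,
    e.g. ['7', '2', '9'] -> '<1> 7 <2> 2 <3> 9'."""
    return " ".join(f"<{i}> {item}" for i, item in enumerate(items, start=1))

if __name__ == "__main__":
    digits = list("7295")
    print(interleave_markup(digits))
    # Output: <1> 7 <2> 2 <3> 9 <4> 5
```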