@inproceedings{tam-etal-2024-speak,
title = "Let Me Speak Freely? A Study On The Impact Of Format Restrictions On Large Language Model Performance.",
author = "Tam, Zhi Rui and
Wu, Cheng-Kuang and
Tsai, Yi-Lin and
Lin, Chieh-Yen and
Lee, Hung-yi and
Chen, Yun-Nung",
editor = "Dernoncourt, Franck and
Preo{\c{t}}iuc-Pietro, Daniel and
Shimorina, Anastasia",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track",
month = nov,
year = "2024",
address = "Miami, Florida, US",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.emnlp-industry.91",
doi = "10.18653/v1/2024.emnlp-industry.91",
pages = "1218--1236",
abstract = "Structured generation, the process of producing content in standardized formats like JSON and XML, is widely utilized in real-world applications to extract key output information from large language models (LLMs).This study investigates whether such constraints on generation space impact LLMs{'} abilities, including reasoning and domain knowledge comprehension. Specifically, we evaluate LLMs{'} performance when restricted to adhere to structured formats versus generating free-form responses across various common tasks. Surprisingly, we observe a significant decline in LLMs{'} reasoning abilities under format restrictions. Furthermore, we find that stricter format constraints generally lead to greater performance degradation in reasoning tasks.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tam-etal-2024-speak">
<titleInfo>
<title>Let Me Speak Freely? A Study On The Impact Of Format Restrictions On Large Language Model Performance.</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhi</namePart>
<namePart type="given">Rui</namePart>
<namePart type="family">Tam</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cheng-Kuang</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yi-Lin</namePart>
<namePart type="family">Tsai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chieh-Yen</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hung-yi</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track</title>
</titleInfo>
<name type="personal">
<namePart type="given">Franck</namePart>
<namePart type="family">Dernoncourt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Preoţiuc-Pietro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anastasia</namePart>
<namePart type="family">Shimorina</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, US</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Structured generation, the process of producing content in standardized formats like JSON and XML, is widely utilized in real-world applications to extract key output information from large language models (LLMs). This study investigates whether such constraints on generation space impact LLMs’ abilities, including reasoning and domain knowledge comprehension. Specifically, we evaluate LLMs’ performance when restricted to adhere to structured formats versus generating free-form responses across various common tasks. Surprisingly, we observe a significant decline in LLMs’ reasoning abilities under format restrictions. Furthermore, we find that stricter format constraints generally lead to greater performance degradation in reasoning tasks.</abstract>
<identifier type="citekey">tam-etal-2024-speak</identifier>
<identifier type="doi">10.18653/v1/2024.emnlp-industry.91</identifier>
<location>
<url>https://aclanthology.org/2024.emnlp-industry.91</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>1218</start>
<end>1236</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Let Me Speak Freely? A Study On The Impact Of Format Restrictions On Large Language Model Performance.
%A Tam, Zhi Rui
%A Wu, Cheng-Kuang
%A Tsai, Yi-Lin
%A Lin, Chieh-Yen
%A Lee, Hung-yi
%A Chen, Yun-Nung
%Y Dernoncourt, Franck
%Y Preoţiuc-Pietro, Daniel
%Y Shimorina, Anastasia
%S Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, US
%F tam-etal-2024-speak
%X Structured generation, the process of producing content in standardized formats like JSON and XML, is widely utilized in real-world applications to extract key output information from large language models (LLMs). This study investigates whether such constraints on generation space impact LLMs’ abilities, including reasoning and domain knowledge comprehension. Specifically, we evaluate LLMs’ performance when restricted to adhere to structured formats versus generating free-form responses across various common tasks. Surprisingly, we observe a significant decline in LLMs’ reasoning abilities under format restrictions. Furthermore, we find that stricter format constraints generally lead to greater performance degradation in reasoning tasks.
%R 10.18653/v1/2024.emnlp-industry.91
%U https://aclanthology.org/2024.emnlp-industry.91
%U https://doi.org/10.18653/v1/2024.emnlp-industry.91
%P 1218-1236
Markdown (Informal)
[Let Me Speak Freely? A Study On The Impact Of Format Restrictions On Large Language Model Performance.](https://aclanthology.org/2024.emnlp-industry.91) (Tam et al., EMNLP 2024)
ACL
Zhi Rui Tam, Cheng-Kuang Wu, Yi-Lin Tsai, Chieh-Yen Lin, Hung-yi Lee, and Yun-Nung Chen. 2024. Let Me Speak Freely? A Study On The Impact Of Format Restrictions On Large Language Model Performance. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: Industry Track, pages 1218–1236, Miami, Florida, US. Association for Computational Linguistics.