@inproceedings{durandard-etal-2025-language,
    title = "Language Style Matching in Large Language Models",
    author = "Durandard, No{\'e} and
      Dhawan, Saurabh and
      Poibeau, Thierry",
    editor = "B{\'e}chet, Fr{\'e}d{\'e}ric and
      Lef{\`e}vre, Fabrice and
      Asher, Nicholas and
      Kim, Seokhwan and
      Merlin, Teva",
    booktitle = "Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue",
    month = aug,
    year = "2025",
    address = "Avignon, France",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.sigdial-1.50/",
    pages = "620--636",
    abstract = "Language Style Matching (LSM){---}the subconscious alignment of linguistic style between conversational partners{---}is a key indicator of social coordination in human dialogue. We present the first systematic study of LSM in Large Language Models (LLMs) focusing on two primary objectives: measuring the degree of LSM exhibited in LLM-generated responses and developing techniques to enhance it. First, in order to measure whether LLMs natively show LSM, we computed LIWC-based LSM scores across diverse interaction scenarios and found that LSM scores for text generated by LLMs were either below or near the lower range of such scores observed in human dialogue. Second, we show that LLMs' adaptive behavior in this regard can be improved using inference-time techniques. We introduce and evaluate an inference-time sampling strategy{---}Logit-Constrained Generation{---}which can substantially enhance LSM scores in text generated by an LLM while preserving fluency. By advancing our understanding of LSM in LLMs and proposing effective enhancement strategies, this research contributes to the development of more socially attuned and communicatively adaptive AI systems."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="durandard-etal-2025-language">
    <titleInfo>
      <title>Language Style Matching in Large Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Noé</namePart>
      <namePart type="family">Durandard</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Saurabh</namePart>
      <namePart type="family">Dhawan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Thierry</namePart>
      <namePart type="family">Poibeau</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Frédéric</namePart>
        <namePart type="family">Béchet</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Fabrice</namePart>
        <namePart type="family">Lefèvre</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nicholas</namePart>
        <namePart type="family">Asher</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Seokhwan</namePart>
        <namePart type="family">Kim</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Teva</namePart>
        <namePart type="family">Merlin</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Avignon, France</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Language Style Matching (LSM)—the subconscious alignment of linguistic style between conversational partners—is a key indicator of social coordination in human dialogue. We present the first systematic study of LSM in Large Language Models (LLMs) focusing on two primary objectives: measuring the degree of LSM exhibited in LLM-generated responses and developing techniques to enhance it. First, in order to measure whether LLMs natively show LSM, we computed LIWC-based LSM scores across diverse interaction scenarios and found that LSM scores for text generated by LLMs were either below or near the lower range of such scores observed in human dialogue. Second, we show that LLMs’ adaptive behavior in this regard can be improved using inference-time techniques. We introduce and evaluate an inference-time sampling strategy—Logit-Constrained Generation—which can substantially enhance LSM scores in text generated by an LLM while preserving fluency. By advancing our understanding of LSM in LLMs and proposing effective enhancement strategies, this research contributes to the development of more socially attuned and communicatively adaptive AI systems.</abstract>
    <identifier type="citekey">durandard-etal-2025-language</identifier>
    <location>
      <url>https://aclanthology.org/2025.sigdial-1.50/</url>
    </location>
    <part>
      <date>2025-08</date>
      <extent unit="page">
        <start>620</start>
        <end>636</end>
      </extent>
    </part>
  </mods>
</modsCollection>

%0 Conference Proceedings
%T Language Style Matching in Large Language Models
%A Durandard, Noé
%A Dhawan, Saurabh
%A Poibeau, Thierry
%Y Béchet, Frédéric
%Y Lefèvre, Fabrice
%Y Asher, Nicholas
%Y Kim, Seokhwan
%Y Merlin, Teva
%S Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue
%D 2025
%8 August
%I Association for Computational Linguistics
%C Avignon, France
%F durandard-etal-2025-language
%X Language Style Matching (LSM)—the subconscious alignment of linguistic style between conversational partners—is a key indicator of social coordination in human dialogue. We present the first systematic study of LSM in Large Language Models (LLMs) focusing on two primary objectives: measuring the degree of LSM exhibited in LLM-generated responses and developing techniques to enhance it. First, in order to measure whether LLMs natively show LSM, we computed LIWC-based LSM scores across diverse interaction scenarios and found that LSM scores for text generated by LLMs were either below or near the lower range of such scores observed in human dialogue. Second, we show that LLMs’ adaptive behavior in this regard can be improved using inference-time techniques. We introduce and evaluate an inference-time sampling strategy—Logit-Constrained Generation—which can substantially enhance LSM scores in text generated by an LLM while preserving fluency. By advancing our understanding of LSM in LLMs and proposing effective enhancement strategies, this research contributes to the development of more socially attuned and communicatively adaptive AI systems.
%U https://aclanthology.org/2025.sigdial-1.50/
%P 620-636

Markdown (Informal)
[Language Style Matching in Large Language Models](https://aclanthology.org/2025.sigdial-1.50/) (Durandard et al., SIGDIAL 2025)

ACL
Noé Durandard, Saurabh Dhawan, and Thierry Poibeau. 2025. Language Style Matching in Large Language Models. In Proceedings of the 26th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 620–636, Avignon, France. Association for Computational Linguistics.
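
The abstract above refers to LIWC-based LSM scores between paired texts. As a rough, illustrative sketch only (not the authors' code), the snippet below computes a pairwise LSM score under the commonly used per-category formula 1 - |p1 - p2| / (p1 + p2 + 0.0001), averaged over function-word categories; the tiny word lists are hypothetical stand-ins for the proprietary LIWC dictionaries.

```python
# Minimal sketch of a LIWC-style Language Style Matching (LSM) score.
# Assumes the standard per-category formulation; the category word
# lists below are truncated, illustrative stand-ins for LIWC.

from typing import Dict

# Hypothetical, heavily truncated function-word categories (illustration only).
FUNCTION_WORD_CATEGORIES: Dict[str, set] = {
    "articles": {"a", "an", "the"},
    "prepositions": {"of", "in", "to", "with", "on", "at", "for"},
    "conjunctions": {"and", "but", "or", "so", "because"},
    "personal_pronouns": {"i", "you", "we", "he", "she", "they", "me"},
    "negations": {"not", "no", "never"},
}

def category_rates(text: str) -> Dict[str, float]:
    """Percentage of tokens in the text that fall in each category."""
    tokens = text.lower().split()
    total = max(len(tokens), 1)
    return {
        cat: 100.0 * sum(tok in words for tok in tokens) / total
        for cat, words in FUNCTION_WORD_CATEGORIES.items()
    }

def lsm_score(text_a: str, text_b: str) -> float:
    """Average per-category style-matching score between two texts."""
    rates_a, rates_b = category_rates(text_a), category_rates(text_b)
    per_category = [
        1.0 - abs(rates_a[c] - rates_b[c]) / (rates_a[c] + rates_b[c] + 0.0001)
        for c in FUNCTION_WORD_CATEGORIES
    ]
    return sum(per_category) / len(per_category)

if __name__ == "__main__":
    utterance = "I think we should go to the park because the weather is nice."
    reply = "Yes, we could go to the park and have a picnic in the sun."
    print(f"LSM score: {lsm_score(utterance, reply):.3f}")
```

In this framing, a score near 1 indicates closely matched function-word usage between the two texts, and a score near 0 indicates little stylistic alignment.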