@inproceedings{choudhary-etal-2025-exploring,
title = "Exploring Context Strategies in {LLM}s for Discourse-Aware Machine Translation",
author = "Choudhary, Ritvik and
Hida, Rem and
Hamada, Masaki and
Futami, Hayato and
Sekiya, Toshiyuki",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.1324/",
pages = "24382--24391",
ISBN = "979-8-89176-335-7",
abstract = "While large language models (LLMs) excel at machine translation (MT), the impact of how LLMs utilize different forms of contextual information on discourse-level phenomena remains underexplored. We systematically investigate how different forms of context such as prior source sentences, models' generated hypotheses, and reference translations influence standard MT metrics and specific discourse phenomena (formality, pronoun selection, and lexical cohesion). Evaluating multiple LLMs across multiple domains and language pairs, our findings consistently show that context boosts both translation and discourse-specific performance. Notably, the context strategy of combining source text with the model{'}s own prior hypotheses effectively improves discourse consistency without gold references, demonstrating effective use of model{'}s own imperfect generations as diverse contextual cues."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="choudhary-etal-2025-exploring">
<titleInfo>
<title>Exploring Context Strategies in LLMs for Discourse-Aware Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ritvik</namePart>
<namePart type="family">Choudhary</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rem</namePart>
<namePart type="family">Hida</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masaki</namePart>
<namePart type="family">Hamada</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hayato</namePart>
<namePart type="family">Futami</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Toshiyuki</namePart>
<namePart type="family">Sekiya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>While large language models (LLMs) excel at machine translation (MT), the impact of how LLMs utilize different forms of contextual information on discourse-level phenomena remains underexplored. We systematically investigate how different forms of context, such as prior source sentences, models’ generated hypotheses, and reference translations, influence standard MT metrics and specific discourse phenomena (formality, pronoun selection, and lexical cohesion). Evaluating multiple LLMs across multiple domains and language pairs, we consistently find that context boosts both translation and discourse-specific performance. Notably, the context strategy of combining source text with the model’s own prior hypotheses effectively improves discourse consistency without gold references, demonstrating effective use of the model’s own imperfect generations as diverse contextual cues.</abstract>
<identifier type="citekey">choudhary-etal-2025-exploring</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.1324/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>24382</start>
<end>24391</end>
</extent>
</part>
</mods>
</modsCollection>

%0 Conference Proceedings
%T Exploring Context Strategies in LLMs for Discourse-Aware Machine Translation
%A Choudhary, Ritvik
%A Hida, Rem
%A Hamada, Masaki
%A Futami, Hayato
%A Sekiya, Toshiyuki
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F choudhary-etal-2025-exploring
%X While large language models (LLMs) excel at machine translation (MT), the impact of how LLMs utilize different forms of contextual information on discourse-level phenomena remains underexplored. We systematically investigate how different forms of context, such as prior source sentences, models’ generated hypotheses, and reference translations, influence standard MT metrics and specific discourse phenomena (formality, pronoun selection, and lexical cohesion). Evaluating multiple LLMs across multiple domains and language pairs, we consistently find that context boosts both translation and discourse-specific performance. Notably, the context strategy of combining source text with the model’s own prior hypotheses effectively improves discourse consistency without gold references, demonstrating effective use of the model’s own imperfect generations as diverse contextual cues.
%U https://aclanthology.org/2025.findings-emnlp.1324/
%P 24382-24391

Markdown (Informal)
[Exploring Context Strategies in LLMs for Discourse-Aware Machine Translation](https://aclanthology.org/2025.findings-emnlp.1324/) (Choudhary et al., Findings 2025)
ACL
Ritvik Choudhary, Rem Hida, Masaki Hamada, Hayato Futami, and Toshiyuki Sekiya. 2025. Exploring Context Strategies in LLMs for Discourse-Aware Machine Translation. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 24382–24391, Suzhou, China. Association for Computational Linguistics.
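
The abstract above highlights one context strategy in particular: feeding the model the preceding source sentences together with its own prior translation hypotheses, rather than gold references. The sketch below is a minimal illustration of that rolling source-plus-hypothesis loop, not the paper's actual method: the `llm_generate` stub, the prompt wording, the window size, and the English-to-Japanese default are all assumptions, since the paper's prompts and settings are not reproduced here.

```python
from collections import deque

def llm_generate(prompt: str) -> str:
    """Hypothetical stand-in for a real LLM call; plug in your own client here."""
    raise NotImplementedError("connect this to an actual LLM backend")

def translate_document(sentences, src_lang="English", tgt_lang="Japanese", window=3):
    """Translate sentence by sentence, giving each request the previous source
    sentences *and* the model's own prior hypotheses as context, so discourse
    choices (formality, pronouns, lexical cohesion) can stay consistent."""
    context = deque(maxlen=window)  # rolling (source, hypothesis) pairs
    hypotheses = []
    for sent in sentences:
        ctx_lines = [f"{src_lang}: {s}\n{tgt_lang}: {h}" for s, h in context]
        prompt = (
            f"Translate the final {src_lang} sentence into {tgt_lang}, keeping "
            "formality, pronoun choice, and lexical choices consistent with the "
            "preceding translations.\n\n"
            + "\n".join(ctx_lines)
            + f"\n{src_lang}: {sent}\n{tgt_lang}:"
        )
        hyp = llm_generate(prompt).strip()
        context.append((sent, hyp))  # the model's own output becomes context
        hypotheses.append(hyp)
    return hypotheses
```

Because the loop appends each hypothesis back into the context window, no reference translations are needed at inference time, which is the property the abstract emphasizes about this strategy.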