@inproceedings{lyn-graham-2025-translatex,
title = "{T}rans{L}a{T}e{X}: Exposing the Last-Mile Execution Gap in {LLM}-Agent for Scientific Formatting",
author = "Lyn, Jiawen and
Graham, Yvette",
editor = "Zhao, Wei and
D{'}Souza, Jennifer and
Eger, Steffen and
Lauscher, Anne and
Hou, Yufang and
Sadat Moosavi, Nafise and
Miller, Tristan and
Lin, Chenghua",
booktitle = "Proceedings of The First Workshop on Human{--}LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM)",
month = dec,
year = "2025",
address = "Mumbai, India (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.sciprodllm-1.3/",
pages = "19--24",
isbn = "979-8-89176-307-4",
abstract = "Large Language Models (LLMs) have achieved remarkable progress in tasks such as survey writing and language polishing, yet the final stage of LaTeX formatting and template adaptation remains a neglected and error-prone bottleneck. We identify an execution illusion, where LLMs produce linguistically fluent but unexecutable LaTeX code. To address this, we introduce TransLaTeX{---}the first reasoning-and-control framework that converts documents between scholarly templates with compiler-level verifiability. TransLaTeX achieves three key innovations: (1) Structure{--}content separation via placeholder masking, ensuring privacy and less token consumption; (2) SafeFormatBench, the first benchmark dedicated to executable LaTeX generation and template conversion; and (3) Execution-grounded verification across compilation, policy compliance, and visual consistency. TransLaTeX outperforms Pandoc and full-text LLM baselines on SafeFormatBench in compilation rate, ACL policy compliance, and layout fidelity, effectively mitigating the execution illusion."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lyn-graham-2025-translatex">
<titleInfo>
<title>TransLaTeX: Exposing the Last-Mile Execution Gap in LLM-Agent for Scientific Formatting</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jiawen</namePart>
<namePart type="family">Lyn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yvette</namePart>
<namePart type="family">Graham</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The First Workshop on Human–LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jennifer</namePart>
<namePart type="family">D’Souza</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steffen</namePart>
<namePart type="family">Eger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anne</namePart>
<namePart type="family">Lauscher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yufang</namePart>
<namePart type="family">Hou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nafise</namePart>
<namePart type="family">Sadat Moosavi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tristan</namePart>
<namePart type="family">Miller</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chenghua</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mumbai, India (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-307-4</identifier>
</relatedItem>
<abstract>Large Language Models (LLMs) have achieved remarkable progress in tasks such as survey writing and language polishing, yet the final stage of LaTeX formatting and template adaptation remains a neglected and error-prone bottleneck. We identify an execution illusion, where LLMs produce linguistically fluent but unexecutable LaTeX code. To address this, we introduce TransLaTeX—the first reasoning-and-control framework that converts documents between scholarly templates with compiler-level verifiability. TransLaTeX achieves three key innovations: (1) Structure–content separation via placeholder masking, ensuring privacy and less token consumption; (2) SafeFormatBench, the first benchmark dedicated to executable LaTeX generation and template conversion; and (3) Execution-grounded verification across compilation, policy compliance, and visual consistency. TransLaTeX outperforms Pandoc and full-text LLM baselines on SafeFormatBench in compilation rate, ACL policy compliance, and layout fidelity, effectively mitigating the execution illusion.</abstract>
<identifier type="citekey">lyn-graham-2025-translatex</identifier>
<location>
<url>https://aclanthology.org/2025.sciprodllm-1.3/</url>
</location>
<part>
<date>2025-12</date>
<extent unit="page">
<start>19</start>
<end>24</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TransLaTeX: Exposing the Last-Mile Execution Gap in LLM-Agent for Scientific Formatting
%A Lyn, Jiawen
%A Graham, Yvette
%Y Zhao, Wei
%Y D’Souza, Jennifer
%Y Eger, Steffen
%Y Lauscher, Anne
%Y Hou, Yufang
%Y Sadat Moosavi, Nafise
%Y Miller, Tristan
%Y Lin, Chenghua
%S Proceedings of The First Workshop on Human–LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM)
%D 2025
%8 December
%I Association for Computational Linguistics
%C Mumbai, India (Hybrid)
%@ 979-8-89176-307-4
%F lyn-graham-2025-translatex
%X Large Language Models (LLMs) have achieved remarkable progress in tasks such as survey writing and language polishing, yet the final stage of LaTeX formatting and template adaptation remains a neglected and error-prone bottleneck. We identify an execution illusion, where LLMs produce linguistically fluent but unexecutable LaTeX code. To address this, we introduce TransLaTeX—the first reasoning-and-control framework that converts documents between scholarly templates with compiler-level verifiability. TransLaTeX achieves three key innovations: (1) Structure–content separation via placeholder masking, ensuring privacy and less token consumption; (2) SafeFormatBench, the first benchmark dedicated to executable LaTeX generation and template conversion; and (3) Execution-grounded verification across compilation, policy compliance, and visual consistency. TransLaTeX outperforms Pandoc and full-text LLM baselines on SafeFormatBench in compilation rate, ACL policy compliance, and layout fidelity, effectively mitigating the execution illusion.
%U https://aclanthology.org/2025.sciprodllm-1.3/
%P 19-24
Markdown (Informal)
[TransLaTeX: Exposing the Last-Mile Execution Gap in LLM-Agent for Scientific Formatting](https://aclanthology.org/2025.sciprodllm-1.3/) (Lyn & Graham, SciProdLLM 2025)
ACL