@inproceedings{vajjala-2025-onenrc,
title = "{OneNRC}@{TSAR}2025 Shared Task: Small Models for Readability Controlled Text Simplification",
author = "Vajjala, Sowmya",
editor = "Shardlow, Matthew and
Alva-Manchego, Fernando and
North, Kai and
Stodden, Regina and
Saggion, Horacio and
Khallaf, Nouran and
Hayakawa, Akio",
booktitle = "Proceedings of the Fourth Workshop on Text Simplification, Accessibility and Readability (TSAR 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.tsar-1.9/",
pages = "131--136",
isbn = "979-8-89176-176-6",
abstract = "In this system description paper, we describe the team OneNRC{'}s experiments on readability controlled text simplification, focused on using smaller, quantized language models ({\ensuremath{<}}20B). We compare these with one large proprietary model and show that the smaller models offer comparable or even better results in some experimental settings. The approach primarily comprises of prompt optimization, agentic workflow, and tool calling. The best results were achieved while using a CEFR proficiency classifier as a verification tool for the language model agent. In terms of comparison with other systems, our submission that used a quantized Gemma3:12B model that ran on a laptop achieved a rank of 9.88 among the submitted systems as per the AUTORANK framework used by the organizers. We hope these results will lead into further exploration on the usefulness of smaller models for text simplification."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="vajjala-2025-onenrc">
<titleInfo>
<title>OneNRC@TSAR2025 Shared Task: Small Models for Readability Controlled Text Simplification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sowmya</namePart>
<namePart type="family">Vajjala</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth Workshop on Text Simplification, Accessibility and Readability (TSAR 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Matthew</namePart>
<namePart type="family">Shardlow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fernando</namePart>
<namePart type="family">Alva-Manchego</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kai</namePart>
<namePart type="family">North</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Regina</namePart>
<namePart type="family">Stodden</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Horacio</namePart>
<namePart type="family">Saggion</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nouran</namePart>
<namePart type="family">Khallaf</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Akio</namePart>
<namePart type="family">Hayakawa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-176-6</identifier>
</relatedItem>
<abstract>In this system description paper, we describe the team OneNRC’s experiments on readability controlled text simplification, focused on using smaller, quantized language models (&lt;20B). We compare these with one large proprietary model and show that the smaller models offer comparable or even better results in some experimental settings. The approach primarily comprises of prompt optimization, agentic workflow, and tool calling. The best results were achieved while using a CEFR proficiency classifier as a verification tool for the language model agent. In terms of comparison with other systems, our submission that used a quantized Gemma3:12B model that ran on a laptop achieved a rank of 9.88 among the submitted systems as per the AUTORANK framework used by the organizers. We hope these results will lead into further exploration on the usefulness of smaller models for text simplification.</abstract>
<identifier type="citekey">vajjala-2025-onenrc</identifier>
<location>
<url>https://aclanthology.org/2025.tsar-1.9/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>131</start>
<end>136</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T OneNRC@TSAR2025 Shared Task: Small Models for Readability Controlled Text Simplification
%A Vajjala, Sowmya
%Y Shardlow, Matthew
%Y Alva-Manchego, Fernando
%Y North, Kai
%Y Stodden, Regina
%Y Saggion, Horacio
%Y Khallaf, Nouran
%Y Hayakawa, Akio
%S Proceedings of the Fourth Workshop on Text Simplification, Accessibility and Readability (TSAR 2025)
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-176-6
%F vajjala-2025-onenrc
%X In this system description paper, we describe the team OneNRC’s experiments on readability controlled text simplification, focused on using smaller, quantized language models (<20B). We compare these with one large proprietary model and show that the smaller models offer comparable or even better results in some experimental settings. The approach primarily comprises of prompt optimization, agentic workflow, and tool calling. The best results were achieved while using a CEFR proficiency classifier as a verification tool for the language model agent. In terms of comparison with other systems, our submission that used a quantized Gemma3:12B model that ran on a laptop achieved a rank of 9.88 among the submitted systems as per the AUTORANK framework used by the organizers. We hope these results will lead into further exploration on the usefulness of smaller models for text simplification.
%U https://aclanthology.org/2025.tsar-1.9/
%P 131-136
Markdown (Informal)
[OneNRC@TSAR2025 Shared Task: Small Models for Readability Controlled Text Simplification](https://aclanthology.org/2025.tsar-1.9/) (Vajjala, TSAR 2025)
ACL