@inproceedings{min-etal-2025-punctuation,
title = "Punctuation Restoration Improves Structure Understanding without Supervision",
author = "Min, Junghyun and
Lee, Minho and
Lee, Woochul and
Lee, Yeonsoo",
editor = "Adlakha, Vaibhav and
Chronopoulou, Alexandra and
Li, Xiang Lorraine and
Majumder, Bodhisattwa Prasad and
Shi, Freda and
Vernikos, Giorgos",
booktitle = "Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)",
month = may,
year = "2025",
address = "Albuquerque, NM",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.repl4nlp-1.10/",
pages = "120--130",
ISBN = "979-8-89176-245-9",
abstract = "Unsupervised learning objectives like autoregressive and masked language modeling constitute a significant part in producing pre-trained representations that perform various downstream applications from natural language understanding to conversational tasks. However, despite impressive generative capabilities of recent large language models, their abilities to capture syntactic or semantic structure within text lag behind. We hypothesize that the mismatch between linguistic performance and competence in machines is attributable to insufficient learning of linguistic structure knowledge via currently popular pre-training objectives. Working with English, we show that punctuation restoration as a learning objective improves performance on structure-related tasks like named entity recognition, open information extraction, chunking, and part-of-speech tagging. Punctuation restoration results in ▲{\ensuremath{\geq}}2{\%}p improvement in 16 out of 18 experiments, across 6 out of 7 tasks. Our results show that punctuation restoration is an effective learning objective that can improve structure understanding and yield a more robust structure-aware representations of natural language in base-sized models."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="min-etal-2025-punctuation">
    <titleInfo>
        <title>Punctuation Restoration Improves Structure Understanding without Supervision</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Junghyun</namePart>
        <namePart type="family">Min</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Minho</namePart>
        <namePart type="family">Lee</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Woochul</namePart>
        <namePart type="family">Lee</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Yeonsoo</namePart>
        <namePart type="family">Lee</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2025-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Vaibhav</namePart>
            <namePart type="family">Adlakha</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Alexandra</namePart>
            <namePart type="family">Chronopoulou</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Xiang</namePart>
            <namePart type="given">Lorraine</namePart>
            <namePart type="family">Li</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Bodhisattwa</namePart>
            <namePart type="given">Prasad</namePart>
            <namePart type="family">Majumder</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Freda</namePart>
            <namePart type="family">Shi</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Giorgos</namePart>
            <namePart type="family">Vernikos</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Albuquerque, NM</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
        <identifier type="isbn">979-8-89176-245-9</identifier>
    </relatedItem>
    <abstract>Unsupervised learning objectives like autoregressive and masked language modeling play a significant role in producing pre-trained representations that support various downstream applications, from natural language understanding to conversational tasks. However, despite the impressive generative capabilities of recent large language models, their ability to capture syntactic or semantic structure within text lags behind. We hypothesize that this mismatch between linguistic performance and competence in machines is attributable to insufficient learning of linguistic structure knowledge via currently popular pre-training objectives. Working with English, we show that punctuation restoration as a learning objective improves performance on structure-related tasks like named entity recognition, open information extraction, chunking, and part-of-speech tagging. Punctuation restoration yields a ≥2%p improvement in 16 out of 18 experiments, across 6 out of 7 tasks. Our results show that punctuation restoration is an effective learning objective that can improve structure understanding and yield more robust, structure-aware representations of natural language in base-sized models.</abstract>
<identifier type="citekey">min-etal-2025-punctuation</identifier>
<location>
<url>https://aclanthology.org/2025.repl4nlp-1.10/</url>
</location>
<part>
<date>2025-05</date>
<extent unit="page">
<start>120</start>
<end>130</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Punctuation Restoration Improves Structure Understanding without Supervision
%A Min, Junghyun
%A Lee, Minho
%A Lee, Woochul
%A Lee, Yeonsoo
%Y Adlakha, Vaibhav
%Y Chronopoulou, Alexandra
%Y Li, Xiang Lorraine
%Y Majumder, Bodhisattwa Prasad
%Y Shi, Freda
%Y Vernikos, Giorgos
%S Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)
%D 2025
%8 May
%I Association for Computational Linguistics
%C Albuquerque, NM
%@ 979-8-89176-245-9
%F min-etal-2025-punctuation
%X Unsupervised learning objectives like autoregressive and masked language modeling play a significant role in producing pre-trained representations that support various downstream applications, from natural language understanding to conversational tasks. However, despite the impressive generative capabilities of recent large language models, their ability to capture syntactic or semantic structure within text lags behind. We hypothesize that this mismatch between linguistic performance and competence in machines is attributable to insufficient learning of linguistic structure knowledge via currently popular pre-training objectives. Working with English, we show that punctuation restoration as a learning objective improves performance on structure-related tasks like named entity recognition, open information extraction, chunking, and part-of-speech tagging. Punctuation restoration yields a ≥2%p improvement in 16 out of 18 experiments, across 6 out of 7 tasks. Our results show that punctuation restoration is an effective learning objective that can improve structure understanding and yield more robust, structure-aware representations of natural language in base-sized models.
%U https://aclanthology.org/2025.repl4nlp-1.10/
%P 120-130
Markdown (Informal)
[Punctuation Restoration Improves Structure Understanding without Supervision](https://aclanthology.org/2025.repl4nlp-1.10/) (Min et al., RepL4NLP 2025)
ACL
Junghyun Min, Minho Lee, Woochul Lee, and Yeonsoo Lee. 2025. Punctuation Restoration Improves Structure Understanding without Supervision. In Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025), pages 120–130, Albuquerque, NM. Association for Computational Linguistics.
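
As a rough, hypothetical illustration of the punctuation-restoration objective described in the abstract (not the authors' code), the sketch below strips punctuation from raw text to build (input, target) pairs that a sequence model could be trained to restore; the function name and the seq2seq-style pairing are assumptions for illustration only.

```python
# Hypothetical sketch: form punctuation-restoration training pairs by removing
# punctuation from raw text (the model input) and keeping the original text as
# the restoration target. Illustrative only; not the authors' implementation.
import string

PUNCTUATION = set(string.punctuation)

def make_restoration_pair(text: str) -> tuple[str, str]:
    """Return a (punctuation-free input, original target) pair for one example."""
    stripped = "".join(ch for ch in text if ch not in PUNCTUATION)
    stripped = " ".join(stripped.split())  # normalize whitespace left by removals
    return stripped, text

if __name__ == "__main__":
    src, tgt = make_restoration_pair("However, results vary: 16 of 18 improve.")
    print(src)  # However results vary 16 of 18 improve
    print(tgt)  # However, results vary: 16 of 18 improve.
```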