@inproceedings{sheang-etal-2022-controllable,
title = "Controllable Lexical Simplification for {E}nglish",
author = "Sheang, Kim Cheng and
Ferr{\'e}s, Daniel and
Saggion, Horacio",
editor = "{\v{S}}tajner, Sanja and
Saggion, Horacio and
Ferr{\'e}s, Daniel and
Shardlow, Matthew and
Sheang, Kim Cheng and
North, Kai and
Zampieri, Marcos and
Xu, Wei",
booktitle = "Proceedings of the Workshop on Text Simplification, Accessibility, and Readability (TSAR-2022)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Virtual)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.tsar-1.19",
doi = "10.18653/v1/2022.tsar-1.19",
pages = "199--206",
abstract = "Fine-tuning Transformer-based approaches have recently shown exciting results on the sentence simplification task. However, so far, no research has applied similar approaches to the Lexical Simplification (LS) task. In this paper, we present ConLS, a Controllable Lexical Simplification system fine-tuned with T5 (a Transformer-based model pre-trained with a BERT-style approach and several other tasks). The evaluation results on three datasets (LexMTurk, BenchLS, and NNSeval) have shown that our model performs comparably to LSBert (the current state-of-the-art) and even outperforms it in some cases. We also conducted a detailed comparison on the effectiveness of control tokens to give a clear view of how each token contributes to the model.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sheang-etal-2022-controllable">
<titleInfo>
<title>Controllable Lexical Simplification for English</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kim</namePart>
<namePart type="given">Cheng</namePart>
<namePart type="family">Sheang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Ferrés</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Horacio</namePart>
<namePart type="family">Saggion</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on Text Simplification, Accessibility, and Readability (TSAR-2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sanja</namePart>
<namePart type="family">Štajner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Horacio</namePart>
<namePart type="family">Saggion</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Ferrés</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthew</namePart>
<namePart type="family">Shardlow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kim</namePart>
<namePart type="given">Cheng</namePart>
<namePart type="family">Sheang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kai</namePart>
<namePart type="family">North</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marcos</namePart>
<namePart type="family">Zampieri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Virtual)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Fine-tuning Transformer-based approaches have recently shown exciting results on the sentence simplification task. However, so far, no research has applied similar approaches to the Lexical Simplification (LS) task. In this paper, we present ConLS, a Controllable Lexical Simplification system fine-tuned with T5 (a Transformer-based model pre-trained with a BERT-style approach and several other tasks). The evaluation results on three datasets (LexMTurk, BenchLS, and NNSeval) have shown that our model performs comparably to LSBert (the current state-of-the-art) and even outperforms it in some cases. We also conducted a detailed comparison on the effectiveness of control tokens to give a clear view of how each token contributes to the model.</abstract>
<identifier type="citekey">sheang-etal-2022-controllable</identifier>
<identifier type="doi">10.18653/v1/2022.tsar-1.19</identifier>
<location>
<url>https://aclanthology.org/2022.tsar-1.19</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>199</start>
<end>206</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Controllable Lexical Simplification for English
%A Sheang, Kim Cheng
%A Ferrés, Daniel
%A Saggion, Horacio
%Y Štajner, Sanja
%Y Saggion, Horacio
%Y Ferrés, Daniel
%Y Shardlow, Matthew
%Y Sheang, Kim Cheng
%Y North, Kai
%Y Zampieri, Marcos
%Y Xu, Wei
%S Proceedings of the Workshop on Text Simplification, Accessibility, and Readability (TSAR-2022)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Virtual)
%F sheang-etal-2022-controllable
%X Fine-tuning Transformer-based approaches have recently shown exciting results on the sentence simplification task. However, so far, no research has applied similar approaches to the Lexical Simplification (LS) task. In this paper, we present ConLS, a Controllable Lexical Simplification system fine-tuned with T5 (a Transformer-based model pre-trained with a BERT-style approach and several other tasks). The evaluation results on three datasets (LexMTurk, BenchLS, and NNSeval) have shown that our model performs comparably to LSBert (the current state-of-the-art) and even outperforms it in some cases. We also conducted a detailed comparison on the effectiveness of control tokens to give a clear view of how each token contributes to the model.
%R 10.18653/v1/2022.tsar-1.19
%U https://aclanthology.org/2022.tsar-1.19
%U https://doi.org/10.18653/v1/2022.tsar-1.19
%P 199-206
Markdown (Informal)
[Controllable Lexical Simplification for English](https://aclanthology.org/2022.tsar-1.19) (Sheang et al., TSAR 2022)
ACL
- Kim Cheng Sheang, Daniel Ferrés, and Horacio Saggion. 2022. Controllable Lexical Simplification for English. In Proceedings of the Workshop on Text Simplification, Accessibility, and Readability (TSAR-2022), pages 199–206, Abu Dhabi, United Arab Emirates (Virtual). Association for Computational Linguistics.