@inproceedings{li-etal-2022-text,
  title     = {Text Revision by On-the-Fly Representation Optimization},
  author    = {Li, Jingjing and Li, Zichao and Ge, Tao and King, Irwin and Lyu, Michael},
  editor    = {Huang, Ting-Hao 'Kenneth' and Raheja, Vipul and Kang, Dongyeop and Chung, John Joon Young and Gissin, Daniel and Lee, Mina and Gero, Katy Ilonka},
  booktitle = {Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022)},
  month     = may,
  year      = {2022},
  address   = {Dublin, Ireland},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.in2writing-1.7},
  doi       = {10.18653/v1/2022.in2writing-1.7},
  pages     = {58--59},
  abstract  = {Text revision refers to a family of natural language generation tasks, where the source and target sequences share moderate resemblance in surface form but differentiate in attributes, such as text formality and simplicity. Current state-of-the-art methods formulate these tasks as sequence-to-sequence learning problems, which rely on large-scale parallel training corpus. In this paper, we present an iterative inplace editing approach for text revision, which requires no parallel data. In this approach, we simply fine-tune a pre-trained Transformer with masked language modeling and attribute classification. During inference, the editing at each iteration is realized by two-step span replacement. At the first step, the distributed representation of the text optimizes on the fly towards an attribute function. At the second step, a text span is masked and another new one is proposed conditioned on the optimized representation. The empirical experiments on two typical and important text revision tasks, text formalization and text simplification, show the effectiveness of our approach. It achieves competitive and even better performance than state-of-the-art supervised methods on text simplification, and gains better performance than strong unsupervised methods on text formalization.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-etal-2022-text">
<titleInfo>
<title>Text Revision by On-the-Fly Representation Optimization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jingjing</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zichao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tao</namePart>
<namePart type="family">Ge</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Irwin</namePart>
<namePart type="family">King</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Lyu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ting-Hao</namePart>
<namePart type="given">'Kenneth'</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vipul</namePart>
<namePart type="family">Raheja</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dongyeop</namePart>
<namePart type="family">Kang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">John</namePart>
<namePart type="given">Joon</namePart>
<namePart type="given">Young</namePart>
<namePart type="family">Chung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Gissin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mina</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katy</namePart>
<namePart type="given">Ilonka</namePart>
<namePart type="family">Gero</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Text revision refers to a family of natural language generation tasks, where the source and target sequences share moderate resemblance in surface form but differentiate in attributes, such as text formality and simplicity. Current state-of-the-art methods formulate these tasks as sequence-to-sequence learning problems, which rely on large-scale parallel training corpus. In this paper, we present an iterative inplace editing approach for text revision, which requires no parallel data. In this approach, we simply fine-tune a pre-trained Transformer with masked language modeling and attribute classification. During inference, the editing at each iteration is realized by two-step span replacement. At the first step, the distributed representation of the text optimizes on the fly towards an attribute function. At the second step, a text span is masked and another new one is proposed conditioned on the optimized representation. The empirical experiments on two typical and important text revision tasks, text formalization and text simplification, show the effectiveness of our approach. It achieves competitive and even better performance than state-of-the-art supervised methods on text simplification, and gains better performance than strong unsupervised methods on text formalization.</abstract>
<identifier type="citekey">li-etal-2022-text</identifier>
<identifier type="doi">10.18653/v1/2022.in2writing-1.7</identifier>
<location>
<url>https://aclanthology.org/2022.in2writing-1.7</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>58</start>
<end>59</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Text Revision by On-the-Fly Representation Optimization
%A Li, Jingjing
%A Li, Zichao
%A Ge, Tao
%A King, Irwin
%A Lyu, Michael
%Y Huang, Ting-Hao 'Kenneth'
%Y Raheja, Vipul
%Y Kang, Dongyeop
%Y Chung, John Joon Young
%Y Gissin, Daniel
%Y Lee, Mina
%Y Gero, Katy Ilonka
%S Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F li-etal-2022-text
%X Text revision refers to a family of natural language generation tasks, where the source and target sequences share moderate resemblance in surface form but differentiate in attributes, such as text formality and simplicity. Current state-of-the-art methods formulate these tasks as sequence-to-sequence learning problems, which rely on large-scale parallel training corpus. In this paper, we present an iterative inplace editing approach for text revision, which requires no parallel data. In this approach, we simply fine-tune a pre-trained Transformer with masked language modeling and attribute classification. During inference, the editing at each iteration is realized by two-step span replacement. At the first step, the distributed representation of the text optimizes on the fly towards an attribute function. At the second step, a text span is masked and another new one is proposed conditioned on the optimized representation. The empirical experiments on two typical and important text revision tasks, text formalization and text simplification, show the effectiveness of our approach. It achieves competitive and even better performance than state-of-the-art supervised methods on text simplification, and gains better performance than strong unsupervised methods on text formalization.
%R 10.18653/v1/2022.in2writing-1.7
%U https://aclanthology.org/2022.in2writing-1.7
%U https://doi.org/10.18653/v1/2022.in2writing-1.7
%P 58-59
Markdown (Informal)
[Text Revision by On-the-Fly Representation Optimization](https://aclanthology.org/2022.in2writing-1.7) (Li et al., In2Writing 2022)
ACL
- Jingjing Li, Zichao Li, Tao Ge, Irwin King, and Michael Lyu. 2022. Text Revision by On-the-Fly Representation Optimization. In Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022), pages 58–59, Dublin, Ireland. Association for Computational Linguistics.