@inproceedings{ruiter-etal-2022-exploiting,
    title     = {Exploiting Social Media Content for Self-Supervised Style Transfer},
    author    = {Ruiter, Dana and
                 Kleinbauer, Thomas and
                 Espa{\~n}a-Bonet, Cristina and
                 van Genabith, Josef and
                 Klakow, Dietrich},
    editor    = {Ku, Lun-Wei and
                 Li, Cheng-Te and
                 Tsai, Yu-Che and
                 Wang, Wei-Yao},
    booktitle = {Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media},
    month     = jul,
    year      = {2022},
    address   = {Seattle, Washington},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.socialnlp-1.2/},
    doi       = {10.18653/v1/2022.socialnlp-1.2},
    pages     = {11--34},
    abstract  = {Recent research on style transfer takes inspiration from unsupervised neural machine translation (UNMT), learning from large amounts of non-parallel data by exploiting cycle consistency loss, back-translation, and denoising autoencoders. By contrast, the use of self-supervised NMT (SSNMT), which leverages (near) parallel instances hidden in non-parallel data more efficiently than UNMT, has not yet been explored for style transfer. In this paper we present a novel Self-Supervised Style Transfer (3ST) model, which augments SSNMT with UNMT methods in order to identify and efficiently exploit supervisory signals in non-parallel social media posts. We compare 3ST with state-of-the-art (SOTA) style transfer models across civil rephrasing, formality and polarity tasks. We show that 3ST is able to balance the three major objectives (fluency, content preservation, attribute transfer accuracy) the best, outperforming SOTA models on averaged performance across their tested tasks in automatic and human evaluation.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ruiter-etal-2022-exploiting">
<titleInfo>
<title>Exploiting Social Media Content for Self-Supervised Style Transfer</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dana</namePart>
<namePart type="family">Ruiter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thomas</namePart>
<namePart type="family">Kleinbauer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cristina</namePart>
<namePart type="family">España-Bonet</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Josef</namePart>
<namePart type="family">van Genabith</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dietrich</namePart>
<namePart type="family">Klakow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cheng-Te</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yu-Che</namePart>
<namePart type="family">Tsai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei-Yao</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, Washington</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent research on style transfer takes inspiration from unsupervised neural machine translation (UNMT), learning from large amounts of non-parallel data by exploiting cycle consistency loss, back-translation, and denoising autoencoders. By contrast, the use of self-supervised NMT (SSNMT), which leverages (near) parallel instances hidden in non-parallel data more efficiently than UNMT, has not yet been explored for style transfer. In this paper we present a novel Self-Supervised Style Transfer (3ST) model, which augments SSNMT with UNMT methods in order to identify and efficiently exploit supervisory signals in non-parallel social media posts. We compare 3ST with state-of-the-art (SOTA) style transfer models across civil rephrasing, formality and polarity tasks. We show that 3ST is able to balance the three major objectives (fluency, content preservation, attribute transfer accuracy) the best, outperforming SOTA models on averaged performance across their tested tasks in automatic and human evaluation.</abstract>
<identifier type="citekey">ruiter-etal-2022-exploiting</identifier>
<identifier type="doi">10.18653/v1/2022.socialnlp-1.2</identifier>
<location>
<url>https://aclanthology.org/2022.socialnlp-1.2/</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>11</start>
<end>34</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Exploiting Social Media Content for Self-Supervised Style Transfer
%A Ruiter, Dana
%A Kleinbauer, Thomas
%A España-Bonet, Cristina
%A van Genabith, Josef
%A Klakow, Dietrich
%Y Ku, Lun-Wei
%Y Li, Cheng-Te
%Y Tsai, Yu-Che
%Y Wang, Wei-Yao
%S Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, Washington
%F ruiter-etal-2022-exploiting
%X Recent research on style transfer takes inspiration from unsupervised neural machine translation (UNMT), learning from large amounts of non-parallel data by exploiting cycle consistency loss, back-translation, and denoising autoencoders. By contrast, the use of self-supervised NMT (SSNMT), which leverages (near) parallel instances hidden in non-parallel data more efficiently than UNMT, has not yet been explored for style transfer. In this paper we present a novel Self-Supervised Style Transfer (3ST) model, which augments SSNMT with UNMT methods in order to identify and efficiently exploit supervisory signals in non-parallel social media posts. We compare 3ST with state-of-the-art (SOTA) style transfer models across civil rephrasing, formality and polarity tasks. We show that 3ST is able to balance the three major objectives (fluency, content preservation, attribute transfer accuracy) the best, outperforming SOTA models on averaged performance across their tested tasks in automatic and human evaluation.
%R 10.18653/v1/2022.socialnlp-1.2
%U https://aclanthology.org/2022.socialnlp-1.2/
%U https://doi.org/10.18653/v1/2022.socialnlp-1.2
%P 11-34
Markdown (Informal)
[Exploiting Social Media Content for Self-Supervised Style Transfer](https://aclanthology.org/2022.socialnlp-1.2/) (Ruiter et al., SocialNLP 2022)
ACL
- Dana Ruiter, Thomas Kleinbauer, Cristina España-Bonet, Josef van Genabith, and Dietrich Klakow. 2022. Exploiting Social Media Content for Self-Supervised Style Transfer. In Proceedings of the Tenth International Workshop on Natural Language Processing for Social Media, pages 11–34, Seattle, Washington. Association for Computational Linguistics.