@inproceedings{min-etal-2020-advancing,
    title = "Advancing Seq2seq with Joint Paraphrase Learning",
    author = "Min, So Yeon  and
      Raghavan, Preethi  and
      Szolovits, Peter",
    editor = "Rumshisky, Anna  and
      Roberts, Kirk  and
      Bethard, Steven  and
      Naumann, Tristan",
    booktitle = "Proceedings of the 3rd Clinical Natural Language Processing Workshop",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.clinicalnlp-1.30",
    doi = "10.18653/v1/2020.clinicalnlp-1.30",
    pages = "269--279",
    abstract = "We address the problem of model generalization for sequence to sequence (seq2seq) architectures. We propose going beyond data augmentation via paraphrase-optimized multi-task learning and observe that it is useful in correctly handling unseen sentential paraphrases as inputs. Our models greatly outperform SOTA seq2seq models for semantic parsing on diverse domains (Overnight - up to 3.2{\%} and emrQA - 7{\%}) and Nematus, the winning solution for WMT 2017, for Czech to English translation (CzENG 1.6 - 1.5 BLEU).",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="min-etal-2020-advancing">
    <titleInfo>
      <title>Advancing Seq2seq with Joint Paraphrase Learning</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">So</namePart>
      <namePart type="given">Yeon</namePart>
      <namePart type="family">Min</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Preethi</namePart>
      <namePart type="family">Raghavan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Peter</namePart>
      <namePart type="family">Szolovits</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 3rd Clinical Natural Language Processing Workshop</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Rumshisky</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kirk</namePart>
        <namePart type="family">Roberts</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Steven</namePart>
        <namePart type="family">Bethard</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tristan</namePart>
        <namePart type="family">Naumann</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We address the problem of model generalization for sequence to sequence (seq2seq) architectures. We propose going beyond data augmentation via paraphrase-optimized multi-task learning and observe that it is useful in correctly handling unseen sentential paraphrases as inputs. Our models greatly outperform SOTA seq2seq models for semantic parsing on diverse domains (Overnight - up to 3.2% and emrQA - 7%) and Nematus, the winning solution for WMT 2017, for Czech to English translation (CzENG 1.6 - 1.5 BLEU).</abstract>
    <identifier type="citekey">min-etal-2020-advancing</identifier>
    <identifier type="doi">10.18653/v1/2020.clinicalnlp-1.30</identifier>
    <location>
      <url>https://aclanthology.org/2020.clinicalnlp-1.30</url>
    </location>
    <part>
      <date>2020-11</date>
      <extent unit="page">
        <start>269</start>
        <end>279</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Advancing Seq2seq with Joint Paraphrase Learning
%A Min, So Yeon
%A Raghavan, Preethi
%A Szolovits, Peter
%Y Rumshisky, Anna
%Y Roberts, Kirk
%Y Bethard, Steven
%Y Naumann, Tristan
%S Proceedings of the 3rd Clinical Natural Language Processing Workshop
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F min-etal-2020-advancing
%X We address the problem of model generalization for sequence to sequence (seq2seq) architectures. We propose going beyond data augmentation via paraphrase-optimized multi-task learning and observe that it is useful in correctly handling unseen sentential paraphrases as inputs. Our models greatly outperform SOTA seq2seq models for semantic parsing on diverse domains (Overnight - up to 3.2% and emrQA - 7%) and Nematus, the winning solution for WMT 2017, for Czech to English translation (CzENG 1.6 - 1.5 BLEU).
%R 10.18653/v1/2020.clinicalnlp-1.30
%U https://aclanthology.org/2020.clinicalnlp-1.30
%U https://doi.org/10.18653/v1/2020.clinicalnlp-1.30
%P 269-279
Markdown (Informal)
[Advancing Seq2seq with Joint Paraphrase Learning](https://aclanthology.org/2020.clinicalnlp-1.30) (Min et al., ClinicalNLP 2020)

ACL
So Yeon Min, Preethi Raghavan, and Peter Szolovits. 2020. [Advancing Seq2seq with Joint Paraphrase Learning](https://aclanthology.org/2020.clinicalnlp-1.30). In *Proceedings of the 3rd Clinical Natural Language Processing Workshop*, pages 269–279, Online. Association for Computational Linguistics.