@inproceedings{liu-sun-2023-end,
title = "End-to-end Adversarial Sample Generation for Data Augmentation",
author = "Liu, Tianyuan and
Sun, Yuqing",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-emnlp.760",
doi = "10.18653/v1/2023.findings-emnlp.760",
pages = "11359--11368",
abstract = "Adversarial samples pose a significant challenge to neural inference models. In this paper, we propose a novel enhancing approach A3 for the robustness of the neural NLP models, which combines the adversarial training and data augmentation. We propose an adversarial sample generator that consists of a conditioned paraphrasing model and a condition generator. The latter aims to generate conditions which guides the paraphrasing model to generate adversarial samples. A pretrained discriminator is introduced to help the adversarial sample generator adapt to the data characteristics for different tasks. We adopt a weighted loss to incorporate the generated adversarial samples with the original samples for augmented training. Compared to existing methods, our approach is much efficient since the generation process is independent to the target model and the generated samples are reusable for different models. Experimental results on several tasks show that our approach improves the overall performance of the trained model. Specially, the enhanced model is robust for various attacking techniques.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-sun-2023-end">
<titleInfo>
<title>End-to-end Adversarial Sample Generation for Data Augmentation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Tianyuan</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuqing</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Adversarial samples pose a significant challenge to neural inference models. In this paper, we propose A3, a novel approach for enhancing the robustness of neural NLP models that combines adversarial training and data augmentation. We propose an adversarial sample generator that consists of a conditioned paraphrasing model and a condition generator. The latter generates conditions that guide the paraphrasing model to produce adversarial samples. A pretrained discriminator is introduced to help the adversarial sample generator adapt to the data characteristics of different tasks. We adopt a weighted loss to combine the generated adversarial samples with the original samples for augmented training. Compared to existing methods, our approach is much more efficient, since the generation process is independent of the target model and the generated samples are reusable across different models. Experimental results on several tasks show that our approach improves the overall performance of the trained model. In particular, the enhanced model is robust against various attack techniques.</abstract>
<identifier type="citekey">liu-sun-2023-end</identifier>
<identifier type="doi">10.18653/v1/2023.findings-emnlp.760</identifier>
<location>
<url>https://aclanthology.org/2023.findings-emnlp.760</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>11359</start>
<end>11368</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T End-to-end Adversarial Sample Generation for Data Augmentation
%A Liu, Tianyuan
%A Sun, Yuqing
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F liu-sun-2023-end
%X Adversarial samples pose a significant challenge to neural inference models. In this paper, we propose A3, a novel approach for enhancing the robustness of neural NLP models that combines adversarial training and data augmentation. We propose an adversarial sample generator that consists of a conditioned paraphrasing model and a condition generator. The latter generates conditions that guide the paraphrasing model to produce adversarial samples. A pretrained discriminator is introduced to help the adversarial sample generator adapt to the data characteristics of different tasks. We adopt a weighted loss to combine the generated adversarial samples with the original samples for augmented training. Compared to existing methods, our approach is much more efficient, since the generation process is independent of the target model and the generated samples are reusable across different models. Experimental results on several tasks show that our approach improves the overall performance of the trained model. In particular, the enhanced model is robust against various attack techniques.
%R 10.18653/v1/2023.findings-emnlp.760
%U https://aclanthology.org/2023.findings-emnlp.760
%U https://doi.org/10.18653/v1/2023.findings-emnlp.760
%P 11359-11368
Markdown (Informal)
[End-to-end Adversarial Sample Generation for Data Augmentation](https://aclanthology.org/2023.findings-emnlp.760) (Liu & Sun, Findings 2023)
ACL
Tianyuan Liu and Yuqing Sun. 2023. End-to-end Adversarial Sample Generation for Data Augmentation. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11359–11368, Singapore. Association for Computational Linguistics.
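
The weighted-loss augmented training described in the abstract can be illustrated with a minimal sketch. Everything below is an assumption made for illustration only: the weight name lambda_adv, the batch layout, and cross-entropy as the task loss are not taken from the paper and do not reflect the authors' implementation.

# Hypothetical sketch: combine the loss on original samples with a weighted loss
# on generated adversarial samples, as the abstract describes. Names and details
# are illustrative assumptions, not the authors' code.
import torch
import torch.nn.functional as F

def augmented_training_step(model, optimizer, original_batch, adversarial_batch, lambda_adv=0.5):
    """One training step over original and generated adversarial samples."""
    x_orig, y_orig = original_batch       # original task samples
    x_adv, y_adv = adversarial_batch      # generated adversarial samples (reusable across models)

    loss_orig = F.cross_entropy(model(x_orig), y_orig)
    loss_adv = F.cross_entropy(model(x_adv), y_adv)

    # Weighted combination of the two losses for augmented training.
    loss = loss_orig + lambda_adv * loss_adv

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()

Because the adversarial samples are produced independently of the target model, the same adversarial_batch can be reused when training different models, which is the efficiency point made in the abstract.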