@inproceedings{jindal-etal-2020-augmenting,
    title = "Augmenting {NLP} models using Latent Feature Interpolations",
    author = "Jindal, Amit and
      Ghosh Chowdhury, Arijit and
      Didolkar, Aniket and
      Jin, Di and
      Sawhney, Ramit and
      Shah, Rajiv Ratn",
    editor = "Scott, Donia and
      Bel, Nuria and
      Zong, Chengqing",
    booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
    month = dec,
    year = "2020",
    address = "Barcelona, Spain (Online)",
    publisher = "International Committee on Computational Linguistics",
    url = "https://aclanthology.org/2020.coling-main.611",
    doi = "10.18653/v1/2020.coling-main.611",
    pages = "6931--6936",
    abstract = "Models with a large number of parameters are prone to over-fitting and often fail to capture the underlying input distribution. We introduce Emix, a data augmentation method that uses interpolations of word embeddings and hidden layer representations to construct virtual examples. We show that Emix yields significant improvements over previously used interpolation-based regularizers and data augmentation techniques. We also demonstrate that our proposed method is more robust to sparsification. We highlight the merits of our proposed methodology by performing thorough quantitative and qualitative assessments.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="jindal-etal-2020-augmenting">
    <titleInfo>
      <title>Augmenting NLP models using Latent Feature Interpolations</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Amit</namePart>
      <namePart type="family">Jindal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Arijit</namePart>
      <namePart type="family">Ghosh Chowdhury</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aniket</namePart>
      <namePart type="family">Didolkar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Di</namePart>
      <namePart type="family">Jin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ramit</namePart>
      <namePart type="family">Sawhney</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Rajiv</namePart>
      <namePart type="given">Ratn</namePart>
      <namePart type="family">Shah</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 28th International Conference on Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Donia</namePart>
        <namePart type="family">Scott</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nuria</namePart>
        <namePart type="family">Bel</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chengqing</namePart>
        <namePart type="family">Zong</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>International Committee on Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Models with a large number of parameters are prone to over-fitting and often fail to capture the underlying input distribution. We introduce Emix, a data augmentation method that uses interpolations of word embeddings and hidden layer representations to construct virtual examples. We show that Emix yields significant improvements over previously used interpolation-based regularizers and data augmentation techniques. We also demonstrate that our proposed method is more robust to sparsification. We highlight the merits of our proposed methodology by performing thorough quantitative and qualitative assessments.</abstract>
    <identifier type="citekey">jindal-etal-2020-augmenting</identifier>
    <identifier type="doi">10.18653/v1/2020.coling-main.611</identifier>
    <location>
      <url>https://aclanthology.org/2020.coling-main.611</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>6931</start>
        <end>6936</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Augmenting NLP models using Latent Feature Interpolations
%A Jindal, Amit
%A Ghosh Chowdhury, Arijit
%A Didolkar, Aniket
%A Jin, Di
%A Sawhney, Ramit
%A Shah, Rajiv Ratn
%Y Scott, Donia
%Y Bel, Nuria
%Y Zong, Chengqing
%S Proceedings of the 28th International Conference on Computational Linguistics
%D 2020
%8 December
%I International Committee on Computational Linguistics
%C Barcelona, Spain (Online)
%F jindal-etal-2020-augmenting
%X Models with a large number of parameters are prone to over-fitting and often fail to capture the underlying input distribution. We introduce Emix, a data augmentation method that uses interpolations of word embeddings and hidden layer representations to construct virtual examples. We show that Emix yields significant improvements over previously used interpolation-based regularizers and data augmentation techniques. We also demonstrate that our proposed method is more robust to sparsification. We highlight the merits of our proposed methodology by performing thorough quantitative and qualitative assessments.
%R 10.18653/v1/2020.coling-main.611
%U https://aclanthology.org/2020.coling-main.611
%U https://doi.org/10.18653/v1/2020.coling-main.611
%P 6931-6936
Markdown (Informal)
[Augmenting NLP models using Latent Feature Interpolations](https://aclanthology.org/2020.coling-main.611) (Jindal et al., COLING 2020)
ACL
- Amit Jindal, Arijit Ghosh Chowdhury, Aniket Didolkar, Di Jin, Ramit Sawhney, and Rajiv Ratn Shah. 2020. Augmenting NLP models using Latent Feature Interpolations. In Proceedings of the 28th International Conference on Computational Linguistics, pages 6931–6936, Barcelona, Spain (Online). International Committee on Computational Linguistics.
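For readers who want a concrete picture of the latent-feature interpolation the abstract describes, the sketch below shows a generic mixup-style interpolation of word embeddings or hidden states in a toy text classifier. It is an illustrative assumption based only on the abstract, not the authors' Emix implementation; all names in it (`MixupTextClassifier`, `mixup_step`, `mix_layer`, `alpha`, `lam`) are hypothetical.

```python
# Hedged sketch: mixup-style interpolation of embeddings / hidden states,
# in the spirit of the latent-feature interpolation the abstract describes.
# Not the authors' Emix code; all names here are hypothetical.
import torch
import torch.nn as nn
import torch.nn.functional as F


class MixupTextClassifier(nn.Module):
    """Toy classifier that can blend either the embedded inputs or a hidden
    layer between each example and a randomly paired example in the batch."""

    def __init__(self, vocab_size=10000, embed_dim=128, hidden_dim=256, num_classes=2):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.encoder = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.classifier = nn.Linear(hidden_dim, num_classes)

    def forward(self, tokens, perm=None, lam=1.0, mix_layer="embedding"):
        x = self.embed(tokens)                   # (batch, seq_len, embed_dim)
        if perm is not None and mix_layer == "embedding":
            x = lam * x + (1.0 - lam) * x[perm]  # interpolate word embeddings
        _, (h, _) = self.encoder(x)              # h: (1, batch, hidden_dim)
        h = h.squeeze(0)
        if perm is not None and mix_layer == "hidden":
            h = lam * h + (1.0 - lam) * h[perm]  # interpolate hidden representations
        return self.classifier(h)


def mixup_step(model, tokens, labels, alpha=0.2, mix_layer="hidden"):
    """One training step on interpolated (virtual) examples."""
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    perm = torch.randperm(tokens.size(0))        # pair each example with another
    logits = model(tokens, perm=perm, lam=lam, mix_layer=mix_layer)
    # The loss is blended with the same coefficient as the features.
    return lam * F.cross_entropy(logits, labels) + \
           (1.0 - lam) * F.cross_entropy(logits, labels[perm])


if __name__ == "__main__":
    model = MixupTextClassifier()
    tokens = torch.randint(0, 10000, (8, 20))    # fake batch of token ids
    labels = torch.randint(0, 2, (8,))
    loss = mixup_step(model, tokens, labels)
    loss.backward()
    print(f"interpolated-batch loss: {loss.item():.4f}")
```

As in mixup-style training generally, the same coefficient `lam` that blends the features also blends the two paired examples' losses, so each virtual example carries a correspondingly blended supervision signal.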