BibTeX
@inproceedings{gantt-etal-2020-natural,
title = "Natural Language Inference with Mixed Effects",
author = "Gantt, William and
Kane, Benjamin and
White, Aaron Steven",
editor = "Gurevych, Iryna and
Apidianaki, Marianna and
Faruqui, Manaal",
booktitle = "Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.starsem-1.9",
pages = "81--87",
abstract = "There is growing evidence that the prevalence of disagreement in the raw annotations used to construct natural language inference datasets makes the common practice of aggregating those annotations to a single label problematic. We propose a generic method that allows one to skip the aggregation step and train on the raw annotations directly without subjecting the model to unwanted noise that can arise from annotator response biases. We demonstrate that this method, which generalizes the notion of a mixed effects model by incorporating annotator random effects into any existing neural model, improves performance over models that do not incorporate such effects.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="gantt-etal-2020-natural">
    <titleInfo>
      <title>Natural Language Inference with Mixed Effects</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">William</namePart>
      <namePart type="family">Gantt</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Benjamin</namePart>
      <namePart type="family">Kane</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aaron</namePart>
      <namePart type="given">Steven</namePart>
      <namePart type="family">White</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Iryna</namePart>
        <namePart type="family">Gurevych</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marianna</namePart>
        <namePart type="family">Apidianaki</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Manaal</namePart>
        <namePart type="family">Faruqui</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>There is growing evidence that the prevalence of disagreement in the raw annotations used to construct natural language inference datasets makes the common practice of aggregating those annotations to a single label problematic. We propose a generic method that allows one to skip the aggregation step and train on the raw annotations directly without subjecting the model to unwanted noise that can arise from annotator response biases. We demonstrate that this method, which generalizes the notion of a mixed effects model by incorporating annotator random effects into any existing neural model, improves performance over models that do not incorporate such effects.</abstract>
    <identifier type="citekey">gantt-etal-2020-natural</identifier>
    <location>
      <url>https://aclanthology.org/2020.starsem-1.9</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>81</start>
        <end>87</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Natural Language Inference with Mixed Effects
%A Gantt, William
%A Kane, Benjamin
%A White, Aaron Steven
%Y Gurevych, Iryna
%Y Apidianaki, Marianna
%Y Faruqui, Manaal
%S Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics
%D 2020
%8 December
%I Association for Computational Linguistics
%C Barcelona, Spain (Online)
%F gantt-etal-2020-natural
%X There is growing evidence that the prevalence of disagreement in the raw annotations used to construct natural language inference datasets makes the common practice of aggregating those annotations to a single label problematic. We propose a generic method that allows one to skip the aggregation step and train on the raw annotations directly without subjecting the model to unwanted noise that can arise from annotator response biases. We demonstrate that this method, which generalizes the notion of a mixed effects model by incorporating annotator random effects into any existing neural model, improves performance over models that do not incorporate such effects.
%U https://aclanthology.org/2020.starsem-1.9
%P 81-87
Markdown (Informal)
[Natural Language Inference with Mixed Effects](https://aclanthology.org/2020.starsem-1.9) (Gantt et al., *SEM 2020)

ACL
William Gantt, Benjamin Kane, and Aaron Steven White. 2020. Natural Language Inference with Mixed Effects. In Proceedings of the Ninth Joint Conference on Lexical and Computational Semantics, pages 81–87, Barcelona, Spain (Online). Association for Computational Linguistics.
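
The abstract describes training directly on raw, unaggregated annotations by adding annotator random effects to any existing neural model. Below is a minimal sketch of that idea, not the authors' implementation: a per-annotator bias over the label logits of an arbitrary sentence-pair encoder, with an L2 penalty standing in for a zero-mean prior on the random effects. The class name `MixedEffectsNLI`, the encoder interface, and all hyperparameters are illustrative assumptions (PyTorch assumed).

```python
# Hypothetical sketch of annotator random effects on top of an NLI classifier,
# following the idea in the paper's abstract; not the authors' code.
import torch
import torch.nn as nn

class MixedEffectsNLI(nn.Module):
    """Wraps any encoder mapping (premise, hypothesis) pairs to label logits,
    adding a per-annotator random-effect bias to those logits."""

    def __init__(self, encoder: nn.Module, num_annotators: int, num_labels: int = 3):
        super().__init__()
        self.encoder = encoder  # "fixed effects": any existing neural NLI model
        # Random effects: one bias vector per annotator, initialized near zero.
        self.annotator_bias = nn.Embedding(num_annotators, num_labels)
        nn.init.normal_(self.annotator_bias.weight, std=0.01)

    def forward(self, premise, hypothesis, annotator_ids):
        logits = self.encoder(premise, hypothesis)             # (batch, num_labels)
        return logits + self.annotator_bias(annotator_ids)     # shift by annotator effect

def mixed_effects_loss(logits, raw_labels, model, reg_weight=1e-3):
    # Cross-entropy against raw (unaggregated) annotations, plus an L2 penalty
    # acting as a zero-mean Gaussian prior that shrinks the random effects.
    ce = nn.functional.cross_entropy(logits, raw_labels)
    prior = model.annotator_bias.weight.pow(2).mean()
    return ce + reg_weight * prior
```

At prediction time, the annotator term would be dropped (i.e., set to its zero mean), so outputs reflect only the shared encoder rather than any individual annotator's response bias.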