@inproceedings{hartmann-sonntag-2022-survey,
title = "A survey on improving {NLP} models with human explanations",
author = "Hartmann, Mareike and
Sonntag, Daniel",
editor = "Andreas, Jacob and
Narasimhan, Karthik and
Nematzadeh, Aida",
booktitle = "Proceedings of the First Workshop on Learning with Natural Language Supervision",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.lnls-1.5",
doi = "10.18653/v1/2022.lnls-1.5",
pages = "40--47",
abstract = "Training a model with access to human explanations can improve data efficiency and model performance on in- and out-of-domain data. Adding to these empirical findings, similarity with the process of human learning makes learning from explanations a promising way to establish a fruitful human-machine interaction. Several methods have been proposed for improving natural language processing (NLP) models with human explanations, that rely on different explanation types and mechanism for integrating these explanations into the learning process. These methods are rarely compared with each other, making it hard for practitioners to choose the best combination of explanation type and integration mechanism for a specific use-case. In this paper, we give an overview of different methods for learning from human explanations, and discuss different factors that can inform the decision of which method to choose for a specific use-case.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hartmann-sonntag-2022-survey">
<titleInfo>
<title>A survey on improving NLP models with human explanations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mareike</namePart>
<namePart type="family">Hartmann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Sonntag</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Learning with Natural Language Supervision</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jacob</namePart>
<namePart type="family">Andreas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Karthik</namePart>
<namePart type="family">Narasimhan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aida</namePart>
<namePart type="family">Nematzadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Training a model with access to human explanations can improve data efficiency and model performance on in- and out-of-domain data. In addition to these empirical findings, the similarity to the process of human learning makes learning from explanations a promising way to establish fruitful human-machine interaction. Several methods have been proposed for improving natural language processing (NLP) models with human explanations, which rely on different explanation types and mechanisms for integrating these explanations into the learning process. These methods are rarely compared with each other, making it hard for practitioners to choose the best combination of explanation type and integration mechanism for a specific use case. In this paper, we give an overview of different methods for learning from human explanations and discuss factors that can inform the decision of which method to choose for a specific use case.</abstract>
<identifier type="citekey">hartmann-sonntag-2022-survey</identifier>
<identifier type="doi">10.18653/v1/2022.lnls-1.5</identifier>
<location>
<url>https://aclanthology.org/2022.lnls-1.5</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>40</start>
<end>47</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A survey on improving NLP models with human explanations
%A Hartmann, Mareike
%A Sonntag, Daniel
%Y Andreas, Jacob
%Y Narasimhan, Karthik
%Y Nematzadeh, Aida
%S Proceedings of the First Workshop on Learning with Natural Language Supervision
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F hartmann-sonntag-2022-survey
%X Training a model with access to human explanations can improve data efficiency and model performance on in- and out-of-domain data. In addition to these empirical findings, the similarity to the process of human learning makes learning from explanations a promising way to establish fruitful human-machine interaction. Several methods have been proposed for improving natural language processing (NLP) models with human explanations, which rely on different explanation types and mechanisms for integrating these explanations into the learning process. These methods are rarely compared with each other, making it hard for practitioners to choose the best combination of explanation type and integration mechanism for a specific use case. In this paper, we give an overview of different methods for learning from human explanations and discuss factors that can inform the decision of which method to choose for a specific use case.
%R 10.18653/v1/2022.lnls-1.5
%U https://aclanthology.org/2022.lnls-1.5
%U https://doi.org/10.18653/v1/2022.lnls-1.5
%P 40-47
Markdown (Informal)
[A survey on improving NLP models with human explanations](https://aclanthology.org/2022.lnls-1.5) (Hartmann & Sonntag, LNLS 2022)
ACL
Mareike Hartmann and Daniel Sonntag. 2022. A survey on improving NLP models with human explanations. In Proceedings of the First Workshop on Learning with Natural Language Supervision, pages 40–47, Dublin, Ireland. Association for Computational Linguistics.