@inproceedings{gao-etal-2022-simulating,
    title = "Simulating Bandit Learning from User Feedback for Extractive Question Answering",
    author = "Gao, Ge and
      Choi, Eunsol and
      Artzi, Yoav",
    booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.acl-long.355",
    doi = "10.18653/v1/2022.acl-long.355",
    pages = "5167--5179",
    abstract = "We study learning from user feedback for extractive question answering by simulating feedback using supervised data. We cast the problem as contextual bandit learning, and analyze the characteristics of several learning scenarios with focus on reducing data annotation. We show that systems initially trained on few examples can dramatically improve given feedback from users on model-predicted answers, and that one can use existing datasets to deploy systems in new domains without any annotation effort, but instead improving the system on-the-fly via user feedback.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gao-etal-2022-simulating">
<titleInfo>
<title>Simulating Bandit Learning from User Feedback for Extractive Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ge</namePart>
<namePart type="family">Gao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eunsol</namePart>
<namePart type="family">Choi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Artzi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We study learning from user feedback for extractive question answering by simulating feedback using supervised data. We cast the problem as contextual bandit learning, and analyze the characteristics of several learning scenarios with focus on reducing data annotation. We show that systems initially trained on few examples can dramatically improve given feedback from users on model-predicted answers, and that one can use existing datasets to deploy systems in new domains without any annotation effort, but instead improving the system on-the-fly via user feedback.</abstract>
<identifier type="citekey">gao-etal-2022-simulating</identifier>
<identifier type="doi">10.18653/v1/2022.acl-long.355</identifier>
<location>
<url>https://aclanthology.org/2022.acl-long.355</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>5167</start>
<end>5179</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Simulating Bandit Learning from User Feedback for Extractive Question Answering
%A Gao, Ge
%A Choi, Eunsol
%A Artzi, Yoav
%S Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F gao-etal-2022-simulating
%X We study learning from user feedback for extractive question answering by simulating feedback using supervised data. We cast the problem as contextual bandit learning, and analyze the characteristics of several learning scenarios with focus on reducing data annotation. We show that systems initially trained on few examples can dramatically improve given feedback from users on model-predicted answers, and that one can use existing datasets to deploy systems in new domains without any annotation effort, but instead improving the system on-the-fly via user feedback.
%R 10.18653/v1/2022.acl-long.355
%U https://aclanthology.org/2022.acl-long.355
%U https://doi.org/10.18653/v1/2022.acl-long.355
%P 5167-5179
Markdown (Informal)
[Simulating Bandit Learning from User Feedback for Extractive Question Answering](https://aclanthology.org/2022.acl-long.355) (Gao et al., ACL 2022)
ACL
Ge Gao, Eunsol Choi, and Yoav Artzi. 2022. Simulating Bandit Learning from User Feedback for Extractive Question Answering. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5167–5179, Dublin, Ireland. Association for Computational Linguistics.
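
As the abstract describes, the paper casts extractive question answering as contextual bandit learning: the model proposes an answer span, a simulated user judges it against supervised data, and the model is updated from that feedback alone. Below is a minimal sketch of such a setup, assuming a HuggingFace-style span-prediction model, a REINFORCE-style update, and a binary exact-match reward; the function names, the reward shape, and the usage snippet are illustrative assumptions, not the paper's exact implementation.

```python
import torch


def simulate_feedback(predicted_answer: str, gold_answer: str) -> float:
    """Simulated user feedback: 1.0 if the predicted answer matches the gold span.

    Binary exact match is a simplification; the paper simulates feedback from
    supervised data, and softer rewards (e.g. span overlap) are also possible.
    """
    return 1.0 if predicted_answer.strip().lower() == gold_answer.strip().lower() else 0.0


def bandit_update(model, tokenizer, optimizer, question, context, gold_answer):
    """One bandit-style update: sample a span, observe feedback, reweight its log-probability."""
    enc = tokenizer(question, context, return_tensors="pt", truncation=True)
    out = model(**enc)

    # Policy over answers: independent categorical distributions over start/end positions.
    start_dist = torch.distributions.Categorical(logits=out.start_logits[0])
    end_dist = torch.distributions.Categorical(logits=out.end_logits[0])
    start, end = start_dist.sample(), end_dist.sample()

    predicted = tokenizer.decode(enc["input_ids"][0][start : end + 1])
    reward = simulate_feedback(predicted, gold_answer)  # stands in for a real user

    # Policy-gradient loss: increase the log-probability of spans that received positive feedback.
    log_prob = start_dist.log_prob(start) + end_dist.log_prob(end)
    loss = -reward * log_prob

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return predicted, reward


# Assumed usage with a HuggingFace QA model (checkpoint and hyperparameters are illustrative):
#   from transformers import AutoModelForQuestionAnswering, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
#   qa = AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2")
#   opt = torch.optim.AdamW(qa.parameters(), lr=1e-5)
#   bandit_update(qa, tok, opt, "Who wrote Hamlet?",
#                 "Hamlet is a tragedy written by William Shakespeare.", "William Shakespeare")
```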