@inproceedings{papakostas-papadopoulou-2023-model,
title = "Model Analysis {\&} Evaluation for Ambiguous Question Answering",
author = "Papakostas, Konstantinos and
Papadopoulou, Irene",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-acl.279",
doi = "10.18653/v1/2023.findings-acl.279",
pages = "4570--4580",
abstract = "Ambiguous questions are a challenge for Question Answering models, as they require answers that cover multiple interpretations of the original query. To this end, these models are required to generate long-form answers that often combine conflicting pieces of information. Although recent advances in the field have shown strong capabilities in generating fluent responses, certain research questions remain unanswered. Does model/data scaling improve the answers{'} quality? Do automated metrics align with human judgment? To what extent do these models ground their answers in evidence? In this study, we aim to thoroughly investigate these aspects, and provide valuable insights into the limitations of the current approaches. To aid in reproducibility and further extension of our work, we open-source our code.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="papakostas-papadopoulou-2023-model">
<titleInfo>
    <title>Model Analysis &amp; Evaluation for Ambiguous Question Answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Konstantinos</namePart>
<namePart type="family">Papakostas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Irene</namePart>
<namePart type="family">Papadopoulou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2023</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rogers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jordan</namePart>
<namePart type="family">Boyd-Graber</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Naoaki</namePart>
<namePart type="family">Okazaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Toronto, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Ambiguous questions are a challenge for Question Answering models, as they require answers that cover multiple interpretations of the original query. To this end, these models are required to generate long-form answers that often combine conflicting pieces of information. Although recent advances in the field have shown strong capabilities in generating fluent responses, certain research questions remain unanswered. Does model/data scaling improve the answers’ quality? Do automated metrics align with human judgment? To what extent do these models ground their answers in evidence? In this study, we aim to thoroughly investigate these aspects, and provide valuable insights into the limitations of the current approaches. To aid in reproducibility and further extension of our work, we open-source our code.</abstract>
<identifier type="citekey">papakostas-papadopoulou-2023-model</identifier>
<identifier type="doi">10.18653/v1/2023.findings-acl.279</identifier>
<location>
<url>https://aclanthology.org/2023.findings-acl.279</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>4570</start>
<end>4580</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Model Analysis & Evaluation for Ambiguous Question Answering
%A Papakostas, Konstantinos
%A Papadopoulou, Irene
%Y Rogers, Anna
%Y Boyd-Graber, Jordan
%Y Okazaki, Naoaki
%S Findings of the Association for Computational Linguistics: ACL 2023
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F papakostas-papadopoulou-2023-model
%X Ambiguous questions are a challenge for Question Answering models, as they require answers that cover multiple interpretations of the original query. To this end, these models are required to generate long-form answers that often combine conflicting pieces of information. Although recent advances in the field have shown strong capabilities in generating fluent responses, certain research questions remain unanswered. Does model/data scaling improve the answers’ quality? Do automated metrics align with human judgment? To what extent do these models ground their answers in evidence? In this study, we aim to thoroughly investigate these aspects, and provide valuable insights into the limitations of the current approaches. To aid in reproducibility and further extension of our work, we open-source our code.
%R 10.18653/v1/2023.findings-acl.279
%U https://aclanthology.org/2023.findings-acl.279
%U https://doi.org/10.18653/v1/2023.findings-acl.279
%P 4570-4580
Markdown (Informal)
[Model Analysis & Evaluation for Ambiguous Question Answering](https://aclanthology.org/2023.findings-acl.279) (Papakostas & Papadopoulou, Findings 2023)
ACL