@inproceedings{montella-etal-2022-transfer,
title = "Transfer Learning and Masked Generation for Answer Verbalization",
author = "Montella, Sebastien and
Rojas-Barahona, Lina and
Bechet, Frederic and
Heinecke, Johannes and
Nasr, Alexis",
editor = "Chen, Wenhu and
Chen, Xinyun and
Chen, Zhiyu and
Yao, Ziyu and
Yasunaga, Michihiro and
Yu, Tao and
Zhang, Rui",
booktitle = "Proceedings of the Workshop on Structured and Unstructured Knowledge Integration (SUKI)",
month = jul,
year = "2022",
address = "Seattle, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.suki-1.6",
doi = "10.18653/v1/2022.suki-1.6",
pages = "47--54",
abstract = "Structured Knowledge has recently emerged as an essential component to support fine-grained Question Answering (QA). In general, QA systems query a Knowledge Base (KB) to detect and extract the raw answers as the final prediction. However, as raw answers lack context, language generation can offer a much more informative and complete response. In this paper, we propose to combine the power of transfer learning and the advantage of entity placeholders to produce high-quality verbalization of extracted answers from a KB. We claim that such an approach is especially well-suited for answer generation. Our experiments show 44.25{\%}, 3.26{\%} and 29.10{\%} relative gain in BLEU over the state-of-the-art on the VQuAnDA, ParaQA and VANiLLa datasets, respectively. We additionally provide minor hallucination corrections in VANiLLa accounting for 5{\%} of each of the training and testing set. We witness a median absolute gain of 0.81 SacreBLEU. This strengthens the importance of data quality when using automated evaluation.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="montella-etal-2022-transfer">
<titleInfo>
<title>Transfer Learning and Masked Generation for Answer Verbalization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sebastien</namePart>
<namePart type="family">Montella</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lina</namePart>
<namePart type="family">Rojas-Barahona</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Frederic</namePart>
<namePart type="family">Bechet</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Johannes</namePart>
<namePart type="family">Heinecke</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexis</namePart>
<namePart type="family">Nasr</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on Structured and Unstructured Knowledge Integration (SUKI)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wenhu</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xinyun</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhiyu</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ziyu</namePart>
<namePart type="family">Yao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michihiro</namePart>
<namePart type="family">Yasunaga</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tao</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rui</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Structured Knowledge has recently emerged as an essential component to support fine-grained Question Answering (QA). In general, QA systems query a Knowledge Base (KB) to detect and extract the raw answers as the final prediction. However, as raw answers lack context, language generation can offer a much more informative and complete response. In this paper, we propose to combine the power of transfer learning and the advantage of entity placeholders to produce high-quality verbalization of extracted answers from a KB. We claim that such an approach is especially well-suited for answer generation. Our experiments show 44.25%, 3.26% and 29.10% relative gain in BLEU over the state-of-the-art on the VQuAnDA, ParaQA and VANiLLa datasets, respectively. We additionally provide minor hallucination corrections in VANiLLa accounting for 5% of each of the training and testing set. We witness a median absolute gain of 0.81 SacreBLEU. This strengthens the importance of data quality when using automated evaluation.</abstract>
<identifier type="citekey">montella-etal-2022-transfer</identifier>
<identifier type="doi">10.18653/v1/2022.suki-1.6</identifier>
<location>
<url>https://aclanthology.org/2022.suki-1.6</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>47</start>
<end>54</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Transfer Learning and Masked Generation for Answer Verbalization
%A Montella, Sebastien
%A Rojas-Barahona, Lina
%A Bechet, Frederic
%A Heinecke, Johannes
%A Nasr, Alexis
%Y Chen, Wenhu
%Y Chen, Xinyun
%Y Chen, Zhiyu
%Y Yao, Ziyu
%Y Yasunaga, Michihiro
%Y Yu, Tao
%Y Zhang, Rui
%S Proceedings of the Workshop on Structured and Unstructured Knowledge Integration (SUKI)
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, USA
%F montella-etal-2022-transfer
%X Structured Knowledge has recently emerged as an essential component to support fine-grained Question Answering (QA). In general, QA systems query a Knowledge Base (KB) to detect and extract the raw answers as the final prediction. However, as raw answers lack context, language generation can offer a much more informative and complete response. In this paper, we propose to combine the power of transfer learning and the advantage of entity placeholders to produce high-quality verbalization of extracted answers from a KB. We claim that such an approach is especially well-suited for answer generation. Our experiments show 44.25%, 3.26% and 29.10% relative gain in BLEU over the state-of-the-art on the VQuAnDA, ParaQA and VANiLLa datasets, respectively. We additionally provide minor hallucination corrections in VANiLLa accounting for 5% of each of the training and testing set. We witness a median absolute gain of 0.81 SacreBLEU. This strengthens the importance of data quality when using automated evaluation.
%R 10.18653/v1/2022.suki-1.6
%U https://aclanthology.org/2022.suki-1.6
%U https://doi.org/10.18653/v1/2022.suki-1.6
%P 47-54
Markdown (Informal)
[Transfer Learning and Masked Generation for Answer Verbalization](https://aclanthology.org/2022.suki-1.6) (Montella et al., SUKI 2022)
ACL
- Sebastien Montella, Lina Rojas-Barahona, Frederic Bechet, Johannes Heinecke, and Alexis Nasr. 2022. Transfer Learning and Masked Generation for Answer Verbalization. In Proceedings of the Workshop on Structured and Unstructured Knowledge Integration (SUKI), pages 47–54, Seattle, USA. Association for Computational Linguistics.