@inproceedings{lundberg-etal-2022-dialogue,
title = "Dialogue Summarization using {BART}",
author = "Lundberg, Conrad and
S{\'a}nchez Vi{\~n}uela, Leyre and
Biales, Siena",
editor = "Shaikh, Samira and
Ferreira, Thiago and
Stent, Amanda",
booktitle = "Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges",
month = jul,
year = "2022",
address = "Waterville, Maine, USA and virtual meeting",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.inlg-genchal.17",
pages = "121--125",
abstract = "This paper introduces the model and settings submitted to the INLG 2022 DialogSum Challenge, a shared task to generate summaries of real-life scenario dialogues between two people. In this paper, we explored using intermediate task transfer learning, reported speech, and the use of a supplementary dataset in addition to our base fine-tuned BART model. However, we did not use such a method in our final model, as none improved our results. Our final model for this dialogue task achieved scores only slightly below the top submission, with hidden test set scores of 49.62, 24.98, 46.25 and 91.54 for ROUGE-1, ROUGE-2, ROUGE-L and BERTSCORE respectively. The top submitted models will also receive human evaluation.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lundberg-etal-2022-dialogue">
<titleInfo>
<title>Dialogue Summarization using BART</title>
</titleInfo>
<name type="personal">
<namePart type="given">Conrad</namePart>
<namePart type="family">Lundberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leyre</namePart>
<namePart type="family">Sánchez Viñuela</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Siena</namePart>
<namePart type="family">Biales</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges</title>
</titleInfo>
<name type="personal">
<namePart type="given">Samira</namePart>
<namePart type="family">Shaikh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thiago</namePart>
<namePart type="family">Ferreira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amanda</namePart>
<namePart type="family">Stent</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Waterville, Maine, USA and virtual meeting</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper introduces the model and settings submitted to the INLG 2022 DialogSum Challenge, a shared task to generate summaries of real-life scenario dialogues between two people. In this paper, we explored using intermediate task transfer learning, reported speech, and the use of a supplementary dataset in addition to our base fine-tuned BART model. However, we did not use such a method in our final model, as none improved our results. Our final model for this dialogue task achieved scores only slightly below the top submission, with hidden test set scores of 49.62, 24.98, 46.25 and 91.54 for ROUGE-1, ROUGE-2, ROUGE-L and BERTSCORE respectively. The top submitted models will also receive human evaluation.</abstract>
<identifier type="citekey">lundberg-etal-2022-dialogue</identifier>
<location>
<url>https://aclanthology.org/2022.inlg-genchal.17</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>121</start>
<end>125</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Dialogue Summarization using BART
%A Lundberg, Conrad
%A Sánchez Viñuela, Leyre
%A Biales, Siena
%Y Shaikh, Samira
%Y Ferreira, Thiago
%Y Stent, Amanda
%S Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges
%D 2022
%8 July
%I Association for Computational Linguistics
%C Waterville, Maine, USA and virtual meeting
%F lundberg-etal-2022-dialogue
%X This paper introduces the model and settings submitted to the INLG 2022 DialogSum Challenge, a shared task to generate summaries of real-life scenario dialogues between two people. In this paper, we explored using intermediate task transfer learning, reported speech, and the use of a supplementary dataset in addition to our base fine-tuned BART model. However, we did not use such a method in our final model, as none improved our results. Our final model for this dialogue task achieved scores only slightly below the top submission, with hidden test set scores of 49.62, 24.98, 46.25 and 91.54 for ROUGE-1, ROUGE-2, ROUGE-L and BERTSCORE respectively. The top submitted models will also receive human evaluation.
%U https://aclanthology.org/2022.inlg-genchal.17
%P 121-125
Markdown (Informal)
[Dialogue Summarization using BART](https://aclanthology.org/2022.inlg-genchal.17) (Lundberg et al., INLG 2022)
ACL
- Conrad Lundberg, Leyre Sánchez Viñuela, and Siena Biales. 2022. Dialogue Summarization using BART. In Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges, pages 121–125, Waterville, Maine, USA and virtual meeting. Association for Computational Linguistics.