@inproceedings{park-etal-2022-leveraging,
title = "Leveraging Non-dialogue Summaries for Dialogue Summarization",
author = "Park, Seongmin and
Shin, Dongchan and
Lee, Jihwa",
editor = "Dernoncourt, Franck and
Nguyen, Thien Huu and
Lai, Viet Dac and
Veyseh, Amir Pouran Ben and
Bui, Trung H. and
Yoon, David Seunghyun",
booktitle = "Proceedings of the First Workshop On Transcript Understanding",
month = oct,
year = "2022",
address = "Gyeongju, South Korea",
publisher = "International Conference on Computational Linguistics",
url = "https://aclanthology.org/2022.tu-1.1",
pages = "1--7",
abstract = "To mitigate the lack of diverse dialogue summarization datasets in academia, we present methods to utilize non-dialogue summarization data for enhancing dialogue summarization systems. We apply transformations to document summarization data pairs to create training data that better befit dialogue summarization. The suggested transformations also retain desirable properties of non-dialogue datasets, such as improved faithfulness to the source text. We conduct extensive experiments across both English and Korean to verify our approach. Although absolute gains in ROUGE naturally plateau as more dialogue summarization samples are introduced, utilizing non-dialogue data for training significantly improves summarization performance in zero- and few-shot settings and enhances faithfulness across all training regimes.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="park-etal-2022-leveraging">
  <titleInfo>
    <title>Leveraging Non-dialogue Summaries for Dialogue Summarization</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Seongmin</namePart>
    <namePart type="family">Park</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Dongchan</namePart>
    <namePart type="family">Shin</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Jihwa</namePart>
    <namePart type="family">Lee</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2022-10</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Proceedings of the First Workshop On Transcript Understanding</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Franck</namePart>
      <namePart type="family">Dernoncourt</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Thien</namePart>
      <namePart type="given">Huu</namePart>
      <namePart type="family">Nguyen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Viet</namePart>
      <namePart type="given">Dac</namePart>
      <namePart type="family">Lai</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Amir</namePart>
      <namePart type="given">Pouran</namePart>
      <namePart type="given">Ben</namePart>
      <namePart type="family">Veyseh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Trung</namePart>
      <namePart type="given">H</namePart>
      <namePart type="family">Bui</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">David</namePart>
      <namePart type="given">Seunghyun</namePart>
      <namePart type="family">Yoon</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <originInfo>
      <publisher>International Conference on Computational Linguistics</publisher>
      <place>
        <placeTerm type="text">Gyeongju, South Korea</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
  </relatedItem>
  <abstract>To mitigate the lack of diverse dialogue summarization datasets in academia, we present methods to utilize non-dialogue summarization data for enhancing dialogue summarization systems. We apply transformations to document summarization data pairs to create training data that better befit dialogue summarization. The suggested transformations also retain desirable properties of non-dialogue datasets, such as improved faithfulness to the source text. We conduct extensive experiments across both English and Korean to verify our approach. Although absolute gains in ROUGE naturally plateau as more dialogue summarization samples are introduced, utilizing non-dialogue data for training significantly improves summarization performance in zero- and few-shot settings and enhances faithfulness across all training regimes.</abstract>
  <identifier type="citekey">park-etal-2022-leveraging</identifier>
  <location>
    <url>https://aclanthology.org/2022.tu-1.1</url>
  </location>
  <part>
    <date>2022-10</date>
    <extent unit="page">
      <start>1</start>
      <end>7</end>
    </extent>
  </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Leveraging Non-dialogue Summaries for Dialogue Summarization
%A Park, Seongmin
%A Shin, Dongchan
%A Lee, Jihwa
%Y Dernoncourt, Franck
%Y Nguyen, Thien Huu
%Y Lai, Viet Dac
%Y Veyseh, Amir Pouran Ben
%Y Bui, Trung H.
%Y Yoon, David Seunghyun
%S Proceedings of the First Workshop On Transcript Understanding
%D 2022
%8 October
%I International Conference on Computational Linguistics
%C Gyeongju, South Korea
%F park-etal-2022-leveraging
%X To mitigate the lack of diverse dialogue summarization datasets in academia, we present methods to utilize non-dialogue summarization data for enhancing dialogue summarization systems. We apply transformations to document summarization data pairs to create training data that better befit dialogue summarization. The suggested transformations also retain desirable properties of non-dialogue datasets, such as improved faithfulness to the source text. We conduct extensive experiments across both English and Korean to verify our approach. Although absolute gains in ROUGE naturally plateau as more dialogue summarization samples are introduced, utilizing non-dialogue data for training significantly improves summarization performance in zero- and few-shot settings and enhances faithfulness across all training regimes.
%U https://aclanthology.org/2022.tu-1.1
%P 1-7
Markdown (Informal)
[Leveraging Non-dialogue Summaries for Dialogue Summarization](https://aclanthology.org/2022.tu-1.1) (Park et al., TU 2022)
ACL
Seongmin Park, Dongchan Shin, and Jihwa Lee. 2022. [Leveraging Non-dialogue Summaries for Dialogue Summarization](https://aclanthology.org/2022.tu-1.1). In *Proceedings of the First Workshop On Transcript Understanding*, pages 1–7, Gyeongju, South Korea. International Conference on Computational Linguistics.