@inproceedings{chauhan-etal-2022-tcs,
title = "{TCS}{\_}{WITM}{\_}2022 @ {D}ialog{S}um : Topic oriented Summarization using Transformer based Encoder Decoder Model",
author = "Chauhan, Vipul and
Roy, Prasenjeet and
Dey, Lipika and
Goel, Tushar",
editor = "Shaikh, Samira and
Ferreira, Thiago and
Stent, Amanda",
booktitle = "Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges",
month = jul,
year = "2022",
address = "Waterville, Maine, USA and virtual meeting",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.inlg-genchal.15",
pages = "104--109",
abstract = "In this paper, we present our approach to the DialogSum challenge, which was proposed as a shared task aimed to summarize dialogues from real-life scenarios. The challenge was to design a system that can generate fluent and salient summaries of a multi-turn dialogue text. Dialogue summarization has many commercial applications as it can be used to summarize conversations between customers and service agents, meeting notes, conference proceedings etc. Appropriate dialogue summarization can enhance the experience of conversing with chat- bots or personal digital assistants. We have pro- posed a topic-based abstractive summarization method, which is generated by fine-tuning PE- GASUS1, which is the state of the art abstrac- tive summary generation model. We have com- pared different types of fine-tuning approaches that can lead to different types of summaries. We found that since conversations usually veer around a topic, using topics along with the di- aloagues, helps to generate more human-like summaries. The topics in this case resemble user perspective, around which summaries are usually sought. The generated summary has been evaluated with ground truth summaries provided by the challenge owners. We use the py-rouge score and BERT-Score metrics to compare the results.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chauhan-etal-2022-tcs">
<titleInfo>
<title>TCS_WITM_2022 @ DialogSum : Topic oriented Summarization using Transformer based Encoder Decoder Model</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vipul</namePart>
<namePart type="family">Chauhan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Prasenjeet</namePart>
<namePart type="family">Roy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lipika</namePart>
<namePart type="family">Dey</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tushar</namePart>
<namePart type="family">Goel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges</title>
</titleInfo>
<name type="personal">
<namePart type="given">Samira</namePart>
<namePart type="family">Shaikh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thiago</namePart>
<namePart type="family">Ferreira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amanda</namePart>
<namePart type="family">Stent</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Waterville, Maine, USA and virtual meeting</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we present our approach to the DialogSum challenge, which was proposed as a shared task aimed at summarizing dialogues from real-life scenarios. The challenge was to design a system that can generate fluent and salient summaries of multi-turn dialogue text. Dialogue summarization has many commercial applications, as it can be used to summarize conversations between customers and service agents, meeting notes, conference proceedings, etc. Appropriate dialogue summarization can enhance the experience of conversing with chatbots or personal digital assistants. We have proposed a topic-based abstractive summarization method, generated by fine-tuning PEGASUS, a state-of-the-art abstractive summary generation model. We have compared different types of fine-tuning approaches that can lead to different types of summaries. We found that since conversations usually veer around a topic, using topics along with the dialogues helps to generate more human-like summaries. The topics in this case resemble the user perspective, around which summaries are usually sought. The generated summaries have been evaluated against ground-truth summaries provided by the challenge owners. We use the py-rouge and BERTScore metrics to compare the results.</abstract>
<identifier type="citekey">chauhan-etal-2022-tcs</identifier>
<location>
<url>https://aclanthology.org/2022.inlg-genchal.15</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>104</start>
<end>109</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TCS_WITM_2022 @ DialogSum : Topic oriented Summarization using Transformer based Encoder Decoder Model
%A Chauhan, Vipul
%A Roy, Prasenjeet
%A Dey, Lipika
%A Goel, Tushar
%Y Shaikh, Samira
%Y Ferreira, Thiago
%Y Stent, Amanda
%S Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges
%D 2022
%8 July
%I Association for Computational Linguistics
%C Waterville, Maine, USA and virtual meeting
%F chauhan-etal-2022-tcs
%X In this paper, we present our approach to the DialogSum challenge, which was proposed as a shared task aimed at summarizing dialogues from real-life scenarios. The challenge was to design a system that can generate fluent and salient summaries of multi-turn dialogue text. Dialogue summarization has many commercial applications, as it can be used to summarize conversations between customers and service agents, meeting notes, conference proceedings, etc. Appropriate dialogue summarization can enhance the experience of conversing with chatbots or personal digital assistants. We have proposed a topic-based abstractive summarization method, generated by fine-tuning PEGASUS, a state-of-the-art abstractive summary generation model. We have compared different types of fine-tuning approaches that can lead to different types of summaries. We found that since conversations usually veer around a topic, using topics along with the dialogues helps to generate more human-like summaries. The topics in this case resemble the user perspective, around which summaries are usually sought. The generated summaries have been evaluated against ground-truth summaries provided by the challenge owners. We use the py-rouge and BERTScore metrics to compare the results.
%U https://aclanthology.org/2022.inlg-genchal.15
%P 104-109
Markdown (Informal)
[TCS_WITM_2022 @ DialogSum : Topic oriented Summarization using Transformer based Encoder Decoder Model](https://aclanthology.org/2022.inlg-genchal.15) (Chauhan et al., INLG 2022)
ACL
Vipul Chauhan, Prasenjeet Roy, Lipika Dey, and Tushar Goel. 2022. TCS_WITM_2022 @ DialogSum : Topic oriented Summarization using Transformer based Encoder Decoder Model. In Proceedings of the 15th International Conference on Natural Language Generation: Generation Challenges, pages 104–109, Waterville, Maine, USA and virtual meeting. Association for Computational Linguistics.
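
As a rough illustration of the approach described in the paper's abstract (topic-conditioned abstractive summarization by fine-tuning PEGASUS), the sketch below prepends a dialogue's topic to its text before feeding it to a pretrained PEGASUS encoder-decoder via Hugging Face Transformers. This is a minimal sketch, not the authors' released code: the checkpoint name, the "topic: ... dialogue: ..." input template, and the generation settings are illustrative assumptions.

```python
# Minimal sketch of topic-conditioned summarization with PEGASUS.
# Assumptions (not from the paper): base checkpoint, input template, decoding settings.
from transformers import PegasusTokenizer, PegasusForConditionalGeneration

model_name = "google/pegasus-large"  # assumed base checkpoint; the paper fine-tunes PEGASUS on DialogSum data
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name)

def summarize(dialogue: str, topic: str) -> str:
    # Prepend the topic so generation is steered toward the user-relevant perspective,
    # mirroring the "topics along with the dialogues" idea from the abstract.
    source = f"topic: {topic} dialogue: {dialogue}"
    inputs = tokenizer(source, truncation=True, max_length=1024, return_tensors="pt")
    summary_ids = model.generate(**inputs, num_beams=4, max_length=96)
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)

print(summarize("#Person1#: Hi, I'd like to book a table for two tonight. ...", "restaurant booking"))
```

For evaluation, the abstract reports py-rouge and BERTScore against the reference summaries released by the challenge organizers; either library can be applied directly to the generated and reference summary strings.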