BibTeX
@inproceedings{mahon-lapata-2024-modular,
title = "A Modular Approach for Multimodal Summarization of {TV} Shows",
author = "Mahon, Louis and
Lapata, Mirella",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.luhme-long.450/",
doi = "10.18653/v1/2024.acl-long.450",
pages = "8272--8291",
    abstract = "In this paper we address the task of summarizing television shows, which touches key areas in AI research: complex reasoning, multiple modalities, and long narratives. We present a modular approach where separate components perform specialized sub-tasks which we argue affords greater flexibility compared to end-to-end methods. Our modules involve detecting scene boundaries, reordering scenes so as to minimize the number of cuts between different events, converting visual information to text, summarizing the dialogue in each scene, and fusing the scene summaries into a final summary for the entire episode. We also present a new metric, PRISMA (Precision and Recall Evaluation of Summary Facts), to measure both precision and recall of generated summaries, which we decompose into atomic facts. Tested on the recently released SummScreen3D dataset (Papalampidi {\&} Lapata, 2023), our method produces higher quality summaries than comparison models, as measured with ROUGE and our new fact-based metric."
}
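The abstract describes a pipeline of five modules: scene boundary detection, scene reordering to minimize cuts between events, conversion of visual information to text, per-scene dialogue summarization, and fusion of the scene summaries into an episode summary. The Python sketch below only illustrates that modular structure; every function body is a trivial placeholder, and the function names and splitting heuristics are assumptions rather than the authors' implementation.

```python
# Illustrative sketch of the modular pipeline named in the abstract.
# All stage implementations are placeholders, not the paper's method.
from typing import List


def detect_scene_boundaries(transcript: str) -> List[str]:
    """Split the episode transcript into scenes (placeholder: blank-line split)."""
    return [s.strip() for s in transcript.split("\n\n") if s.strip()]


def reorder_scenes(scenes: List[str]) -> List[str]:
    """Reorder scenes to minimize cuts between events (placeholder: keep order)."""
    return scenes


def visual_to_text(scene: str) -> str:
    """Convert visual information to a textual caption (placeholder: empty caption)."""
    return ""


def summarize_scene(dialogue: str, visual_caption: str) -> str:
    """Summarize one scene's dialogue plus its caption (placeholder: first line)."""
    return (visual_caption + " " + dialogue.splitlines()[0]).strip()


def fuse_summaries(scene_summaries: List[str]) -> str:
    """Fuse scene-level summaries into an episode summary (placeholder: concatenation)."""
    return " ".join(scene_summaries)


def summarize_episode(transcript: str) -> str:
    scenes = reorder_scenes(detect_scene_boundaries(transcript))
    return fuse_summaries([summarize_scene(s, visual_to_text(s)) for s in scenes])


if __name__ == "__main__":
    demo = "ALICE: We need to talk.\nBOB: About what?\n\nBOB: I'm leaving town.\nALICE: When?"
    print(summarize_episode(demo))
```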
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mahon-lapata-2024-modular">
<titleInfo>
<title>A Modular Approach for Multimodal Summarization of TV Shows</title>
</titleInfo>
<name type="personal">
<namePart type="given">Louis</namePart>
<namePart type="family">Mahon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mirella</namePart>
<namePart type="family">Lapata</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andre</namePart>
<namePart type="family">Martins</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper we address the task of summarizing television shows, which touches key areas in AI research: complex reasoning, multiple modalities, and long narratives. We present a modular approach where separate components perform specialized sub-tasks which we argue affords greater flexibility compared to end-to-end methods. Our modules involve detecting scene boundaries, reordering scenes so as to minimize the number of cuts between different events, converting visual information to text, summarizing the dialogue in each scene, and fusing the scene summaries into a final summary for the entire episode. We also present a new metric, PRISMA (Precision and Recall Evaluation of Summary Facts), to measure both precision and recall of generated summaries, which we decompose into atomic facts. Tested on the recently released SummScreen3D dataset (Papalampidi &amp; Lapata, 2023), our method produces higher quality summaries than comparison models, as measured with ROUGE and our new fact-based metric.</abstract>
<identifier type="citekey">mahon-lapata-2024-modular</identifier>
<identifier type="doi">10.18653/v1/2024.acl-long.450</identifier>
<location>
<url>https://aclanthology.org/2024.luhme-long.450/</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>8272</start>
<end>8291</end>
</extent>
</part>
</mods>
</modsCollection>
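The abstract also introduces PRISMA, a metric that decomposes generated and reference summaries into atomic facts and scores both precision and recall over them. The following is a minimal sketch of that idea only: it assumes the atomic facts have already been extracted and matches them by exact string equality, whereas the paper's metric would rely on its own fact-extraction and fact-verification steps.

```python
# Minimal sketch of fact-based precision/recall in the spirit of the abstract's
# description of PRISMA. Fact extraction and fact matching are simplified here.
from typing import Dict, Iterable


def fact_precision_recall(generated_facts: Iterable[str],
                          reference_facts: Iterable[str]) -> Dict[str, float]:
    gen = set(generated_facts)
    ref = set(reference_facts)
    matched = gen & ref  # facts present in both summaries (exact-match stand-in)
    precision = len(matched) / len(gen) if gen else 0.0
    recall = len(matched) / len(ref) if ref else 0.0
    f1 = (2 * precision * recall / (precision + recall)) if (precision + recall) else 0.0
    return {"precision": precision, "recall": recall, "f1": f1}


if __name__ == "__main__":
    generated = {"Bob leaves town", "Alice confronts Bob"}
    reference = {"Bob leaves town", "Alice is upset", "Bob says goodbye"}
    print(fact_precision_recall(generated, reference))
```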
Endnote
%0 Conference Proceedings
%T A Modular Approach for Multimodal Summarization of TV Shows
%A Mahon, Louis
%A Lapata, Mirella
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F mahon-lapata-2024-modular
%X In this paper we address the task of summarizing television shows, which touches key areas in AI research: complex reasoning, multiple modalities, and long narratives. We present a modular approach where separate components perform specialized sub-tasks which we argue affords greater flexibility compared to end-to-end methods. Our modules involve detecting scene boundaries, reordering scenes so as to minimize the number of cuts between different events, converting visual information to text, summarizing the dialogue in each scene, and fusing the scene summaries into a final summary for the entire episode. We also present a new metric, PRISMA (Precision and Recall Evaluation of Summary Facts), to measure both precision and recall of generated summaries, which we decompose into atomic facts. Tested on the recently released SummScreen3D dataset (Papalampidi & Lapata, 2023), our method produces higher quality summaries than comparison models, as measured with ROUGE and our new fact-based metric.
%R 10.18653/v1/2024.acl-long.450
%U https://aclanthology.org/2024.luhme-long.450/
%U https://doi.org/10.18653/v1/2024.acl-long.450
%P 8272-8291
Markdown (Informal)
[A Modular Approach for Multimodal Summarization of TV Shows](https://aclanthology.org/2024.luhme-long.450/) (Mahon & Lapata, ACL 2024)
ACL
Louis Mahon and Mirella Lapata. 2024. [A Modular Approach for Multimodal Summarization of TV Shows](https://aclanthology.org/2024.luhme-long.450/). In *Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)*, pages 8272–8291, Bangkok, Thailand. Association for Computational Linguistics.