@inproceedings{obaid-ul-islam-etal-2023-tackling,
title = "Tackling Hallucinations in Neural Chart Summarization",
author = "Obaid ul Islam, Saad and
{\v{S}}krjanec, Iza and
Dusek, Ondrej and
Demberg, Vera",
editor = "Keet, C. Maria and
Lee, Hung-Yi and
Zarrie{\ss}, Sina",
booktitle = "Proceedings of the 16th International Natural Language Generation Conference",
month = sep,
year = "2023",
address = "Prague, Czechia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.inlg-main.30",
doi = "10.18653/v1/2023.inlg-main.30",
pages = "414--423",
abstract = "Hallucinations in text generation occur when the system produces text that is not grounded in the input. In this work, we tackle the problem of hallucinations in neural chart summarization. Our analysis shows that the target side of chart summarization training datasets often contains additional information, leading to hallucinations. We propose a natural language inference (NLI) based method to preprocess the training data and show through human evaluation that our method significantly reduces hallucinations. We also found that shortening long-distance dependencies in the input sequence and adding chart-related information like title and legends improves the overall performance.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="obaid-ul-islam-etal-2023-tackling">
<titleInfo>
<title>Tackling Hallucinations in Neural Chart Summarization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Saad</namePart>
<namePart type="family">Obaid ul Islam</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Iza</namePart>
<namePart type="family">Škrjanec</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ondrej</namePart>
<namePart type="family">Dusek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vera</namePart>
<namePart type="family">Demberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 16th International Natural Language Generation Conference</title>
</titleInfo>
<name type="personal">
<namePart type="given">C</namePart>
<namePart type="given">Maria</namePart>
<namePart type="family">Keet</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hung-Yi</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sina</namePart>
<namePart type="family">Zarrieß</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Prague, Czechia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Hallucinations in text generation occur when the system produces text that is not grounded in the input. In this work, we tackle the problem of hallucinations in neural chart summarization. Our analysis shows that the target side of chart summarization training datasets often contains additional information, leading to hallucinations. We propose a natural language inference (NLI) based method to preprocess the training data and show through human evaluation that our method significantly reduces hallucinations. We also found that shortening long-distance dependencies in the input sequence and adding chart-related information like title and legends improves the overall performance.</abstract>
<identifier type="citekey">obaid-ul-islam-etal-2023-tackling</identifier>
<identifier type="doi">10.18653/v1/2023.inlg-main.30</identifier>
<location>
<url>https://aclanthology.org/2023.inlg-main.30</url>
</location>
<part>
<date>2023-09</date>
<extent unit="page">
<start>414</start>
<end>423</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Tackling Hallucinations in Neural Chart Summarization
%A Obaid ul Islam, Saad
%A Škrjanec, Iza
%A Dusek, Ondrej
%A Demberg, Vera
%Y Keet, C. Maria
%Y Lee, Hung-Yi
%Y Zarrieß, Sina
%S Proceedings of the 16th International Natural Language Generation Conference
%D 2023
%8 September
%I Association for Computational Linguistics
%C Prague, Czechia
%F obaid-ul-islam-etal-2023-tackling
%X Hallucinations in text generation occur when the system produces text that is not grounded in the input. In this work, we tackle the problem of hallucinations in neural chart summarization. Our analysis shows that the target side of chart summarization training datasets often contains additional information, leading to hallucinations. We propose a natural language inference (NLI) based method to preprocess the training data and show through human evaluation that our method significantly reduces hallucinations. We also found that shortening long-distance dependencies in the input sequence and adding chart-related information like title and legends improves the overall performance.
%R 10.18653/v1/2023.inlg-main.30
%U https://aclanthology.org/2023.inlg-main.30
%U https://doi.org/10.18653/v1/2023.inlg-main.30
%P 414-423
Markdown (Informal)
[Tackling Hallucinations in Neural Chart Summarization](https://aclanthology.org/2023.inlg-main.30) (Obaid ul Islam et al., INLG-SIGDIAL 2023)
ACL
Saad Obaid ul Islam, Iza Škrjanec, Ondrej Dusek, and Vera Demberg. 2023. Tackling Hallucinations in Neural Chart Summarization. In Proceedings of the 16th International Natural Language Generation Conference, pages 414–423, Prague, Czechia. Association for Computational Linguistics.
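
The NLI-based preprocessing described in the abstract can be pictured roughly as follows: score each sentence of a training summary against a linearized version of its chart with an off-the-shelf NLI model, and keep only sentences the chart entails. The sketch below is illustrative only and not the authors' pipeline; the `roberta-large-mnli` checkpoint, the toy chart linearization, the sentence-level granularity, and the 0.5 entailment threshold are all assumptions.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Assumption: any off-the-shelf NLI checkpoint; the paper does not prescribe this one.
MODEL_NAME = "roberta-large-mnli"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
# roberta-large-mnli labels: 0=CONTRADICTION, 1=NEUTRAL, 2=ENTAILMENT
ENTAILMENT_ID = model.config.label2id.get("ENTAILMENT", 2)


def linearize_chart(chart):
    """Flatten chart title and data cells into an NLI premise (toy linearization)."""
    cells = ", ".join(f"{x}: {y}" for x, y in chart["data"])
    return f"Title: {chart['title']}. Values: {cells}."


def filter_summary(chart, sentences, threshold=0.5):
    """Keep only summary sentences whose content the chart premise entails."""
    premise = linearize_chart(chart)
    kept = []
    for hypothesis in sentences:
        inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
        with torch.no_grad():
            probs = model(**inputs).logits.softmax(dim=-1)[0]
        if probs[ENTAILMENT_ID].item() >= threshold:
            kept.append(hypothesis)
    return kept


if __name__ == "__main__":
    chart = {"title": "Annual sales 2020-2022",
             "data": [("2020", 10), ("2021", 15), ("2022", 20)]}
    sentences = [
        "Sales increased every year from 2020 to 2022.",
        "The company was founded in 1999.",  # ungrounded; should be filtered out
    ]
    print(filter_summary(chart, sentences))
```

In this toy run, the ungrounded sentence about the founding year should score low on entailment and be dropped from the training target, while the grounded trend sentence is kept.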