@inproceedings{faille-etal-2020-natural,
title = "The Natural Language Pipeline, Neural Text Generation and Explainability",
author = "Faille, Juliette and
Gatt, Albert and
Gardent, Claire",
editor = "Alonso, Jose M. and
Catala, Alejandro",
booktitle = "2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence",
month = nov,
year = "2020",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.nl4xai-1.5",
pages = "16--21",
abstract = "End-to-end encoder-decoder approaches to data-to-text generation are often black boxes whose predictions are difficult to explain. Breaking up the end-to-end model into sub-modules is a natural way to address this problem. The traditional pre-neural Natural Language Generation (NLG) pipeline provides a framework for breaking up the end-to-end encoder-decoder. We survey recent papers that integrate traditional NLG submodules in neural approaches and analyse their explainability. Our survey is a first step towards building explainable neural NLG models.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="faille-etal-2020-natural">
<titleInfo>
<title>The Natural Language Pipeline, Neural Text Generation and Explainability</title>
</titleInfo>
<name type="personal">
<namePart type="given">Juliette</namePart>
<namePart type="family">Faille</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Albert</namePart>
<namePart type="family">Gatt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Claire</namePart>
<namePart type="family">Gardent</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jose</namePart>
<namePart type="given">M.</namePart>
<namePart type="family">Alonso</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alejandro</namePart>
<namePart type="family">Catala</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>End-to-end encoder-decoder approaches to data-to-text generation are often black boxes whose predictions are difficult to explain. Breaking up the end-to-end model into sub-modules is a natural way to address this problem. The traditional pre-neural Natural Language Generation (NLG) pipeline provides a framework for breaking up the end-to-end encoder-decoder. We survey recent papers that integrate traditional NLG submodules in neural approaches and analyse their explainability. Our survey is a first step towards building explainable neural NLG models.</abstract>
<identifier type="citekey">faille-etal-2020-natural</identifier>
<location>
<url>https://aclanthology.org/2020.nl4xai-1.5</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>16</start>
<end>21</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T The Natural Language Pipeline, Neural Text Generation and Explainability
%A Faille, Juliette
%A Gatt, Albert
%A Gardent, Claire
%Y Alonso, Jose M.
%Y Catala, Alejandro
%S 2nd Workshop on Interactive Natural Language Technology for Explainable Artificial Intelligence
%D 2020
%8 November
%I Association for Computational Linguistics
%C Dublin, Ireland
%F faille-etal-2020-natural
%X End-to-end encoder-decoder approaches to data-to-text generation are often black boxes whose predictions are difficult to explain. Breaking up the end-to-end model into sub-modules is a natural way to address this problem. The traditional pre-neural Natural Language Generation (NLG) pipeline provides a framework for breaking up the end-to-end encoder-decoder. We survey recent papers that integrate traditional NLG submodules in neural approaches and analyse their explainability. Our survey is a first step towards building explainable neural NLG models.
%U https://aclanthology.org/2020.nl4xai-1.5
%P 16-21
Markdown (Informal)
[The Natural Language Pipeline, Neural Text Generation and Explainability](https://aclanthology.org/2020.nl4xai-1.5) (Faille et al., NL4XAI 2020)
ACL