@inproceedings{sobrevilla-cabezudo-pardo-2020-nilc-webnlg,
  title     = "{NILC} at {W}eb{NLG}+: Pretrained Sequence-to-Sequence Models on {RDF}-to-Text Generation",
  author    = "Sobrevilla Cabezudo, Marco Antonio and
               Pardo, Thiago A. S.",
  editor    = "Castro Ferreira, Thiago and
               Gardent, Claire and
               Ilinykh, Nikolai and
               van der Lee, Chris and
               Mille, Simon and
               Moussallem, Diego and
               Shimorina, Anastasia",
  booktitle = "Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)",
  month     = dec,
  year      = "2020",
  address   = "Dublin, Ireland (Virtual)",
  publisher = "Association for Computational Linguistics",
  url       = "https://aclanthology.org/2020.webnlg-1.14",
  pages     = "131--136",
  abstract  = "This paper describes the submission by the NILC Computational Linguistics research group of the University of S{\~a}o Paulo/Brazil to the RDF-to-Text task for English at the WebNLG+ challenge. The success of the current pretrained models like BERT or GPT-2 in text-to-text generation tasks is well-known, however, its application/success on data-to-text generation has not been well-studied and proven. This way, we explore how good a pretrained model, in particular BART, performs on the data-to-text generation task. The results obtained were worse than the baseline and other systems in almost all automatic measures. However, the human evaluation shows better results for our system. Besides, results suggest that BART may generate paraphrases of reference texts.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sobrevilla-cabezudo-pardo-2020-nilc-webnlg">
<titleInfo>
<title>NILC at WebNLG+: Pretrained Sequence-to-Sequence Models on RDF-to-Text Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marco</namePart>
<namePart type="given">Antonio</namePart>
<namePart type="family">Sobrevilla Cabezudo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thiago</namePart>
<namePart type="given">A</namePart>
<namePart type="given">S</namePart>
<namePart type="family">Pardo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Thiago</namePart>
<namePart type="family">Castro Ferreira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Claire</namePart>
<namePart type="family">Gardent</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nikolai</namePart>
<namePart type="family">Ilinykh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chris</namePart>
<namePart type="family">van der Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simon</namePart>
<namePart type="family">Mille</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Diego</namePart>
<namePart type="family">Moussallem</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anastasia</namePart>
<namePart type="family">Shimorina</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland (Virtual)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes the submission by the NILC Computational Linguistics research group of the University of São Paulo/Brazil to the RDF-to-Text task for English at the WebNLG+ challenge. The success of the current pretrained models like BERT or GPT-2 in text-to-text generation tasks is well-known, however, its application/success on data-to-text generation has not been well-studied and proven. This way, we explore how good a pretrained model, in particular BART, performs on the data-to-text generation task. The results obtained were worse than the baseline and other systems in almost all automatic measures. However, the human evaluation shows better results for our system. Besides, results suggest that BART may generate paraphrases of reference texts.</abstract>
<identifier type="citekey">sobrevilla-cabezudo-pardo-2020-nilc-webnlg</identifier>
<location>
<url>https://aclanthology.org/2020.webnlg-1.14</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>131</start>
<end>136</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T NILC at WebNLG+: Pretrained Sequence-to-Sequence Models on RDF-to-Text Generation
%A Sobrevilla Cabezudo, Marco Antonio
%A Pardo, Thiago A. S.
%Y Castro Ferreira, Thiago
%Y Gardent, Claire
%Y Ilinykh, Nikolai
%Y van der Lee, Chris
%Y Mille, Simon
%Y Moussallem, Diego
%Y Shimorina, Anastasia
%S Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+)
%D 2020
%8 December
%I Association for Computational Linguistics
%C Dublin, Ireland (Virtual)
%F sobrevilla-cabezudo-pardo-2020-nilc-webnlg
%X This paper describes the submission by the NILC Computational Linguistics research group of the University of São Paulo/Brazil to the RDF-to-Text task for English at the WebNLG+ challenge. The success of the current pretrained models like BERT or GPT-2 in text-to-text generation tasks is well-known, however, its application/success on data-to-text generation has not been well-studied and proven. This way, we explore how good a pretrained model, in particular BART, performs on the data-to-text generation task. The results obtained were worse than the baseline and other systems in almost all automatic measures. However, the human evaluation shows better results for our system. Besides, results suggest that BART may generate paraphrases of reference texts.
%U https://aclanthology.org/2020.webnlg-1.14
%P 131-136
Markdown (Informal)
[NILC at WebNLG+: Pretrained Sequence-to-Sequence Models on RDF-to-Text Generation](https://aclanthology.org/2020.webnlg-1.14) (Sobrevilla Cabezudo & Pardo, WebNLG 2020)
ACL