@inproceedings{sobrevilla-cabezudo-pardo-2020-nilc,
title = "{NILC} at {SR}{'}20: Exploring Pre-Trained Models in Surface Realisation",
author = "Sobrevilla Cabezudo, Marco Antonio and
Pardo, Thiago",
editor = "Belz, Anya and
Bohnet, Bernd and
Ferreira, Thiago Castro and
Graham, Yvette and
Mille, Simon and
Wanner, Leo",
booktitle = "Proceedings of the Third Workshop on Multilingual Surface Realisation",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.msr-1.6",
pages = "50--56",
abstract = "This paper describes the submission by the NILC Computational Linguistics research group of the University of São Paulo/Brazil to the English Track 2 (closed sub-track) at the Surface Realisation Shared Task 2020. The success of the current pre-trained models like BERT or GPT-2 in several tasks is well-known, however, this is not the case for data-to-text generation tasks and just recently some initiatives focused on it. This way, we explore how a pre-trained model (GPT-2) performs on the UD-to-text generation task. In general, the achieved results were poor, but there are some interesting ideas to explore. Among the learned lessons we may note that it is necessary to study strategies to represent UD inputs and to introduce structural knowledge into these pre-trained models.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sobrevilla-cabezudo-pardo-2020-nilc">
<titleInfo>
<title>NILC at SR’20: Exploring Pre-Trained Models in Surface Realisation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marco</namePart>
<namePart type="given">Antonio</namePart>
<namePart type="family">Sobrevilla Cabezudo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thiago</namePart>
<namePart type="family">Pardo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Multilingual Surface Realisation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anya</namePart>
<namePart type="family">Belz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bernd</namePart>
<namePart type="family">Bohnet</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thiago</namePart>
<namePart type="given">Castro</namePart>
<namePart type="family">Ferreira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yvette</namePart>
<namePart type="family">Graham</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simon</namePart>
<namePart type="family">Mille</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leo</namePart>
<namePart type="family">Wanner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes the submission by the NILC Computational Linguistics research group of the University of São Paulo/Brazil to the English Track 2 (closed sub-track) at the Surface Realisation Shared Task 2020. The success of the current pre-trained models like BERT or GPT-2 in several tasks is well-known, however, this is not the case for data-to-text generation tasks and just recently some initiatives focused on it. This way, we explore how a pre-trained model (GPT-2) performs on the UD-to-text generation task. In general, the achieved results were poor, but there are some interesting ideas to explore. Among the learned lessons we may note that it is necessary to study strategies to represent UD inputs and to introduce structural knowledge into these pre-trained models.</abstract>
<identifier type="citekey">sobrevilla-cabezudo-pardo-2020-nilc</identifier>
<location>
<url>https://aclanthology.org/2020.msr-1.6</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>50</start>
<end>56</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T NILC at SR’20: Exploring Pre-Trained Models in Surface Realisation
%A Sobrevilla Cabezudo, Marco Antonio
%A Pardo, Thiago
%Y Belz, Anya
%Y Bohnet, Bernd
%Y Ferreira, Thiago Castro
%Y Graham, Yvette
%Y Mille, Simon
%Y Wanner, Leo
%S Proceedings of the Third Workshop on Multilingual Surface Realisation
%D 2020
%8 December
%I Association for Computational Linguistics
%C Barcelona, Spain (Online)
%F sobrevilla-cabezudo-pardo-2020-nilc
%X This paper describes the submission by the NILC Computational Linguistics research group of the University of São Paulo/Brazil to the English Track 2 (closed sub-track) at the Surface Realisation Shared Task 2020. The success of the current pre-trained models like BERT or GPT-2 in several tasks is well-known, however, this is not the case for data-to-text generation tasks and just recently some initiatives focused on it. This way, we explore how a pre-trained model (GPT-2) performs on the UD-to-text generation task. In general, the achieved results were poor, but there are some interesting ideas to explore. Among the learned lessons we may note that it is necessary to study strategies to represent UD inputs and to introduce structural knowledge into these pre-trained models.
%U https://aclanthology.org/2020.msr-1.6
%P 50-56
Markdown (Informal)
[NILC at SR’20: Exploring Pre-Trained Models in Surface Realisation](https://aclanthology.org/2020.msr-1.6) (Sobrevilla Cabezudo & Pardo, MSR 2020)
ACL