@inproceedings{shcherbakov-etal-2020-exploring,
title = "Exploring Looping Effects in {RNN}-based Architectures",
author = "Shcherbakov, Andrei and
Muradoglu, Saliha and
Vylomova, Ekaterina",
editor = "Kim, Maria and
Beck, Daniel and
Mistica, Meladel",
booktitle = "Proceedings of the 18th Annual Workshop of the Australasian Language Technology Association",
month = dec,
year = "2020",
address = "Virtual Workshop",
publisher = "Australasian Language Technology Association",
url = "https://aclanthology.org/2020.alta-1.15",
pages = "115--120",
abstract = "The paper investigates repetitive loops, a common problem in contemporary text generation (such as machine translation, language modelling, morphological inflection) systems. More specifically, we conduct a study on neural models with recurrent units by explicitly altering their decoder internal state. We use a task of morphological reinflection task as a proxy to study the effects of the changes. Our results show that the probability of the occurrence of repetitive loops is significantly reduced by introduction of an extra neural decoder output. The output should be specifically trained to produce gradually increasing value upon generation of each character of a given sequence. We also explored variations of the technique and found that feeding the extra output back to the decoder amplifies the positive effects.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="shcherbakov-etal-2020-exploring">
    <titleInfo>
      <title>Exploring Looping Effects in RNN-based Architectures</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Andrei</namePart>
      <namePart type="family">Shcherbakov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Saliha</namePart>
      <namePart type="family">Muradoglu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ekaterina</namePart>
      <namePart type="family">Vylomova</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 18th Annual Workshop of the Australasian Language Technology Association</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Maria</namePart>
        <namePart type="family">Kim</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Daniel</namePart>
        <namePart type="family">Beck</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Meladel</namePart>
        <namePart type="family">Mistica</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Australasian Language Technology Association</publisher>
        <place>
          <placeTerm type="text">Virtual Workshop</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The paper investigates repetitive loops, a common problem in contemporary text generation systems (such as machine translation, language modelling, and morphological inflection). More specifically, we conduct a study on neural models with recurrent units by explicitly altering their decoder internal state. We use the task of morphological reinflection as a proxy to study the effects of these changes. Our results show that the probability of the occurrence of repetitive loops is significantly reduced by the introduction of an extra neural decoder output. The output should be specifically trained to produce a gradually increasing value upon the generation of each character of a given sequence. We also explored variations of the technique and found that feeding the extra output back to the decoder amplifies the positive effects.</abstract>
    <identifier type="citekey">shcherbakov-etal-2020-exploring</identifier>
    <location>
      <url>https://aclanthology.org/2020.alta-1.15</url>
    </location>
    <part>
      <date>2020-12</date>
      <extent unit="page">
        <start>115</start>
        <end>120</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Exploring Looping Effects in RNN-based Architectures
%A Shcherbakov, Andrei
%A Muradoglu, Saliha
%A Vylomova, Ekaterina
%Y Kim, Maria
%Y Beck, Daniel
%Y Mistica, Meladel
%S Proceedings of the 18th Annual Workshop of the Australasian Language Technology Association
%D 2020
%8 December
%I Australasian Language Technology Association
%C Virtual Workshop
%F shcherbakov-etal-2020-exploring
%X The paper investigates repetitive loops, a common problem in contemporary text generation systems (such as machine translation, language modelling, and morphological inflection). More specifically, we conduct a study on neural models with recurrent units by explicitly altering their decoder internal state. We use the task of morphological reinflection as a proxy to study the effects of these changes. Our results show that the probability of the occurrence of repetitive loops is significantly reduced by the introduction of an extra neural decoder output. The output should be specifically trained to produce a gradually increasing value upon the generation of each character of a given sequence. We also explored variations of the technique and found that feeding the extra output back to the decoder amplifies the positive effects.
%U https://aclanthology.org/2020.alta-1.15
%P 115-120
Markdown (Informal)
[Exploring Looping Effects in RNN-based Architectures](https://aclanthology.org/2020.alta-1.15) (Shcherbakov et al., ALTA 2020)
ACL
Andrei Shcherbakov, Saliha Muradoglu, and Ekaterina Vylomova. 2020. Exploring Looping Effects in RNN-based Architectures. In Proceedings of the 18th Annual Workshop of the Australasian Language Technology Association, pages 115–120, Virtual Workshop. Australasian Language Technology Association.
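
For readers who want to prototype the technique summarized in the abstract, below is a minimal, hypothetical PyTorch sketch: a GRU character decoder with an extra scalar output trained, via an auxiliary regression loss, to grow by one per generated character, with that value fed back into the next decoding step (the feedback variant the abstract reports as most effective). The class names, dimensions, and the 0.1 loss weight are illustrative assumptions, not the authors' actual implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CounterDecoder(nn.Module):
    """GRU decoder with an extra scalar "step counter" head.

    Hypothetical sketch: the counter head is trained to emit 1, 2, 3, ...
    as characters are generated, and its value is fed back as an input
    feature at the next step.
    """

    def __init__(self, vocab_size: int, emb_dim: int = 64, hid_dim: int = 128):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, emb_dim)
        # +1 input feature carries the previous step's counter value.
        self.rnn = nn.GRUCell(emb_dim + 1, hid_dim)
        self.char_head = nn.Linear(hid_dim, vocab_size)
        self.counter_head = nn.Linear(hid_dim, 1)

    def forward(self, tokens: torch.Tensor, hidden: torch.Tensor):
        """Teacher-forced decoding. tokens: (batch, seq_len) char ids."""
        counter = torch.zeros(tokens.size(0), 1, device=tokens.device)
        logits, counters = [], []
        for t in range(tokens.size(1)):
            inp = torch.cat([self.embed(tokens[:, t]), counter], dim=-1)
            hidden = self.rnn(inp, hidden)
            counter = self.counter_head(hidden)   # fed back at the next step
            logits.append(self.char_head(hidden))
            counters.append(counter)
        return torch.stack(logits, dim=1), torch.cat(counters, dim=1), hidden

def loss_fn(logits, counters, targets):
    """Character cross-entropy plus an auxiliary loss pushing the counter
    toward the position index 1..T; the 0.1 weight is an assumption."""
    steps = torch.arange(1, counters.size(1) + 1,
                         dtype=counters.dtype, device=counters.device)
    ce = F.cross_entropy(logits.reshape(-1, logits.size(-1)),
                         targets.reshape(-1))
    aux = F.mse_loss(counters, steps.expand_as(counters))
    return ce + 0.1 * aux
```

A teacher-forced training step under these assumptions might look like:

```python
dec = CounterDecoder(vocab_size=40)
tokens = torch.randint(0, 40, (8, 12))   # batch of 12-character strings
hidden = torch.zeros(8, 128)             # e.g. the encoder's final state
logits, counters, _ = dec(tokens, hidden)
loss = loss_fn(logits, counters, targets=tokens)  # shift targets in practice
loss.backward()
```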