@inproceedings{valerio-miceli-barone-etal-2022-distributionally,
title = "Distributionally Robust Recurrent Decoders with Random Network Distillation",
author = "Valerio Miceli Barone, Antonio and
Birch, Alexandra and
Sennrich, Rico",
booktitle = "Proceedings of the 7th Workshop on Representation Learning for NLP",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.repl4nlp-1.1",
doi = "10.18653/v1/2022.repl4nlp-1.1",
pages = "1--8",
abstract = "Neural machine learning models can successfully model language that is similar to their training distribution, but they are highly susceptible to degradation under distribution shift, which occurs in many practical applications when processing out-of-domain (OOD) text. This has been attributed to {``}shortcut learning{''}'':'' relying on weak correlations over arbitrary large contexts. We propose a method based on OOD detection with Random Network Distillation to allow an autoregressive language model to automatically disregard OOD context during inference, smoothly transitioning towards a less expressive but more robust model as the data becomes more OOD, while retaining its full context capability when operating in-distribution. We apply our method to a GRU architecture, demonstrating improvements on multiple language modeling (LM) datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="valerio-miceli-barone-etal-2022-distributionally">
<titleInfo>
<title>Distributionally Robust Recurrent Decoders with Random Network Distillation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Antonio</namePart>
<namePart type="family">Valerio Miceli Barone</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexandra</namePart>
<namePart type="family">Birch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rico</namePart>
<namePart type="family">Sennrich</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 7th Workshop on Representation Learning for NLP</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Neural machine learning models can successfully model language that is similar to their training distribution, but they are highly susceptible to degradation under distribution shift, which occurs in many practical applications when processing out-of-domain (OOD) text. This has been attributed to “shortcut learning”: relying on weak correlations over arbitrary large contexts. We propose a method based on OOD detection with Random Network Distillation to allow an autoregressive language model to automatically disregard OOD context during inference, smoothly transitioning towards a less expressive but more robust model as the data becomes more OOD, while retaining its full context capability when operating in-distribution. We apply our method to a GRU architecture, demonstrating improvements on multiple language modeling (LM) datasets.</abstract>
<identifier type="citekey">valerio-miceli-barone-etal-2022-distributionally</identifier>
<identifier type="doi">10.18653/v1/2022.repl4nlp-1.1</identifier>
<location>
<url>https://aclanthology.org/2022.repl4nlp-1.1</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>1</start>
<end>8</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Distributionally Robust Recurrent Decoders with Random Network Distillation
%A Valerio Miceli Barone, Antonio
%A Birch, Alexandra
%A Sennrich, Rico
%S Proceedings of the 7th Workshop on Representation Learning for NLP
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F valerio-miceli-barone-etal-2022-distributionally
%X Neural machine learning models can successfully model language that is similar to their training distribution, but they are highly susceptible to degradation under distribution shift, which occurs in many practical applications when processing out-of-domain (OOD) text. This has been attributed to “shortcut learning”: relying on weak correlations over arbitrary large contexts. We propose a method based on OOD detection with Random Network Distillation to allow an autoregressive language model to automatically disregard OOD context during inference, smoothly transitioning towards a less expressive but more robust model as the data becomes more OOD, while retaining its full context capability when operating in-distribution. We apply our method to a GRU architecture, demonstrating improvements on multiple language modeling (LM) datasets.
%R 10.18653/v1/2022.repl4nlp-1.1
%U https://aclanthology.org/2022.repl4nlp-1.1
%U https://doi.org/10.18653/v1/2022.repl4nlp-1.1
%P 1-8
Markdown (Informal)
[Distributionally Robust Recurrent Decoders with Random Network Distillation](https://aclanthology.org/2022.repl4nlp-1.1) (Valerio Miceli Barone et al., RepL4NLP 2022)
ACL
Antonio Valerio Miceli Barone, Alexandra Birch, and Rico Sennrich. 2022. Distributionally Robust Recurrent Decoders with Random Network Distillation. In Proceedings of the 7th Workshop on Representation Learning for NLP, pages 1–8, Dublin, Ireland. Association for Computational Linguistics.
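
The abstract describes using Random Network Distillation (RND) as an OOD detector whose score lets an autoregressive GRU language model smoothly fall back from its full-context predictions to a more robust, less context-dependent distribution. The sketch below is a minimal, hypothetical PyTorch illustration of that general idea, not the authors' implementation: the class names (`RNDScorer`, `GatedGRULM`), the sigmoid gate, the learned scale, and the unigram fallback distribution are all illustrative assumptions.

```python
# Hypothetical sketch of RND-gated decoding (assumptions noted above; not the paper's code).
import torch
import torch.nn as nn

class RNDScorer(nn.Module):
    """OOD score = prediction error of a trained predictor vs. a frozen random target net."""
    def __init__(self, dim, hidden=256):
        super().__init__()
        self.target = nn.Sequential(nn.Linear(dim, hidden), nn.ReLU(), nn.Linear(hidden, hidden))
        for p in self.target.parameters():
            p.requires_grad_(False)  # the target network stays random and frozen
        self.predictor = nn.Sequential(nn.Linear(dim, hidden), nn.ReLU(), nn.Linear(hidden, hidden))

    def forward(self, h):
        # h: (batch, dim) context representations; the predictor is trained to match
        # the target only on in-distribution data, so its error grows on OOD contexts.
        return (self.predictor(h) - self.target(h)).pow(2).mean(dim=-1)

class GatedGRULM(nn.Module):
    """GRU LM whose context-dependent distribution is blended with a context-free
    (unigram-like) fallback according to the RND OOD score of the hidden state."""
    def __init__(self, vocab, dim=512):
        super().__init__()
        self.embed = nn.Embedding(vocab, dim)
        self.gru = nn.GRU(dim, dim, batch_first=True)
        self.out = nn.Linear(dim, vocab)
        self.unigram_logits = nn.Parameter(torch.zeros(vocab))  # robust, context-free fallback
        self.rnd = RNDScorer(dim)
        self.scale = nn.Parameter(torch.tensor(1.0))            # calibrates error -> gate

    def forward(self, tokens):
        h, _ = self.gru(self.embed(tokens))                         # (batch, T, dim)
        ood = self.rnd(h.reshape(-1, h.size(-1))).view(h.shape[:2])  # per-position OOD score
        gate = torch.sigmoid(-self.scale * ood).unsqueeze(-1)        # ~1 in-distribution, ~0 OOD
        probs = gate * torch.softmax(self.out(h), dim=-1) \
              + (1 - gate) * torch.softmax(self.unigram_logits, dim=-1)
        return probs.log()                                           # (batch, T, vocab) log-probs
```

In this sketch, only the predictor inside `RNDScorer` would be trained to match the frozen target on in-distribution hidden states; at inference time a large prediction error drives the gate toward the context-free distribution, mirroring the "smooth transition" the abstract describes.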