@inproceedings{noraset-etal-2018-estimating,
title = "Estimating Marginal Probabilities of n-grams for Recurrent Neural Language Models",
author = "Noraset, Thanapon and
Downey, Doug and
Bing, Lidong",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D18-1322",
doi = "10.18653/v1/D18-1322",
pages = "2930--2935",
abstract = "Recurrent neural network language models (RNNLMs) are the current standard-bearer for statistical language modeling. However, RNNLMs only estimate probabilities for complete sequences of text, whereas some applications require context-independent phrase probabilities instead. In this paper, we study how to compute an RNNLM{'}s {\em marginal probability}: the probability that the model assigns to a short sequence of text when the preceding context is not known. We introduce a simple method of altering the RNNLM training to make the model more accurate at marginal estimation. Our experiments demonstrate that the technique is effective compared to baselines including the traditional RNNLM probability and an importance sampling approach. Finally, we show how we can use the marginal estimation to improve an RNNLM by training the marginals to match n-gram probabilities from a larger corpus.",
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="noraset-etal-2018-estimating">
<titleInfo>
<title>Estimating Marginal Probabilities of n-grams for Recurrent Neural Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Thanapon</namePart>
<namePart type="family">Noraset</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Doug</namePart>
<namePart type="family">Downey</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lidong</namePart>
<namePart type="family">Bing</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-oct-nov</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recurrent neural network language models (RNNLMs) are the current standard-bearer for statistical language modeling. However, RNNLMs only estimate probabilities for complete sequences of text, whereas some applications require context-independent phrase probabilities instead. In this paper, we study how to compute an RNNLM’s marginal probability: the probability that the model assigns to a short sequence of text when the preceding context is not known. We introduce a simple method of altering the RNNLM training to make the model more accurate at marginal estimation. Our experiments demonstrate that the technique is effective compared to baselines including the traditional RNNLM probability and an importance sampling approach. Finally, we show how we can use the marginal estimation to improve an RNNLM by training the marginals to match n-gram probabilities from a larger corpus.</abstract>
<identifier type="citekey">noraset-etal-2018-estimating</identifier>
<identifier type="doi">10.18653/v1/D18-1322</identifier>
<location>
<url>https://aclanthology.org/D18-1322</url>
</location>
<part>
<date>2018-oct-nov</date>
<extent unit="page">
<start>2930</start>
<end>2935</end>
</extent>
</part>
</mods>
</modsCollection>

%0 Conference Proceedings
%T Estimating Marginal Probabilities of n-grams for Recurrent Neural Language Models
%A Noraset, Thanapon
%A Downey, Doug
%A Bing, Lidong
%S Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing
%D 2018
%8 oct nov
%I Association for Computational Linguistics
%C Brussels, Belgium
%F noraset-etal-2018-estimating
%X Recurrent neural network language models (RNNLMs) are the current standard-bearer for statistical language modeling. However, RNNLMs only estimate probabilities for complete sequences of text, whereas some applications require context-independent phrase probabilities instead. In this paper, we study how to compute an RNNLM’s marginal probability: the probability that the model assigns to a short sequence of text when the preceding context is not known. We introduce a simple method of altering the RNNLM training to make the model more accurate at marginal estimation. Our experiments demonstrate that the technique is effective compared to baselines including the traditional RNNLM probability and an importance sampling approach. Finally, we show how we can use the marginal estimation to improve an RNNLM by training the marginals to match n-gram probabilities from a larger corpus.
%R 10.18653/v1/D18-1322
%U https://aclanthology.org/D18-1322
%U https://doi.org/10.18653/v1/D18-1322
%P 2930-2935

##### Markdown (Informal)

[Estimating Marginal Probabilities of n-grams for Recurrent Neural Language Models](https://aclanthology.org/D18-1322) (Noraset et al., EMNLP 2018)

##### ACL