@article{sanchez-etal-2018-derivational,
title = "On the Derivational Entropy of Left-to-Right Probabilistic Finite-State Automata and Hidden {M}arkov Models",
author = "S{\'a}nchez, Joan Andreu and
Rocha, Martha Alicia and
Romero, Ver{\'o}nica and
Villegas, Mauricio",
journal = "Computational Linguistics",
volume = "44",
number = "1",
month = apr,
year = "2018",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/J18-1002",
doi = "10.1162/COLI_a_00306",
pages = "17--37",
abstract = "Probabilistic finite-state automata are a formalism that is widely used in many problems of automatic speech recognition and natural language processing. Probabilistic finite-state automata are closely related to other finite-state models as weighted finite-state automata, word lattices, and hidden Markov models. Therefore, they share many similar properties and problems. Entropy measures of finite-state models have been investigated in the past in order to study the information capacity of these models. The derivational entropy quantifies the uncertainty that the model has about the probability distribution it represents. The derivational entropy in a finite-state automaton is computed from the probability that is accumulated in all of its individual state sequences. The computation of the entropy from a weighted finite-state automaton requires a normalized model. This article studies an efficient computation of the derivational entropy of left-to-right probabilistic finite-state automata, and it introduces an efficient algorithm for normalizing weighted finite-state automata. The efficient computation of the derivational entropy is also extended to continuous hidden Markov models.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sanchez-etal-2018-derivational">
<titleInfo>
<title>On the Derivational Entropy of Left-to-Right Probabilistic Finite-State Automata and Hidden Markov Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Joan</namePart>
<namePart type="given">Andreu</namePart>
<namePart type="family">Sánchez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Martha</namePart>
<namePart type="given">Alicia</namePart>
<namePart type="family">Rocha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Verónica</namePart>
<namePart type="family">Romero</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mauricio</namePart>
<namePart type="family">Villegas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
    <abstract>Probabilistic finite-state automata are a formalism that is widely used in many problems of automatic speech recognition and natural language processing. Probabilistic finite-state automata are closely related to other finite-state models such as weighted finite-state automata, word lattices, and hidden Markov models. Therefore, they share many similar properties and problems. Entropy measures of finite-state models have been investigated in the past in order to study the information capacity of these models. The derivational entropy quantifies the uncertainty that the model has about the probability distribution it represents. The derivational entropy in a finite-state automaton is computed from the probability that is accumulated in all of its individual state sequences. The computation of the entropy from a weighted finite-state automaton requires a normalized model. This article studies an efficient computation of the derivational entropy of left-to-right probabilistic finite-state automata, and it introduces an efficient algorithm for normalizing weighted finite-state automata. The efficient computation of the derivational entropy is also extended to continuous hidden Markov models.</abstract>
<identifier type="citekey">sanchez-etal-2018-derivational</identifier>
<identifier type="doi">10.1162/COLI_a_00306</identifier>
<location>
<url>https://aclanthology.org/J18-1002</url>
</location>
<part>
<date>2018-04</date>
<detail type="volume"><number>44</number></detail>
<detail type="issue"><number>1</number></detail>
<extent unit="page">
<start>17</start>
<end>37</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Journal Article
%T On the Derivational Entropy of Left-to-Right Probabilistic Finite-State Automata and Hidden Markov Models
%A Sánchez, Joan Andreu
%A Rocha, Martha Alicia
%A Romero, Verónica
%A Villegas, Mauricio
%J Computational Linguistics
%D 2018
%8 April
%V 44
%N 1
%I MIT Press
%C Cambridge, MA
%F sanchez-etal-2018-derivational
%X Probabilistic finite-state automata are a formalism that is widely used in many problems of automatic speech recognition and natural language processing. Probabilistic finite-state automata are closely related to other finite-state models such as weighted finite-state automata, word lattices, and hidden Markov models. Therefore, they share many similar properties and problems. Entropy measures of finite-state models have been investigated in the past in order to study the information capacity of these models. The derivational entropy quantifies the uncertainty that the model has about the probability distribution it represents. The derivational entropy in a finite-state automaton is computed from the probability that is accumulated in all of its individual state sequences. The computation of the entropy from a weighted finite-state automaton requires a normalized model. This article studies an efficient computation of the derivational entropy of left-to-right probabilistic finite-state automata, and it introduces an efficient algorithm for normalizing weighted finite-state automata. The efficient computation of the derivational entropy is also extended to continuous hidden Markov models.
%R 10.1162/COLI_a_00306
%U https://aclanthology.org/J18-1002
%U https://doi.org/10.1162/COLI_a_00306
%P 17-37
Markdown (Informal)
[On the Derivational Entropy of Left-to-Right Probabilistic Finite-State Automata and Hidden Markov Models](https://aclanthology.org/J18-1002) (Sánchez et al., CL 2018)
ACL
Joan Andreu Sánchez, Martha Alicia Rocha, Verónica Romero, and Mauricio Villegas. 2018. [On the Derivational Entropy of Left-to-Right Probabilistic Finite-State Automata and Hidden Markov Models](https://aclanthology.org/J18-1002). Computational Linguistics, 44(1):17–37.
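
As an aside for readers of the abstract: the derivational entropy of a PFSA admits the classic decomposition H = Σ_q ξ(q)·H(q), where ξ(q) is the expected number of visits to state q and H(q) is the local entropy of the outgoing (and stopping) distribution at q. For a left-to-right topology, the system ξ = init + ξA is upper triangular and solvable in a single forward pass, which is the intuition behind an efficient computation. A minimal Python sketch of this generic identity (not the article's algorithm; it assumes at most one transition per state pair, and derivational_entropy, trans, init, and final are illustrative names):

import math

def derivational_entropy(n, trans, init, final):
    # n states 0..n-1, left-to-right: trans[(i, j)] with j >= i is the
    # probability mass of the transition from state i to state j;
    # init[i] is the initial probability, final[i] the stopping probability.
    # Expected visits xi solve xi = init + xi A; because A is upper
    # triangular (self-loops on the diagonal), a forward pass suffices.
    xi = [0.0] * n
    for j in range(n):
        mass = init[j] + sum(xi[i] * trans.get((i, j), 0.0) for i in range(j))
        xi[j] = mass / (1.0 - trans.get((j, j), 0.0))
    # H = sum over states of xi(q) times the local entropy at q (in nats),
    # where the local distribution covers outgoing transitions plus stopping.
    h = 0.0
    for i in range(n):
        out = [p for (src, _), p in trans.items() if src == i] + [final[i]]
        h -= xi[i] * sum(p * math.log(p) for p in out if p > 0.0)
    return h

# Example: three states, a self-loop on state 0, stop possible at 1 and 2.
trans = {(0, 0): 0.2, (0, 1): 0.8, (1, 2): 0.5}
init = [1.0, 0.0, 0.0]
final = [0.0, 0.5, 1.0]
print(derivational_entropy(3, trans, init, final))  # ~1.319 nats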