@inproceedings{sogaard-2020-neural,
    title = "Neural Speed Reading Audited",
    author = "S{\o}gaard, Anders",
    editor = "Cohn, Trevor and
      He, Yulan and
      Liu, Yang",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.findings-emnlp.14",
    doi = "10.18653/v1/2020.findings-emnlp.14",
    pages = "148--153",
    abstract = "Several approaches to neural speed reading have been presented at major NLP and machine learning conferences in 2017{--}20; i.e., {``}human-inspired{''} recurrent network architectures that learn to {``}read{''} text faster by skipping irrelevant words, typically optimizing the joint objective of minimizing classification error rate and FLOPs used at inference time. This paper reflects on the meaningfulness of the speed reading task, showing that (a) better and faster approaches to, say, document classification, already exist, which also learn to ignore part of the input (I give an example with 7{\%} error reduction and a 136x speed-up over the state of the art in neural speed reading); and that (b) any claims that neural speed reading is {``}human-inspired{''}, are ill-founded.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sogaard-2020-neural">
    <titleInfo>
        <title>Neural Speed Reading Audited</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Anders</namePart>
        <namePart type="family">Søgaard</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Trevor</namePart>
            <namePart type="family">Cohn</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yulan</namePart>
            <namePart type="family">He</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yang</namePart>
            <namePart type="family">Liu</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Online</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Several approaches to neural speed reading have been presented at major NLP and machine learning conferences in 2017–20; i.e., “human-inspired” recurrent network architectures that learn to “read” text faster by skipping irrelevant words, typically optimizing the joint objective of minimizing classification error rate and FLOPs used at inference time. This paper reflects on the meaningfulness of the speed reading task, showing that (a) better and faster approaches to, say, document classification, already exist, which also learn to ignore part of the input (I give an example with 7% error reduction and a 136x speed-up over the state of the art in neural speed reading); and that (b) any claims that neural speed reading is “human-inspired”, are ill-founded.</abstract>
    <identifier type="citekey">sogaard-2020-neural</identifier>
    <identifier type="doi">10.18653/v1/2020.findings-emnlp.14</identifier>
    <location>
        <url>https://aclanthology.org/2020.findings-emnlp.14</url>
    </location>
    <part>
        <date>2020-11</date>
        <extent unit="page">
            <start>148</start>
            <end>153</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Neural Speed Reading Audited
%A Søgaard, Anders
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F sogaard-2020-neural
%X Several approaches to neural speed reading have been presented at major NLP and machine learning conferences in 2017–20; i.e., “human-inspired” recurrent network architectures that learn to “read” text faster by skipping irrelevant words, typically optimizing the joint objective of minimizing classification error rate and FLOPs used at inference time. This paper reflects on the meaningfulness of the speed reading task, showing that (a) better and faster approaches to, say, document classification, already exist, which also learn to ignore part of the input (I give an example with 7% error reduction and a 136x speed-up over the state of the art in neural speed reading); and that (b) any claims that neural speed reading is “human-inspired”, are ill-founded.
%R 10.18653/v1/2020.findings-emnlp.14
%U https://aclanthology.org/2020.findings-emnlp.14
%U https://doi.org/10.18653/v1/2020.findings-emnlp.14
%P 148-153
Markdown (Informal)
[Neural Speed Reading Audited](https://aclanthology.org/2020.findings-emnlp.14) (Søgaard, Findings 2020)
ACL
Anders Søgaard. 2020. Neural Speed Reading Audited. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 148–153, Online. Association for Computational Linguistics.
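The joint objective mentioned in the abstract (minimizing classification error rate together with FLOPs used at inference time) can be written as a minimal sketch. The symbols below are illustrative placeholders, not notation taken from the paper:

\[
  \min_{\theta}\;
  \underbrace{\mathcal{L}_{\text{class}}(\theta)}_{\text{classification loss}}
  \;+\;
  \lambda \,
  \underbrace{\mathbb{E}\!\left[\mathrm{FLOPs}(\theta)\right]}_{\text{expected inference-time compute}}
\]

Here \(\lambda\) is an assumed trade-off weight balancing accuracy against the compute penalty; the paper's argument is that models trained without such a penalty can already be both more accurate and faster.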