@inproceedings{zhu-etal-2018-multi,
  author    = {Zhu, Pengcheng and Yang, Yujiu and Gao, Wenqiang and Liu, Yi},
  editor    = {Idiart, Marco and Lenci, Alessandro and Poibeau, Thierry and Villavicencio, Aline},
  title     = {Multi-glance Reading Model for Text Understanding},
  booktitle = {Proceedings of the Eight Workshop on Cognitive Aspects of Computational Language Learning and Processing},
  month     = jul,
  year      = {2018},
  address   = {Melbourne},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-2804},
  doi       = {10.18653/v1/W18-2804},
  pages     = {27--35},
  abstract  = {In recent years, a variety of recurrent neural networks have been proposed, e.g LSTM. However, existing models only read the text once, it cannot describe the situation of repeated reading in reading comprehension. In fact, when reading or analyzing a text, we may read the text several times rather than once if we couldn{'}t well understand it. So, how to model this kind of the reading behavior? To address the issue, we propose a multi-glance mechanism (MGM) for modeling the habit of reading behavior. In the proposed framework, the actual reading process can be fully simulated, and then the obtained information can be consistent with the task. Based on the multi-glance mechanism, we design two types of recurrent neural network models for repeated reading: Glance Cell Model (GCM) and Glance Gate Model (GGM). Visualization analysis of the GCM and the GGM demonstrates the effectiveness of multi-glance mechanisms. Experiments results on the large-scale datasets show that the proposed methods can achieve better performance.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhu-etal-2018-multi">
<titleInfo>
<title>Multi-glance Reading Model for Text Understanding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Pengcheng</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yujiu</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wenqiang</namePart>
<namePart type="family">Gao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yi</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Eight Workshop on Cognitive Aspects of Computational Language Learning and Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marco</namePart>
<namePart type="family">Idiart</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thierry</namePart>
<namePart type="family">Poibeau</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aline</namePart>
<namePart type="family">Villavicencio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In recent years, a variety of recurrent neural networks have been proposed, e.g LSTM. However, existing models only read the text once, it cannot describe the situation of repeated reading in reading comprehension. In fact, when reading or analyzing a text, we may read the text several times rather than once if we couldn’t well understand it. So, how to model this kind of the reading behavior? To address the issue, we propose a multi-glance mechanism (MGM) for modeling the habit of reading behavior. In the proposed framework, the actual reading process can be fully simulated, and then the obtained information can be consistent with the task. Based on the multi-glance mechanism, we design two types of recurrent neural network models for repeated reading: Glance Cell Model (GCM) and Glance Gate Model (GGM). Visualization analysis of the GCM and the GGM demonstrates the effectiveness of multi-glance mechanisms. Experiments results on the large-scale datasets show that the proposed methods can achieve better performance.</abstract>
<identifier type="citekey">zhu-etal-2018-multi</identifier>
<identifier type="doi">10.18653/v1/W18-2804</identifier>
<location>
<url>https://aclanthology.org/W18-2804</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>27</start>
<end>35</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multi-glance Reading Model for Text Understanding
%A Zhu, Pengcheng
%A Yang, Yujiu
%A Gao, Wenqiang
%A Liu, Yi
%Y Idiart, Marco
%Y Lenci, Alessandro
%Y Poibeau, Thierry
%Y Villavicencio, Aline
%S Proceedings of the Eight Workshop on Cognitive Aspects of Computational Language Learning and Processing
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne
%F zhu-etal-2018-multi
%X In recent years, a variety of recurrent neural networks have been proposed, e.g LSTM. However, existing models only read the text once, it cannot describe the situation of repeated reading in reading comprehension. In fact, when reading or analyzing a text, we may read the text several times rather than once if we couldn’t well understand it. So, how to model this kind of the reading behavior? To address the issue, we propose a multi-glance mechanism (MGM) for modeling the habit of reading behavior. In the proposed framework, the actual reading process can be fully simulated, and then the obtained information can be consistent with the task. Based on the multi-glance mechanism, we design two types of recurrent neural network models for repeated reading: Glance Cell Model (GCM) and Glance Gate Model (GGM). Visualization analysis of the GCM and the GGM demonstrates the effectiveness of multi-glance mechanisms. Experiments results on the large-scale datasets show that the proposed methods can achieve better performance.
%R 10.18653/v1/W18-2804
%U https://aclanthology.org/W18-2804
%U https://doi.org/10.18653/v1/W18-2804
%P 27-35
Markdown (Informal)
[Multi-glance Reading Model for Text Understanding](https://aclanthology.org/W18-2804) (Zhu et al., CogACLL 2018)
ACL
- Pengcheng Zhu, Yujiu Yang, Wenqiang Gao, and Yi Liu. 2018. Multi-glance Reading Model for Text Understanding. In Proceedings of the Eight Workshop on Cognitive Aspects of Computational Language Learning and Processing, pages 27–35, Melbourne. Association for Computational Linguistics.