@article{luong-etal-2013-parsing,
    title = "Parsing entire discourses as very long strings: Capturing topic continuity in grounded language learning",
    author = "Luong, Minh-Thang and
      Frank, Michael C. and
      Johnson, Mark",
    editor = "Lin, Dekang and
      Collins, Michael",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "1",
    year = "2013",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/Q13-1026",
    doi = "10.1162/tacl_a_00230",
    pages = "315--326",
    abstract = "Grounded language learning, the task of mapping from natural language to a representation of meaning, has attracted more and more interest in recent years. In most work on this topic, however, utterances in a conversation are treated independently and discourse structure information is largely ignored. In the context of language acquisition, this independence assumption discards cues that are important to the learner, e.g., the fact that consecutive utterances are likely to share the same referent (Frank et al., 2013). The current paper describes an approach to the problem of simultaneously modeling grounded language at the sentence and discourse levels. We combine ideas from parsing and grammar induction to produce a parser that can handle long input strings with thousands of tokens, creating parse trees that represent full discourses. By casting grounded language learning as a grammatical inference task, we use our parser to extend the work of Johnson et al. (2012), investigating the importance of discourse continuity in children{'}s language acquisition and its interaction with social cues. Our model boosts performance in a language acquisition task and yields good discourse segmentations compared with human annotators.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="luong-etal-2013-parsing">
  <titleInfo>
    <title>Parsing entire discourses as very long strings: Capturing topic continuity in grounded language learning</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Minh-Thang</namePart>
    <namePart type="family">Luong</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Michael</namePart>
    <namePart type="given">C</namePart>
    <namePart type="family">Frank</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Mark</namePart>
    <namePart type="family">Johnson</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2013</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <genre authority="bibutilsgt">journal article</genre>
  <relatedItem type="host">
    <titleInfo>
      <title>Transactions of the Association for Computational Linguistics</title>
    </titleInfo>
    <originInfo>
      <issuance>continuing</issuance>
      <publisher>MIT Press</publisher>
      <place>
        <placeTerm type="text">Cambridge, MA</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">periodical</genre>
    <genre authority="bibutilsgt">academic journal</genre>
  </relatedItem>
  <abstract>Grounded language learning, the task of mapping from natural language to a representation of meaning, has attracted more and more interest in recent years. In most work on this topic, however, utterances in a conversation are treated independently and discourse structure information is largely ignored. In the context of language acquisition, this independence assumption discards cues that are important to the learner, e.g., the fact that consecutive utterances are likely to share the same referent (Frank et al., 2013). The current paper describes an approach to the problem of simultaneously modeling grounded language at the sentence and discourse levels. We combine ideas from parsing and grammar induction to produce a parser that can handle long input strings with thousands of tokens, creating parse trees that represent full discourses. By casting grounded language learning as a grammatical inference task, we use our parser to extend the work of Johnson et al. (2012), investigating the importance of discourse continuity in children’s language acquisition and its interaction with social cues. Our model boosts performance in a language acquisition task and yields good discourse segmentations compared with human annotators.</abstract>
  <identifier type="citekey">luong-etal-2013-parsing</identifier>
  <identifier type="doi">10.1162/tacl_a_00230</identifier>
  <location>
    <url>https://aclanthology.org/Q13-1026</url>
  </location>
  <part>
    <date>2013</date>
    <detail type="volume"><number>1</number></detail>
    <extent unit="page">
      <start>315</start>
      <end>326</end>
    </extent>
  </part>
</mods>
</modsCollection>
%0 Journal Article
%T Parsing entire discourses as very long strings: Capturing topic continuity in grounded language learning
%A Luong, Minh-Thang
%A Frank, Michael C.
%A Johnson, Mark
%J Transactions of the Association for Computational Linguistics
%D 2013
%V 1
%I MIT Press
%C Cambridge, MA
%F luong-etal-2013-parsing
%X Grounded language learning, the task of mapping from natural language to a representation of meaning, has attracted more and more interest in recent years. In most work on this topic, however, utterances in a conversation are treated independently and discourse structure information is largely ignored. In the context of language acquisition, this independence assumption discards cues that are important to the learner, e.g., the fact that consecutive utterances are likely to share the same referent (Frank et al., 2013). The current paper describes an approach to the problem of simultaneously modeling grounded language at the sentence and discourse levels. We combine ideas from parsing and grammar induction to produce a parser that can handle long input strings with thousands of tokens, creating parse trees that represent full discourses. By casting grounded language learning as a grammatical inference task, we use our parser to extend the work of Johnson et al. (2012), investigating the importance of discourse continuity in children’s language acquisition and its interaction with social cues. Our model boosts performance in a language acquisition task and yields good discourse segmentations compared with human annotators.
%R 10.1162/tacl_a_00230
%U https://aclanthology.org/Q13-1026
%U https://doi.org/10.1162/tacl_a_00230
%P 315-326
Markdown (Informal)
[Parsing entire discourses as very long strings: Capturing topic continuity in grounded language learning](https://aclanthology.org/Q13-1026) (Luong et al., TACL 2013)
ACL
Minh-Thang Luong, Michael C. Frank, and Mark Johnson. 2013. Parsing entire discourses as very long strings: Capturing topic continuity in grounded language learning. Transactions of the Association for Computational Linguistics, 1:315–326.