@inproceedings{nishida-nakayama-2018-coherence,
    title = "Coherence Modeling Improves Implicit Discourse Relation Recognition",
    author = "Nishida, Noriki  and
      Nakayama, Hideki",
    editor = "Komatani, Kazunori  and
      Litman, Diane  and
      Yu, Kai  and
      Papangelis, Alex  and
      Cavedon, Lawrence  and
      Nakano, Mikio",
    booktitle = "Proceedings of the 19th Annual {SIG}dial Meeting on Discourse and Dialogue",
    month = jul,
    year = "2018",
    address = "Melbourne, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-5040",
    doi = "10.18653/v1/W18-5040",
    pages = "344--349",
    abstract = "The research described in this paper examines how to learn linguistic knowledge associated with discourse relations from unlabeled corpora. We introduce an unsupervised learning method on text coherence that could produce numerical representations that improve implicit discourse relation recognition in a semi-supervised manner. We also empirically examine two variants of coherence modeling: order-oriented and topic-oriented negative sampling, showing that, of the two, topic-oriented negative sampling tends to be more effective.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="nishida-nakayama-2018-coherence">
    <titleInfo>
      <title>Coherence Modeling Improves Implicit Discourse Relation Recognition</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Noriki</namePart>
      <namePart type="family">Nishida</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hideki</namePart>
      <namePart type="family">Nakayama</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2018-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Kazunori</namePart>
        <namePart type="family">Komatani</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Diane</namePart>
        <namePart type="family">Litman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kai</namePart>
        <namePart type="family">Yu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alex</namePart>
        <namePart type="family">Papangelis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lawrence</namePart>
        <namePart type="family">Cavedon</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mikio</namePart>
        <namePart type="family">Nakano</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Melbourne, Australia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The research described in this paper examines how to learn linguistic knowledge associated with discourse relations from unlabeled corpora. We introduce an unsupervised learning method on text coherence that could produce numerical representations that improve implicit discourse relation recognition in a semi-supervised manner. We also empirically examine two variants of coherence modeling: order-oriented and topic-oriented negative sampling, showing that, of the two, topic-oriented negative sampling tends to be more effective.</abstract>
    <identifier type="citekey">nishida-nakayama-2018-coherence</identifier>
    <identifier type="doi">10.18653/v1/W18-5040</identifier>
    <location>
      <url>https://aclanthology.org/W18-5040</url>
    </location>
    <part>
      <date>2018-07</date>
      <extent unit="page">
        <start>344</start>
        <end>349</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Coherence Modeling Improves Implicit Discourse Relation Recognition
%A Nishida, Noriki
%A Nakayama, Hideki
%Y Komatani, Kazunori
%Y Litman, Diane
%Y Yu, Kai
%Y Papangelis, Alex
%Y Cavedon, Lawrence
%Y Nakano, Mikio
%S Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F nishida-nakayama-2018-coherence
%X The research described in this paper examines how to learn linguistic knowledge associated with discourse relations from unlabeled corpora. We introduce an unsupervised learning method on text coherence that could produce numerical representations that improve implicit discourse relation recognition in a semi-supervised manner. We also empirically examine two variants of coherence modeling: order-oriented and topic-oriented negative sampling, showing that, of the two, topic-oriented negative sampling tends to be more effective.
%R 10.18653/v1/W18-5040
%U https://aclanthology.org/W18-5040
%U https://doi.org/10.18653/v1/W18-5040
%P 344-349
Markdown (Informal)
[Coherence Modeling Improves Implicit Discourse Relation Recognition](https://aclanthology.org/W18-5040) (Nishida & Nakayama, SIGDIAL 2018)
ACL
Noriki Nishida and Hideki Nakayama. 2018. Coherence Modeling Improves Implicit Discourse Relation Recognition. In Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pages 344–349, Melbourne, Australia. Association for Computational Linguistics.