@inproceedings{kadowaki-etal-2019-event,
title = "Event Causality Recognition Exploiting Multiple Annotators{'} Judgments and Background Knowledge",
author = "Kadowaki, Kazuma and
Iida, Ryu and
Torisawa, Kentaro and
Oh, Jong-Hoon and
Kloetzer, Julien",
editor = "Inui, Kentaro and
Jiang, Jing and
Ng, Vincent and
Wan, Xiaojun",
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-1590",
doi = "10.18653/v1/D19-1590",
pages = "5816--5822",
abstract = "We propose new BERT-based methods for recognizing event causality such as {``}smoke cigarettes{''} {--}{\textgreater} {``}die of lung cancer{''} written in web texts. In our methods, we grasp each annotator{'}s policy by training multiple classifiers, each of which predicts the labels given by a single annotator, and combine the resulting classifiers{'} outputs to predict the final labels determined by majority vote. Furthermore, we investigate the effect of supplying background knowledge to our classifiers. Since BERT models are pre-trained with a large corpus, some sort of background knowledge for event causality may be learned during pre-training. Our experiments with a Japanese dataset suggest that this is actually the case: Performance improved when we pre-trained the BERT models with web texts containing a large number of event causalities instead of Wikipedia articles or randomly sampled web texts. However, this effect was limited. Therefore, we further improved performance by simply adding texts related to an input causality candidate as background knowledge to the input of the BERT models. We believe these findings indicate a promising future research direction.",
}
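
A minimal sketch of the multi-annotator scheme the abstract describes: one classifier head per annotator, each trained on that annotator's labels, with the final label taken by majority vote over the heads' predictions. This is not the authors' code; the class names, the shared-encoder assumption (per-annotator linear heads over a BERT `[CLS]` vector), and the tie-breaking rule are all illustrative.

```python
# Hypothetical sketch (not the paper's implementation): per-annotator
# classification heads over a shared encoder output, combined by majority vote.
import torch
import torch.nn as nn

class PerAnnotatorHeads(nn.Module):
    """One binary (causal / non-causal) head per annotator on top of a
    shared sentence representation, e.g. BERT's [CLS] vector. During
    training, each head would be fit against its own annotator's labels."""
    def __init__(self, hidden_size: int, num_annotators: int):
        super().__init__()
        self.heads = nn.ModuleList(
            [nn.Linear(hidden_size, 2) for _ in range(num_annotators)]
        )

    def forward(self, cls_vec: torch.Tensor) -> torch.Tensor:
        # (batch, hidden) -> (batch, num_annotators, 2) per-annotator logits
        return torch.stack([head(cls_vec) for head in self.heads], dim=1)

def predict_majority(logits: torch.Tensor) -> torch.Tensor:
    """Combine per-annotator predictions into a final label by majority
    vote (ties resolved toward the positive class here; an assumption)."""
    votes = logits.argmax(dim=-1)                    # (batch, num_annotators)
    return (votes.float().mean(dim=1) >= 0.5).long()  # (batch,) of 0/1

if __name__ == "__main__":
    batch, hidden, annotators = 4, 768, 3
    model = PerAnnotatorHeads(hidden, annotators)
    cls_vec = torch.randn(batch, hidden)   # stand-in for a BERT [CLS] vector
    print(predict_majority(model(cls_vec)))
```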
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kadowaki-etal-2019-event">
<titleInfo>
<title>Event Causality Recognition Exploiting Multiple Annotators’ Judgments and Background Knowledge</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kazuma</namePart>
<namePart type="family">Kadowaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ryu</namePart>
<namePart type="family">Iida</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kentaro</namePart>
<namePart type="family">Torisawa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jong-Hoon</namePart>
<namePart type="family">Oh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julien</namePart>
<namePart type="family">Kloetzer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kentaro</namePart>
<namePart type="family">Inui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jing</namePart>
<namePart type="family">Jiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vincent</namePart>
<namePart type="family">Ng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiaojun</namePart>
<namePart type="family">Wan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hong Kong, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We propose new BERT-based methods for recognizing event causality such as “smoke cigarettes” → “die of lung cancer” written in web texts. In our methods, we grasp each annotator’s policy by training multiple classifiers, each of which predicts the labels given by a single annotator, and combine the resulting classifiers’ outputs to predict the final labels determined by majority vote. Furthermore, we investigate the effect of supplying background knowledge to our classifiers. Since BERT models are pre-trained with a large corpus, some sort of background knowledge for event causality may be learned during pre-training. Our experiments with a Japanese dataset suggest that this is actually the case: Performance improved when we pre-trained the BERT models with web texts containing a large number of event causalities instead of Wikipedia articles or randomly sampled web texts. However, this effect was limited. Therefore, we further improved performance by simply adding texts related to an input causality candidate as background knowledge to the input of the BERT models. We believe these findings indicate a promising future research direction.</abstract>
<identifier type="citekey">kadowaki-etal-2019-event</identifier>
<identifier type="doi">10.18653/v1/D19-1590</identifier>
<location>
<url>https://aclanthology.org/D19-1590</url>
</location>
<part>
<date>2019-11</date>
<extent unit="page">
<start>5816</start>
<end>5822</end>
</extent>
</part>
</mods>
</modsCollection>
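
The abstract's second idea is to supply background knowledge "by simply adding texts related to an input causality candidate ... to the input of the BERT models." A minimal sketch of that input construction is below, assuming the related text has already been retrieved and that candidate and background are packed as a standard BERT sentence pair; the checkpoint is a generic English stand-in (the paper's experiments used Japanese BERT models pre-trained on causality-rich web texts).

```python
# Hypothetical sketch: append retrieved background text to the causality
# candidate as a BERT sentence pair. Checkpoint and example strings are
# illustrative, not from the paper.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

candidate = "smoke cigarettes -> die of lung cancer"          # causality candidate
background = "Smoking is a leading risk factor for lung cancer."  # retrieved related text

# Produces [CLS] candidate [SEP] background [SEP], truncated to BERT's
# maximum input length; the classifier then reads this combined sequence.
enc = tokenizer(candidate, background, truncation=True,
                max_length=512, return_tensors="pt")
print(tokenizer.decode(enc["input_ids"][0]))
```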
%0 Conference Proceedings
%T Event Causality Recognition Exploiting Multiple Annotators’ Judgments and Background Knowledge
%A Kadowaki, Kazuma
%A Iida, Ryu
%A Torisawa, Kentaro
%A Oh, Jong-Hoon
%A Kloetzer, Julien
%Y Inui, Kentaro
%Y Jiang, Jing
%Y Ng, Vincent
%Y Wan, Xiaojun
%S Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong, China
%F kadowaki-etal-2019-event
%X We propose new BERT-based methods for recognizing event causality such as “smoke cigarettes” → “die of lung cancer” written in web texts. In our methods, we grasp each annotator’s policy by training multiple classifiers, each of which predicts the labels given by a single annotator, and combine the resulting classifiers’ outputs to predict the final labels determined by majority vote. Furthermore, we investigate the effect of supplying background knowledge to our classifiers. Since BERT models are pre-trained with a large corpus, some sort of background knowledge for event causality may be learned during pre-training. Our experiments with a Japanese dataset suggest that this is actually the case: Performance improved when we pre-trained the BERT models with web texts containing a large number of event causalities instead of Wikipedia articles or randomly sampled web texts. However, this effect was limited. Therefore, we further improved performance by simply adding texts related to an input causality candidate as background knowledge to the input of the BERT models. We believe these findings indicate a promising future research direction.
%R 10.18653/v1/D19-1590
%U https://aclanthology.org/D19-1590
%U https://doi.org/10.18653/v1/D19-1590
%P 5816-5822
Markdown (Informal)
[Event Causality Recognition Exploiting Multiple Annotators’ Judgments and Background Knowledge](https://aclanthology.org/D19-1590) (Kadowaki et al., EMNLP-IJCNLP 2019)
ACL
Kazuma Kadowaki, Ryu Iida, Kentaro Torisawa, Jong-Hoon Oh, and Julien Kloetzer. 2019. Event Causality Recognition Exploiting Multiple Annotators’ Judgments and Background Knowledge. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5816–5822, Hong Kong, China. Association for Computational Linguistics.