@inproceedings{konno-etal-2021-pseudo,
    title = "Pseudo Zero Pronoun Resolution Improves Zero Anaphora Resolution",
    author = "Konno, Ryuto and
      Kiyono, Shun and
      Matsubayashi, Yuichiroh and
      Ouchi, Hiroki and
      Inui, Kentaro",
    editor = "Moens, Marie-Francine and
      Huang, Xuanjing and
      Specia, Lucia and
      Yih, Scott Wen-tau",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.308",
    doi = "10.18653/v1/2021.emnlp-main.308",
    pages = "3790--3806",
    abstract = "Masked language models (MLMs) have contributed to drastic performance improvements with regard to zero anaphora resolution (ZAR). To further improve this approach, in this study, we made two proposals. The first is a new pretraining task that trains MLMs on anaphoric relations with explicit supervision, and the second proposal is a new finetuning method that remedies a notorious issue, the pretrain-finetune discrepancy. Our experiments on Japanese ZAR demonstrated that our two proposals boost the state-of-the-art performance, and our detailed analysis provides new insights on the remaining challenges.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="konno-etal-2021-pseudo">
<titleInfo>
<title>Pseudo Zero Pronoun Resolution Improves Zero Anaphora Resolution</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ryuto</namePart>
<namePart type="family">Konno</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shun</namePart>
<namePart type="family">Kiyono</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuichiroh</namePart>
<namePart type="family">Matsubayashi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hiroki</namePart>
<namePart type="family">Ouchi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kentaro</namePart>
<namePart type="family">Inui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marie-Francine</namePart>
<namePart type="family">Moens</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuanjing</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucia</namePart>
<namePart type="family">Specia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Scott</namePart>
<namePart type="given">Wen-tau</namePart>
<namePart type="family">Yih</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online and Punta Cana, Dominican Republic</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Masked language models (MLMs) have contributed to drastic performance improvements with regard to zero anaphora resolution (ZAR). To further improve this approach, in this study, we made two proposals. The first is a new pretraining task that trains MLMs on anaphoric relations with explicit supervision, and the second proposal is a new finetuning method that remedies a notorious issue, the pretrain-finetune discrepancy. Our experiments on Japanese ZAR demonstrated that our two proposals boost the state-of-the-art performance, and our detailed analysis provides new insights on the remaining challenges.</abstract>
<identifier type="citekey">konno-etal-2021-pseudo</identifier>
<identifier type="doi">10.18653/v1/2021.emnlp-main.308</identifier>
<location>
<url>https://aclanthology.org/2021.emnlp-main.308</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>3790</start>
<end>3806</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Pseudo Zero Pronoun Resolution Improves Zero Anaphora Resolution
%A Konno, Ryuto
%A Kiyono, Shun
%A Matsubayashi, Yuichiroh
%A Ouchi, Hiroki
%A Inui, Kentaro
%Y Moens, Marie-Francine
%Y Huang, Xuanjing
%Y Specia, Lucia
%Y Yih, Scott Wen-tau
%S Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online and Punta Cana, Dominican Republic
%F konno-etal-2021-pseudo
%X Masked language models (MLMs) have contributed to drastic performance improvements with regard to zero anaphora resolution (ZAR). To further improve this approach, in this study, we made two proposals. The first is a new pretraining task that trains MLMs on anaphoric relations with explicit supervision, and the second proposal is a new finetuning method that remedies a notorious issue, the pretrain-finetune discrepancy. Our experiments on Japanese ZAR demonstrated that our two proposals boost the state-of-the-art performance, and our detailed analysis provides new insights on the remaining challenges.
%R 10.18653/v1/2021.emnlp-main.308
%U https://aclanthology.org/2021.emnlp-main.308
%U https://doi.org/10.18653/v1/2021.emnlp-main.308
%P 3790-3806
Markdown (Informal)
[Pseudo Zero Pronoun Resolution Improves Zero Anaphora Resolution](https://aclanthology.org/2021.emnlp-main.308) (Konno et al., EMNLP 2021)

ACL
Ryuto Konno, Shun Kiyono, Yuichiroh Matsubayashi, Hiroki Ouchi, and Kentaro Inui. 2021. Pseudo Zero Pronoun Resolution Improves Zero Anaphora Resolution. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3790–3806, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
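
The abstract describes an MLM-based approach to zero anaphora resolution (ZAR), in which a dropped argument is treated as a slot a masked language model can fill. As a rough illustration of that general cloze-style idea only — and emphatically not the paper's pseudo zero pronoun pretraining task or its finetuning method — the sketch below masks the omitted argument and ranks candidate antecedents by the MLM's probability at the masked position. The model name (`bert-base-uncased`), the English stand-in sentence, and the candidate list are all assumptions for demonstration; the paper itself targets Japanese ZAR.

```python
# Illustrative sketch only: generic cloze-style antecedent scoring with an MLM.
# Not the paper's method; model, sentence, and candidates are assumptions.
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

model_name = "bert-base-uncased"  # stand-in; the paper works on Japanese ZAR
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMaskedLM.from_pretrained(model_name)
model.eval()

# Represent the dropped (zero) argument as a [MASK] token, then score each
# candidate antecedent from the context by the MLM's probability at that slot.
text = "Ken bought a book. [MASK] read it on the train."
candidates = ["ken", "book", "train"]

inputs = tokenizer(text, return_tensors="pt")
mask_index = (inputs.input_ids[0] == tokenizer.mask_token_id).nonzero().item()

with torch.no_grad():
    logits = model(**inputs).logits[0, mask_index]
probs = logits.softmax(dim=-1)

for cand in candidates:
    ids = tokenizer(cand, add_special_tokens=False).input_ids
    if len(ids) == 1:  # keep the sketch to single-token candidates
        print(f"{cand}: {probs[ids[0]].item():.4f}")
```

One reason a cloze formulation like this is attractive: it keeps inference in the same fill-in-the-mask form as pretraining, which is one common way to narrow the pretrain-finetune discrepancy the abstract mentions, since the [MASK] token is otherwise seen only during pretraining.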