BibTeX
@inproceedings{kocijan-etal-2019-surprisingly,
    title = "A Surprisingly Robust Trick for the {W}inograd Schema Challenge",
    author = "Kocijan, Vid  and
      Cretu, Ana-Maria  and
      Camburu, Oana-Maria  and
      Yordanov, Yordan  and
      Lukasiewicz, Thomas",
    editor = "Korhonen, Anna  and
      Traum, David  and
      M{\`a}rquez, Llu{\'\i}s",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P19-1478",
    doi = "10.18653/v1/P19-1478",
    pages = "4837--4842",
    abstract = "The Winograd Schema Challenge (WSC) dataset WSC273 and its inference counterpart WNLI are popular benchmarks for natural language understanding and commonsense reasoning. In this paper, we show that the performance of three language models on WSC273 consistently and robustly improves when fine-tuned on a similar pronoun disambiguation problem dataset (denoted WSCR). We additionally generate a large unsupervised WSC-like dataset. By fine-tuning the BERT language model both on the introduced and on the WSCR dataset, we achieve overall accuracies of 72.5{\%} and 74.7{\%} on WSC273 and WNLI, improving the previous state-of-the-art solutions by 8.8{\%} and 9.6{\%}, respectively. Furthermore, our fine-tuned models are also consistently more accurate on the {``}complex{''} subsets of WSC273, introduced by Trichelair et al. (2018).",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kocijan-etal-2019-surprisingly">
    <titleInfo>
      <title>A Surprisingly Robust Trick for the Winograd Schema Challenge</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Vid</namePart>
      <namePart type="family">Kocijan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ana-Maria</namePart>
      <namePart type="family">Cretu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Oana-Maria</namePart>
      <namePart type="family">Camburu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yordan</namePart>
      <namePart type="family">Yordanov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Thomas</namePart>
      <namePart type="family">Lukasiewicz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Korhonen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">David</namePart>
        <namePart type="family">Traum</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Lluís</namePart>
        <namePart type="family">Màrquez</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Florence, Italy</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The Winograd Schema Challenge (WSC) dataset WSC273 and its inference counterpart WNLI are popular benchmarks for natural language understanding and commonsense reasoning. In this paper, we show that the performance of three language models on WSC273 consistently and robustly improves when fine-tuned on a similar pronoun disambiguation problem dataset (denoted WSCR). We additionally generate a large unsupervised WSC-like dataset. By fine-tuning the BERT language model both on the introduced and on the WSCR dataset, we achieve overall accuracies of 72.5% and 74.7% on WSC273 and WNLI, improving the previous state-of-the-art solutions by 8.8% and 9.6%, respectively. Furthermore, our fine-tuned models are also consistently more accurate on the “complex” subsets of WSC273, introduced by Trichelair et al. (2018).</abstract>
    <identifier type="citekey">kocijan-etal-2019-surprisingly</identifier>
    <identifier type="doi">10.18653/v1/P19-1478</identifier>
    <location>
      <url>https://aclanthology.org/P19-1478</url>
    </location>
    <part>
      <date>2019-07</date>
      <extent unit="page">
        <start>4837</start>
        <end>4842</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T A Surprisingly Robust Trick for the Winograd Schema Challenge
%A Kocijan, Vid
%A Cretu, Ana-Maria
%A Camburu, Oana-Maria
%A Yordanov, Yordan
%A Lukasiewicz, Thomas
%Y Korhonen, Anna
%Y Traum, David
%Y Màrquez, Lluís
%S Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics
%D 2019
%8 July
%I Association for Computational Linguistics
%C Florence, Italy
%F kocijan-etal-2019-surprisingly
%X The Winograd Schema Challenge (WSC) dataset WSC273 and its inference counterpart WNLI are popular benchmarks for natural language understanding and commonsense reasoning. In this paper, we show that the performance of three language models on WSC273 consistently and robustly improves when fine-tuned on a similar pronoun disambiguation problem dataset (denoted WSCR). We additionally generate a large unsupervised WSC-like dataset. By fine-tuning the BERT language model both on the introduced and on the WSCR dataset, we achieve overall accuracies of 72.5% and 74.7% on WSC273 and WNLI, improving the previous state-of-the-art solutions by 8.8% and 9.6%, respectively. Furthermore, our fine-tuned models are also consistently more accurate on the “complex” subsets of WSC273, introduced by Trichelair et al. (2018).
%R 10.18653/v1/P19-1478
%U https://aclanthology.org/P19-1478
%U https://doi.org/10.18653/v1/P19-1478
%P 4837-4842
Markdown (Informal)
[A Surprisingly Robust Trick for the Winograd Schema Challenge](https://aclanthology.org/P19-1478) (Kocijan et al., ACL 2019)

ACL
Vid Kocijan, Ana-Maria Cretu, Oana-Maria Camburu, Yordan Yordanov, and Thomas Lukasiewicz. 2019. A Surprisingly Robust Trick for the Winograd Schema Challenge. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4837–4842, Florence, Italy. Association for Computational Linguistics.
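The abstract describes fine-tuning BERT as a language model on WSC-style pronoun disambiguation data. As a rough illustration of how such a model can be applied to a Winograd schema at inference time, here is a minimal masked-LM candidate-scoring sketch in Python. It assumes the Hugging Face transformers library, an off-the-shelf `bert-base-uncased` checkpoint, and a `_` placeholder marking the pronoun slot; the scoring rule (summed log-probability of the candidate's wordpieces) is one common convention for this kind of evaluation, not necessarily the authors' exact procedure.

```python
# Minimal sketch: scoring Winograd schema candidates with a masked LM.
# Assumptions (not from the paper): bert-base-uncased checkpoint, "_" marks
# the pronoun slot, and candidates are ranked by summed wordpiece log-prob.
import torch
from transformers import BertForMaskedLM, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()

def score_candidate(sentence_with_blank: str, candidate: str) -> float:
    """Replace the blank with one [MASK] per candidate wordpiece and sum
    the log-probabilities the model assigns to those wordpieces."""
    cand_ids = tokenizer.encode(candidate, add_special_tokens=False)
    masked = sentence_with_blank.replace(
        "_", " ".join([tokenizer.mask_token] * len(cand_ids))
    )
    inputs = tokenizer(masked, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    log_probs = torch.log_softmax(logits[0], dim=-1)
    # Positions of the [MASK] tokens, in left-to-right order.
    mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(
        as_tuple=True
    )[0]
    return sum(log_probs[p, t].item() for p, t in zip(mask_pos, cand_ids))

# Example WSC-style instance; the higher-scoring candidate is the prediction.
sentence = "The trophy doesn't fit into the suitcase because _ is too big."
for cand in ["the trophy", "the suitcase"]:
    print(cand, score_candidate(sentence, cand))
```

In the paper's setup, a BERT model fine-tuned on WSCR and the generated WSC-like data would stand in for the off-the-shelf checkpoint loaded above; the abstract's reported gains on WSC273 and WNLI come from that fine-tuning, not from the scoring rule itself.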