@inproceedings{do-etal-2016-facing,
    title = "Facing the most difficult case of Semantic Role Labeling: A collaboration of word embeddings and co-training",
    author = "Do, Quynh Ngoc Thi  and
      Bethard, Steven  and
      Moens, Marie-Francine",
    editor = "Matsumoto, Yuji  and
      Prasad, Rashmi",
    booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers",
    month = dec,
    year = "2016",
    address = "Osaka, Japan",
    publisher = "The COLING 2016 Organizing Committee",
    url = "https://aclanthology.org/C16-1121/",
    pages = "1275--1284",
    abstract = "We present a successful collaboration of word embeddings and co-training to tackle in the most difficult test case of semantic role labeling: predicting out-of-domain and unseen semantic frames. Despite the fact that co-training is a successful traditional semi-supervised method, its application in SRL is very limited especially when a huge amount of labeled data is available. In this work, co-training is used together with word embeddings to improve the performance of a system trained on a large training dataset. We also introduce a semantic role labeling system with a simple learning architecture and effective inference that is easily adaptable to semi-supervised settings with new training data and/or new features. On the out-of-domain testing set of the standard benchmark CoNLL 2009 data our simple approach achieves high performance and improves state-of-the-art results."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="do-etal-2016-facing">
    <titleInfo>
        <title>Facing the most difficult case of Semantic Role Labeling: A collaboration of word embeddings and co-training</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Quynh</namePart>
        <namePart type="given">Ngoc</namePart>
        <namePart type="given">Thi</namePart>
        <namePart type="family">Do</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Steven</namePart>
        <namePart type="family">Bethard</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Marie-Francine</namePart>
        <namePart type="family">Moens</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2016-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Yuji</namePart>
            <namePart type="family">Matsumoto</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Rashmi</namePart>
            <namePart type="family">Prasad</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>The COLING 2016 Organizing Committee</publisher>
            <place>
                <placeTerm type="text">Osaka, Japan</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We present a successful collaboration of word embeddings and co-training to tackle the most difficult test case of semantic role labeling: predicting out-of-domain and unseen semantic frames. Although co-training is a successful traditional semi-supervised method, its application in SRL is very limited, especially when a huge amount of labeled data is available. In this work, co-training is used together with word embeddings to improve the performance of a system trained on a large training dataset. We also introduce a semantic role labeling system with a simple learning architecture and effective inference that is easily adaptable to semi-supervised settings with new training data and/or new features. On the out-of-domain test set of the standard CoNLL 2009 benchmark, our simple approach achieves high performance and improves state-of-the-art results.</abstract>
    <identifier type="citekey">do-etal-2016-facing</identifier>
    <location>
        <url>https://aclanthology.org/C16-1121/</url>
    </location>
    <part>
        <date>2016-12</date>
        <extent unit="page">
            <start>1275</start>
            <end>1284</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Facing the most difficult case of Semantic Role Labeling: A collaboration of word embeddings and co-training
%A Do, Quynh Ngoc Thi
%A Bethard, Steven
%A Moens, Marie-Francine
%Y Matsumoto, Yuji
%Y Prasad, Rashmi
%S Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers
%D 2016
%8 December
%I The COLING 2016 Organizing Committee
%C Osaka, Japan
%F do-etal-2016-facing
%X We present a successful collaboration of word embeddings and co-training to tackle the most difficult test case of semantic role labeling: predicting out-of-domain and unseen semantic frames. Although co-training is a successful traditional semi-supervised method, its application in SRL is very limited, especially when a huge amount of labeled data is available. In this work, co-training is used together with word embeddings to improve the performance of a system trained on a large training dataset. We also introduce a semantic role labeling system with a simple learning architecture and effective inference that is easily adaptable to semi-supervised settings with new training data and/or new features. On the out-of-domain test set of the standard CoNLL 2009 benchmark, our simple approach achieves high performance and improves state-of-the-art results.
%U https://aclanthology.org/C16-1121/
%P 1275-1284
Markdown (Informal)
[Facing the most difficult case of Semantic Role Labeling: A collaboration of word embeddings and co-training](https://aclanthology.org/C16-1121/) (Do et al., COLING 2016)
ACL
Quynh Ngoc Thi Do, Steven Bethard, and Marie-Francine Moens. 2016. Facing the most difficult case of Semantic Role Labeling: A collaboration of word embeddings and co-training. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 1275–1284, Osaka, Japan. The COLING 2016 Organizing Committee.