% W-NUT 2021 workshop paper (ACL Anthology: 2021.wnut-1.41).
% Braces (not quotes) delimit values; {W-NUT} is braced so sentence-casing
% styles cannot downcase the acronym; month uses the predefined macro.
@inproceedings{kashefi-hwa-2021-contrapositive,
  title     = {Contrapositive Local Class Inference},
  author    = {Kashefi, Omid and
               Hwa, Rebecca},
  booktitle = {Proceedings of the Seventh Workshop on Noisy User-generated Text ({W-NUT} 2021)},
  month     = nov,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.wnut-1.41},
  doi       = {10.18653/v1/2021.wnut-1.41},
  pages     = {371--380},
  abstract  = {Certain types of classification problems may be performed at multiple levels of granularity; for example, we might want to know the sentiment polarity of a document or a sentence, or a phrase. Often, the prediction at a greater-context (e.g., sentences or paragraphs) may be informative for a more localized prediction at a smaller semantic unit (e.g., words or phrases). However, directly inferring the most salient local features from the global prediction may overlook the semantics of this relationship. This work argues that inference along the contraposition relationship of the local prediction and the corresponding global prediction makes an inference framework that is more accurate and robust to noise. We show how this contraposition framework can be implemented as a transfer function that rewrites a greater-context from one class to another and demonstrate how an appropriate transfer function can be trained from a noisy user-generated corpus. The experimental results validate our insight that the proposed contrapositive framework outperforms the alternative approaches on resource-constrained problem domains.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kashefi-hwa-2021-contrapositive">
<titleInfo>
<title>Contrapositive Local Class Inference</title>
</titleInfo>
<name type="personal">
<namePart type="given">Omid</namePart>
<namePart type="family">Kashefi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rebecca</namePart>
<namePart type="family">Hwa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Certain types of classification problems may be performed at multiple levels of granularity; for example, we might want to know the sentiment polarity of a document or a sentence, or a phrase. Often, the prediction at a greater-context (e.g., sentences or paragraphs) may be informative for a more localized prediction at a smaller semantic unit (e.g., words or phrases). However, directly inferring the most salient local features from the global prediction may overlook the semantics of this relationship. This work argues that inference along the contraposition relationship of the local prediction and the corresponding global prediction makes an inference framework that is more accurate and robust to noise. We show how this contraposition framework can be implemented as a transfer function that rewrites a greater-context from one class to another and demonstrate how an appropriate transfer function can be trained from a noisy user-generated corpus. The experimental results validate our insight that the proposed contrapositive framework outperforms the alternative approaches on resource-constrained problem domains.</abstract>
<identifier type="citekey">kashefi-hwa-2021-contrapositive</identifier>
<identifier type="doi">10.18653/v1/2021.wnut-1.41</identifier>
<location>
<url>https://aclanthology.org/2021.wnut-1.41</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>371</start>
<end>380</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Contrapositive Local Class Inference
%A Kashefi, Omid
%A Hwa, Rebecca
%S Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F kashefi-hwa-2021-contrapositive
%X Certain types of classification problems may be performed at multiple levels of granularity; for example, we might want to know the sentiment polarity of a document or a sentence, or a phrase. Often, the prediction at a greater-context (e.g., sentences or paragraphs) may be informative for a more localized prediction at a smaller semantic unit (e.g., words or phrases). However, directly inferring the most salient local features from the global prediction may overlook the semantics of this relationship. This work argues that inference along the contraposition relationship of the local prediction and the corresponding global prediction makes an inference framework that is more accurate and robust to noise. We show how this contraposition framework can be implemented as a transfer function that rewrites a greater-context from one class to another and demonstrate how an appropriate transfer function can be trained from a noisy user-generated corpus. The experimental results validate our insight that the proposed contrapositive framework outperforms the alternative approaches on resource-constrained problem domains.
%R 10.18653/v1/2021.wnut-1.41
%U https://aclanthology.org/2021.wnut-1.41
%U https://doi.org/10.18653/v1/2021.wnut-1.41
%P 371-380
Markdown (Informal)
[Contrapositive Local Class Inference](https://aclanthology.org/2021.wnut-1.41) (Kashefi & Hwa, WNUT 2021)
ACL
- Omid Kashefi and Rebecca Hwa. 2021. Contrapositive Local Class Inference. In Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021), pages 371–380, Online. Association for Computational Linguistics.