@inproceedings{meng-degen-2022-conisi,
title = "{C}on{I}s{I}: A Contrastive Framework with Inter-sentence Interaction for Self-supervised Sentence Representation",
author = "Sun, Meng and Huang, Degen",
booktitle = "Proceedings of the 21st Chinese National Conference on Computational Linguistics",
month = oct,
year = "2022",
address = "Nanchang, China",
publisher = "Chinese Information Processing Society of China",
url = "https://aclanthology.org/2022.ccl-1.67",
pages = "748--760",
abstract = "Learning sentence representation is a fundamental task in natural language processing and has been studied extensively. Recently, many works have obtained high-quality sentence representations based on contrastive learning from pre-trained models. However, these works suffer from an inconsistency of input forms between the pre-training and fine-tuning stages. Also, they typically encode a sentence independently and lack feature interaction between sentences. To address these issues, we propose a novel Contrastive framework with Inter-sentence Interaction (ConIsI), which introduces a sentence-level objective to improve sentence representation based on contrastive learning by fine-grained interaction between sentences. The sentence-level objective guides the model to focus on fine-grained semantic information through feature interaction between sentences, and we design three different sentence construction strategies to explore its effect. We conduct experiments on seven Semantic Textual Similarity (STS) tasks. The experimental results show that our ConIsI models based on BERT-base and RoBERTa-base achieve state-of-the-art performance, substantially outperforming the previous best models SimCSE-BERT-base and SimCSE-RoBERTa-base by 2.05{\%} and 0.77{\%} respectively.",
language = "English",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="meng-degen-2022-conisi">
    <titleInfo>
        <title>ConIsI: A Contrastive Framework with Inter-sentence Interaction for Self-supervised Sentence Representation</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Meng</namePart>
        <namePart type="family">Sun</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Degen</namePart>
        <namePart type="family">Huang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2022-10</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <language>
        <languageTerm type="text">English</languageTerm>
        <languageTerm type="code" authority="iso639-2b">eng</languageTerm>
    </language>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 21st Chinese National Conference on Computational Linguistics</title>
        </titleInfo>
        <originInfo>
            <publisher>Chinese Information Processing Society of China</publisher>
            <place>
                <placeTerm type="text">Nanchang, China</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Learning sentence representation is a fundamental task in natural language processing and has been studied extensively. Recently, many works have obtained high-quality sentence representations based on contrastive learning from pre-trained models. However, these works suffer from an inconsistency of input forms between the pre-training and fine-tuning stages. Also, they typically encode a sentence independently and lack feature interaction between sentences. To address these issues, we propose a novel Contrastive framework with Inter-sentence Interaction (ConIsI), which introduces a sentence-level objective to improve sentence representation based on contrastive learning by fine-grained interaction between sentences. The sentence-level objective guides the model to focus on fine-grained semantic information through feature interaction between sentences, and we design three different sentence construction strategies to explore its effect. We conduct experiments on seven Semantic Textual Similarity (STS) tasks. The experimental results show that our ConIsI models based on BERT-base and RoBERTa-base achieve state-of-the-art performance, substantially outperforming the previous best models SimCSE-BERT-base and SimCSE-RoBERTa-base by 2.05% and 0.77% respectively.</abstract>
    <identifier type="citekey">meng-degen-2022-conisi</identifier>
    <location>
        <url>https://aclanthology.org/2022.ccl-1.67</url>
    </location>
    <part>
        <date>2022-10</date>
        <extent unit="page">
            <start>748</start>
            <end>760</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T ConIsI: A Contrastive Framework with Inter-sentence Interaction for Self-supervised Sentence Representation
%A Sun, Meng
%A Huang, Degen
%S Proceedings of the 21st Chinese National Conference on Computational Linguistics
%D 2022
%8 October
%I Chinese Information Processing Society of China
%C Nanchang, China
%G English
%F meng-degen-2022-conisi
%X Learning sentence representation is a fundamental task in natural language processing and has been studied extensively. Recently, many works have obtained high-quality sentence representations based on contrastive learning from pre-trained models. However, these works suffer from an inconsistency of input forms between the pre-training and fine-tuning stages. Also, they typically encode a sentence independently and lack feature interaction between sentences. To address these issues, we propose a novel Contrastive framework with Inter-sentence Interaction (ConIsI), which introduces a sentence-level objective to improve sentence representation based on contrastive learning by fine-grained interaction between sentences. The sentence-level objective guides the model to focus on fine-grained semantic information through feature interaction between sentences, and we design three different sentence construction strategies to explore its effect. We conduct experiments on seven Semantic Textual Similarity (STS) tasks. The experimental results show that our ConIsI models based on BERT-base and RoBERTa-base achieve state-of-the-art performance, substantially outperforming the previous best models SimCSE-BERT-base and SimCSE-RoBERTa-base by 2.05% and 0.77% respectively.
%U https://aclanthology.org/2022.ccl-1.67
%P 748-760
Markdown (Informal)
[ConIsI: A Contrastive Framework with Inter-sentence Interaction for Self-supervised Sentence Representation](https://aclanthology.org/2022.ccl-1.67) (Sun & Huang, CCL 2022)
ACL
Meng Sun and Degen Huang. 2022. ConIsI: A Contrastive Framework with Inter-sentence Interaction for Self-supervised Sentence Representation. In Proceedings of the 21st Chinese National Conference on Computational Linguistics, pages 748–760, Nanchang, China. Chinese Information Processing Society of China.