@inproceedings{yamagiwa-etal-2023-improving,
    title = "Improving word mover{'}s distance by leveraging self-attention matrix",
    author = "Yamagiwa, Hiroaki  and
      Yokoi, Sho  and
      Shimodaira, Hidetoshi",
    editor = "Bouamor, Houda  and
      Pino, Juan  and
      Bali, Kalika",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-emnlp.746",
    doi = "10.18653/v1/2023.findings-emnlp.746",
    pages = "11160--11183",
    abstract = "Measuring the semantic similarity between two sentences is still an important task. The word mover{'}s distance (WMD) computes the similarity via the optimal alignment between the sets of word embeddings. However, WMD does not utilize word order, making it challenging to distinguish sentences with significant overlaps of similar words, even if they are semantically very different. Here, we attempt to improve WMD by incorporating the sentence structure represented by BERT{'}s self-attention matrix (SAM). The proposed method is based on the Fused Gromov-Wasserstein distance, which simultaneously considers the similarity of the word embedding and the SAM for calculating the optimal transport between two sentences. Experiments demonstrate the proposed method enhances WMD and its variants in paraphrase identification with near-equivalent performance in semantic textual similarity.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yamagiwa-etal-2023-improving">
    <titleInfo>
      <title>Improving word mover’s distance by leveraging self-attention matrix</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Hiroaki</namePart>
      <namePart type="family">Yamagiwa</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sho</namePart>
      <namePart type="family">Yokoi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hidetoshi</namePart>
      <namePart type="family">Shimodaira</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Houda</namePart>
        <namePart type="family">Bouamor</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Juan</namePart>
        <namePart type="family">Pino</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kalika</namePart>
        <namePart type="family">Bali</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Singapore</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Measuring the semantic similarity between two sentences is still an important task. The word mover’s distance (WMD) computes the similarity via the optimal alignment between the sets of word embeddings. However, WMD does not utilize word order, making it challenging to distinguish sentences with significant overlaps of similar words, even if they are semantically very different. Here, we attempt to improve WMD by incorporating the sentence structure represented by BERT’s self-attention matrix (SAM). The proposed method is based on the Fused Gromov-Wasserstein distance, which simultaneously considers the similarity of the word embedding and the SAM for calculating the optimal transport between two sentences. Experiments demonstrate the proposed method enhances WMD and its variants in paraphrase identification with near-equivalent performance in semantic textual similarity.</abstract>
    <identifier type="citekey">yamagiwa-etal-2023-improving</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-emnlp.746</identifier>
    <location>
      <url>https://aclanthology.org/2023.findings-emnlp.746</url>
    </location>
    <part>
      <date>2023-12</date>
      <extent unit="page">
        <start>11160</start>
        <end>11183</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Improving word mover’s distance by leveraging self-attention matrix
%A Yamagiwa, Hiroaki
%A Yokoi, Sho
%A Shimodaira, Hidetoshi
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F yamagiwa-etal-2023-improving
%X Measuring the semantic similarity between two sentences is still an important task. The word mover’s distance (WMD) computes the similarity via the optimal alignment between the sets of word embeddings. However, WMD does not utilize word order, making it challenging to distinguish sentences with significant overlaps of similar words, even if they are semantically very different. Here, we attempt to improve WMD by incorporating the sentence structure represented by BERT’s self-attention matrix (SAM). The proposed method is based on the Fused Gromov-Wasserstein distance, which simultaneously considers the similarity of the word embedding and the SAM for calculating the optimal transport between two sentences. Experiments demonstrate the proposed method enhances WMD and its variants in paraphrase identification with near-equivalent performance in semantic textual similarity.
%R 10.18653/v1/2023.findings-emnlp.746
%U https://aclanthology.org/2023.findings-emnlp.746
%U https://doi.org/10.18653/v1/2023.findings-emnlp.746
%P 11160-11183
Markdown (Informal)
[Improving word mover’s distance by leveraging self-attention matrix](https://aclanthology.org/2023.findings-emnlp.746) (Yamagiwa et al., Findings 2023)
ACL
Hiroaki Yamagiwa, Sho Yokoi, and Hidetoshi Shimodaira. 2023. Improving word mover’s distance by leveraging self-attention matrix. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11160–11183, Singapore. Association for Computational Linguistics.
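
For readers who want to experiment with the recipe the abstract describes, here is a minimal sketch, assuming the POT (Python Optimal Transport) library: plain WMD solves optimal transport over word embeddings alone, while the fused variant adds each sentence's self-attention matrix (SAM) as a structure term via the Fused Gromov-Wasserstein (FGW) distance. The random stand-in embeddings and SAMs, the Euclidean cost, the symmetrization, and alpha = 0.5 are all assumptions for illustration; this is not the authors' implementation.

# Illustrative sketch of WMD vs. a SAM-fused FGW distance (not the paper's code).
import numpy as np
import ot  # POT: pip install pot

def wmd(X, Y):
    """Word mover's distance: optimal transport over word embeddings only,
    which ignores word order entirely."""
    p = np.full(len(X), 1.0 / len(X))  # uniform mass over tokens
    q = np.full(len(Y), 1.0 / len(Y))
    M = ot.dist(X, Y, metric="euclidean")  # token-to-token embedding costs
    return ot.emd2(p, q, M)  # exact OT cost

def fgw_with_sam(X, Y, A1, A2, alpha=0.5):
    """FGW distance fusing the embedding cost matrix with the two sentences'
    self-attention matrices as intra-sentence structure. alpha weighs the
    structural (Gromov-Wasserstein) term against the feature (Wasserstein)
    term; 0.5 here is an arbitrary stand-in."""
    p = np.full(len(X), 1.0 / len(X))
    q = np.full(len(Y), 1.0 / len(Y))
    M = ot.dist(X, Y, metric="euclidean")
    # Symmetrize the SAMs: an assumption made so POT's solver gets symmetric
    # structure matrices; the paper may process the SAM differently.
    C1 = (A1 + A1.T) / 2
    C2 = (A2 + A2.T) / 2
    return ot.gromov.fused_gromov_wasserstein2(
        M, C1, C2, p, q, loss_fun="square_loss", alpha=alpha)

# Toy stand-ins: 5- and 7-token sentences, 768-dim embeddings,
# row-stochastic attention matrices (as a softmax over keys would produce).
rng = np.random.default_rng(0)
X, Y = rng.normal(size=(5, 768)), rng.normal(size=(7, 768))
A1 = rng.random((5, 5)); A1 /= A1.sum(axis=1, keepdims=True)
A2 = rng.random((7, 7)); A2 /= A2.sum(axis=1, keepdims=True)
print("WMD:", wmd(X, Y))
print("FGW:", fgw_with_sam(X, Y, A1, A2))

In practice the embeddings and attention matrices would come from a BERT forward pass (e.g. with output_attentions=True in HuggingFace Transformers); which layer and head to take the SAM from, and how costs are normalized, are details this sketch does not attempt to reproduce.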