@inproceedings{ji-etal-2023-improving,
title = "Improving Span Representation by Efficient Span-Level Attention",
author = "Ji, Pengyu and
Yang, Songlin and
Tu, Kewei",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-emnlp.747",
doi = "10.18653/v1/2023.findings-emnlp.747",
pages = "11184--11192",
abstract = "High-quality span representations are crucial to natural language processing tasks involving span prediction and classification. Most existing methods derive a span representation by aggregation of token representations within the span. In contrast, we aim to improve span representations by considering span-span interactions as well as more comprehensive span-token interactions. Specifically, we introduce layers of span-level attention on top of a normal token-level transformer encoder. Given that attention between all span pairs results in $O(n^4)$ complexity ($n$ being the sentence length) and not all span interactions are intuitively meaningful, we restrict the range of spans that a given span could attend to, thereby reducing overall complexity to $O(n^3)$. We conduct experiments on various span-related tasks and show superior performance of our model surpassing baseline models. Our code is publicly available at \url{https://github.com/jipy0222/Span-Level-Attention}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="ji-etal-2023-improving">
    <titleInfo>
      <title>Improving Span Representation by Efficient Span-Level Attention</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Pengyu</namePart>
      <namePart type="family">Ji</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Songlin</namePart>
      <namePart type="family">Yang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kewei</namePart>
      <namePart type="family">Tu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Houda</namePart>
        <namePart type="family">Bouamor</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Juan</namePart>
        <namePart type="family">Pino</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kalika</namePart>
        <namePart type="family">Bali</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Singapore</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>High-quality span representations are crucial to natural language processing tasks involving span prediction and classification. Most existing methods derive a span representation by aggregation of token representations within the span. In contrast, we aim to improve span representations by considering span-span interactions as well as more comprehensive span-token interactions. Specifically, we introduce layers of span-level attention on top of a normal token-level transformer encoder. Given that attention between all span pairs results in O(n⁴) complexity (n being the sentence length) and not all span interactions are intuitively meaningful, we restrict the range of spans that a given span could attend to, thereby reducing overall complexity to O(n³). We conduct experiments on various span-related tasks and show superior performance of our model surpassing baseline models. Our code is publicly available at https://github.com/jipy0222/Span-Level-Attention.</abstract>
    <identifier type="citekey">ji-etal-2023-improving</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-emnlp.747</identifier>
    <location>
      <url>https://aclanthology.org/2023.findings-emnlp.747</url>
    </location>
    <part>
      <date>2023-12</date>
      <extent unit="page">
        <start>11184</start>
        <end>11192</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Improving Span Representation by Efficient Span-Level Attention
%A Ji, Pengyu
%A Yang, Songlin
%A Tu, Kewei
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F ji-etal-2023-improving
%X High-quality span representations are crucial to natural language processing tasks involving span prediction and classification. Most existing methods derive a span representation by aggregation of token representations within the span. In contrast, we aim to improve span representations by considering span-span interactions as well as more comprehensive span-token interactions. Specifically, we introduce layers of span-level attention on top of a normal token-level transformer encoder. Given that attention between all span pairs results in O(n⁴) complexity (n being the sentence length) and not all span interactions are intuitively meaningful, we restrict the range of spans that a given span could attend to, thereby reducing overall complexity to O(n³). We conduct experiments on various span-related tasks and show superior performance of our model surpassing baseline models. Our code is publicly available at https://github.com/jipy0222/Span-Level-Attention.
%R 10.18653/v1/2023.findings-emnlp.747
%U https://aclanthology.org/2023.findings-emnlp.747
%U https://doi.org/10.18653/v1/2023.findings-emnlp.747
%P 11184-11192
Markdown (Informal)
[Improving Span Representation by Efficient Span-Level Attention](https://aclanthology.org/2023.findings-emnlp.747) (Ji et al., Findings 2023)
ACL
Pengyu Ji, Songlin Yang, and Kewei Tu. 2023. Improving Span Representation by Efficient Span-Level Attention. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11184–11192, Singapore. Association for Computational Linguistics.
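
The abstract above sketches the paper's core idea: span-level attention layers stacked on top of a token-level transformer encoder, with each span restricted to a limited set of attendable spans so that the cost drops from O(n⁴) to O(n³). The snippet below is a minimal, illustrative PyTorch sketch of that general recipe, not the authors' implementation (see the linked repository for that). In particular, the `endpoint_sharing_mask` restriction rule, the mean-pooled initial span representations, and all dimensions are assumptions chosen only to show how limiting each span to O(n) attendable spans yields an O(n³) total.

```python
# Illustrative sketch only: the paper's exact restriction scheme is not
# stated in the abstract; the "shares-an-endpoint" rule below is a
# hypothetical stand-in with the same asymptotic effect.
import torch
import torch.nn as nn


def enumerate_spans(n):
    """All (start, end) spans of a length-n sentence, end inclusive."""
    return [(i, j) for i in range(n) for j in range(i, n)]


def endpoint_sharing_mask(spans):
    """Boolean mask M[a, b] = True iff span a may attend to span b.

    Hypothetical restriction: a span attends to itself and to any span
    sharing one of its endpoints, so each span attends to O(n) others and
    the total number of attended pairs is O(n^3) rather than O(n^4)."""
    m = len(spans)
    mask = torch.zeros(m, m, dtype=torch.bool)
    for a, (i, j) in enumerate(spans):
        for b, (k, l) in enumerate(spans):
            if i == k or i == l or j == k or j == l:
                mask[a, b] = True
    return mask


class SpanLevelAttention(nn.Module):
    """One span-level attention layer over span representations."""

    def __init__(self, d_model, n_heads=4):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, span_reprs, allowed):
        # span_reprs: (1, num_spans, d_model); allowed: (num_spans, num_spans)
        # Pairs not in `allowed` are masked out of the attention weights.
        out, _ = self.attn(span_reprs, span_reprs, span_reprs,
                           attn_mask=~allowed)
        return self.norm(span_reprs + out)


if __name__ == "__main__":
    n, d = 6, 32
    token_reprs = torch.randn(n, d)  # stand-in for a token-level encoder
    spans = enumerate_spans(n)
    # Initial span representations by mean-pooling tokens inside each span,
    # one common aggregation baseline mentioned in the abstract.
    span_reprs = torch.stack(
        [token_reprs[i:j + 1].mean(dim=0) for (i, j) in spans]
    ).unsqueeze(0)
    layer = SpanLevelAttention(d)
    refined = layer(span_reprs, endpoint_sharing_mask(spans))
    print(refined.shape)  # (1, n*(n+1)/2, d) = (1, 21, 32)
```

Any other restriction that leaves each span with O(n) attendable spans (for example, sub-spans or containing spans only) would give the same asymptotic saving; for the scheme the authors actually use, consult the paper and the repository at https://github.com/jipy0222/Span-Level-Attention.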