@inproceedings{finegan-dollak-verma-2020-layout,
title = "Layout-Aware Text Representations Harm Clustering Documents by Type",
author = "Finegan-Dollak, Catherine and
Verma, Ashish",
editor = "Rogers, Anna and
Sedoc, Jo{\~a}o and
Rumshisky, Anna",
booktitle = "Proceedings of the First Workshop on Insights from Negative Results in NLP",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.insights-1.9",
doi = "10.18653/v1/2020.insights-1.9",
pages = "60--65",
abstract = "Clustering documents by type{---}grouping invoices with invoices and articles with articles{---}is a desirable first step for organizing large collections of document scans. Humans approaching this task use both the semantics of the text and the document layout to assist in grouping like documents. LayoutLM (Xu et al., 2019), a layout-aware transformer built on top of BERT with state-of-the-art performance on document-type classification, could reasonably be expected to outperform regular BERT (Devlin et al., 2018) for document-type clustering. However, we find experimentally that BERT significantly outperforms LayoutLM on this task (p {\textless}0.001). We analyze clusters to show where layout awareness is an asset and where it is a liability.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="finegan-dollak-verma-2020-layout">
<titleInfo>
<title>Layout-Aware Text Representations Harm Clustering Documents by Type</title>
</titleInfo>
<name type="personal">
<namePart type="given">Catherine</namePart>
<namePart type="family">Finegan-Dollak</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ashish</namePart>
<namePart type="family">Verma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Insights from Negative Results in NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rogers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">João</namePart>
<namePart type="family">Sedoc</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rumshisky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Clustering documents by type—grouping invoices with invoices and articles with articles—is a desirable first step for organizing large collections of document scans. Humans approaching this task use both the semantics of the text and the document layout to assist in grouping like documents. LayoutLM (Xu et al., 2019), a layout-aware transformer built on top of BERT with state-of-the-art performance on document-type classification, could reasonably be expected to outperform regular BERT (Devlin et al., 2018) for document-type clustering. However, we find experimentally that BERT significantly outperforms LayoutLM on this task (p &lt;0.001). We analyze clusters to show where layout awareness is an asset and where it is a liability.</abstract>
<identifier type="citekey">finegan-dollak-verma-2020-layout</identifier>
<identifier type="doi">10.18653/v1/2020.insights-1.9</identifier>
<location>
<url>https://aclanthology.org/2020.insights-1.9</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>60</start>
<end>65</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Layout-Aware Text Representations Harm Clustering Documents by Type
%A Finegan-Dollak, Catherine
%A Verma, Ashish
%Y Rogers, Anna
%Y Sedoc, João
%Y Rumshisky, Anna
%S Proceedings of the First Workshop on Insights from Negative Results in NLP
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F finegan-dollak-verma-2020-layout
%X Clustering documents by type—grouping invoices with invoices and articles with articles—is a desirable first step for organizing large collections of document scans. Humans approaching this task use both the semantics of the text and the document layout to assist in grouping like documents. LayoutLM (Xu et al., 2019), a layout-aware transformer built on top of BERT with state-of-the-art performance on document-type classification, could reasonably be expected to outperform regular BERT (Devlin et al., 2018) for document-type clustering. However, we find experimentally that BERT significantly outperforms LayoutLM on this task (p <0.001). We analyze clusters to show where layout awareness is an asset and where it is a liability.
%R 10.18653/v1/2020.insights-1.9
%U https://aclanthology.org/2020.insights-1.9
%U https://doi.org/10.18653/v1/2020.insights-1.9
%P 60-65
Markdown (Informal)
[Layout-Aware Text Representations Harm Clustering Documents by Type](https://aclanthology.org/2020.insights-1.9) (Finegan-Dollak & Verma, insights 2020)
ACL