@inproceedings{shwartz-2021-long,
    title = "A Long Hard Look at {MWE}s in the Age of Language Models",
    author = "Shwartz, Vered",
    editor = "Cook, Paul and
      Mitrovi{\'c}, Jelena and
      Escart{\'\i}n, Carla Parra and
      Vaidya, Ashwini and
      Osenova, Petya and
      Taslimipoor, Shiva and
      Ramisch, Carlos",
    booktitle = "Proceedings of the 17th Workshop on Multiword Expressions (MWE 2021)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.mwe-1.1",
    doi = "10.18653/v1/2021.mwe-1.1",
    pages = "1",
    abstract = "In recent years, language models (LMs) have become almost synonymous with NLP. Pre-trained to {``}read{''} a large text corpus, such models are useful as both a representation layer as well as a source of world knowledge. But how well do they represent MWEs? This talk will discuss various problems in representing MWEs, and the extent to which LMs address them: {\mbox{$\bullet$}} Do LMs capture the implicit relationship between constituents in compositional MWEs (from baby oil through parsley cake to cheeseburger stabbing)? {\mbox{$\bullet$}} Do LMs recognize when words are used nonliterally in non-compositional MWEs (e.g. do they know whether there are fleas in the flea market)? {\mbox{$\bullet$}} Do LMs know idioms, and can they infer the meaning of new idioms from the context as humans often do?",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="shwartz-2021-long">
    <titleInfo>
      <title>A Long Hard Look at MWEs in the Age of Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Vered</namePart>
      <namePart type="family">Shwartz</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 17th Workshop on Multiword Expressions (MWE 2021)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Paul</namePart>
        <namePart type="family">Cook</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jelena</namePart>
        <namePart type="family">Mitrović</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Carla</namePart>
        <namePart type="given">Parra</namePart>
        <namePart type="family">Escartín</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ashwini</namePart>
        <namePart type="family">Vaidya</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Petya</namePart>
        <namePart type="family">Osenova</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shiva</namePart>
        <namePart type="family">Taslimipoor</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Carlos</namePart>
        <namePart type="family">Ramisch</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In recent years, language models (LMs) have become almost synonymous with NLP. Pre-trained to “read” a large text corpus, such models are useful as both a representation layer as well as a source of world knowledge. But how well do they represent MWEs? This talk will discuss various problems in representing MWEs, and the extent to which LMs address them: • Do LMs capture the implicit relationship between constituents in compositional MWEs (from baby oil through parsley cake to cheeseburger stabbing)? • Do LMs recognize when words are used nonliterally in non-compositional MWEs (e.g. do they know whether there are fleas in the flea market)? • Do LMs know idioms, and can they infer the meaning of new idioms from the context as humans often do?</abstract>
    <identifier type="citekey">shwartz-2021-long</identifier>
    <identifier type="doi">10.18653/v1/2021.mwe-1.1</identifier>
    <location>
      <url>https://aclanthology.org/2021.mwe-1.1</url>
    </location>
    <part>
      <date>2021-08</date>
      <detail type="page"><number>1</number></detail>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T A Long Hard Look at MWEs in the Age of Language Models
%A Shwartz, Vered
%Y Cook, Paul
%Y Mitrović, Jelena
%Y Escartín, Carla Parra
%Y Vaidya, Ashwini
%Y Osenova, Petya
%Y Taslimipoor, Shiva
%Y Ramisch, Carlos
%S Proceedings of the 17th Workshop on Multiword Expressions (MWE 2021)
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F shwartz-2021-long
%X In recent years, language models (LMs) have become almost synonymous with NLP. Pre-trained to “read” a large text corpus, such models are useful as both a representation layer as well as a source of world knowledge. But how well do they represent MWEs? This talk will discuss various problems in representing MWEs, and the extent to which LMs address them: • Do LMs capture the implicit relationship between constituents in compositional MWEs (from baby oil through parsley cake to cheeseburger stabbing)? • Do LMs recognize when words are used nonliterally in non-compositional MWEs (e.g. do they know whether there are fleas in the flea market)? • Do LMs know idioms, and can they infer the meaning of new idioms from the context as humans often do?
%R 10.18653/v1/2021.mwe-1.1
%U https://aclanthology.org/2021.mwe-1.1
%U https://doi.org/10.18653/v1/2021.mwe-1.1
%P 1
Markdown (Informal)
[A Long Hard Look at MWEs in the Age of Language Models](https://aclanthology.org/2021.mwe-1.1) (Shwartz, MWE 2021)
ACL
Vered Shwartz. 2021. A Long Hard Look at MWEs in the Age of Language Models. In Proceedings of the 17th Workshop on Multiword Expressions (MWE 2021), page 1, Online. Association for Computational Linguistics.