@inproceedings{nguyen-razniewski-2022-materialized,
title = "Materialized Knowledge Bases from Commonsense Transformers",
author = "Nguyen, Tuan-Phong and
Razniewski, Simon",
editor = "Bosselut, Antoine and
Li, Xiang and
Lin, Bill Yuchen and
Shwartz, Vered and
Majumder, Bodhisattwa Prasad and
Lal, Yash Kumar and
Rudinger, Rachel and
Ren, Xiang and
Tandon, Niket and
Zouhar, Vil{\'e}m",
booktitle = "Proceedings of the First Workshop on Commonsense Representation and Reasoning (CSRR 2022)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.csrr-1.5",
doi = "10.18653/v1/2022.csrr-1.5",
pages = "36--42",
abstract = "Starting from the COMET methodology by Bosselut et al. (2019), generating commonsense knowledge directly from pre-trained language models has recently received significant attention. Surprisingly, up to now no materialized resource of commonsense knowledge generated this way is publicly available. This paper fills this gap, and uses the materialized resources to perform a detailed analysis of the potential of this approach in terms of precision and recall. Furthermore, we identify common problem cases, and outline use cases enabled by materialized resources. We posit that the availability of these resources is important for the advancement of the field, as it enables an off-the-shelf use of the resulting knowledge, as well as further analyses on its strengths and weaknesses.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nguyen-razniewski-2022-materialized">
<titleInfo>
<title>Materialized Knowledge Bases from Commonsense Transformers</title>
</titleInfo>
<name type="personal">
<namePart type="given">Tuan-Phong</namePart>
<namePart type="family">Nguyen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Simon</namePart>
<namePart type="family">Razniewski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Commonsense Representation and Reasoning (CSRR 2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Antoine</namePart>
<namePart type="family">Bosselut</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiang</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bill</namePart>
<namePart type="given">Yuchen</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vered</namePart>
<namePart type="family">Shwartz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bodhisattwa</namePart>
<namePart type="given">Prasad</namePart>
<namePart type="family">Majumder</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yash</namePart>
<namePart type="given">Kumar</namePart>
<namePart type="family">Lal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rachel</namePart>
<namePart type="family">Rudinger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiang</namePart>
<namePart type="family">Ren</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Niket</namePart>
<namePart type="family">Tandon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vilém</namePart>
<namePart type="family">Zouhar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Starting from the COMET methodology by Bosselut et al. (2019), generating commonsense knowledge directly from pre-trained language models has recently received significant attention. Surprisingly, up to now no materialized resource of commonsense knowledge generated this way is publicly available. This paper fills this gap, and uses the materialized resources to perform a detailed analysis of the potential of this approach in terms of precision and recall. Furthermore, we identify common problem cases, and outline use cases enabled by materialized resources. We posit that the availability of these resources is important for the advancement of the field, as it enables an off-the-shelf use of the resulting knowledge, as well as further analyses on its strengths and weaknesses.</abstract>
<identifier type="citekey">nguyen-razniewski-2022-materialized</identifier>
<identifier type="doi">10.18653/v1/2022.csrr-1.5</identifier>
<location>
<url>https://aclanthology.org/2022.csrr-1.5</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>36</start>
<end>42</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Materialized Knowledge Bases from Commonsense Transformers
%A Nguyen, Tuan-Phong
%A Razniewski, Simon
%Y Bosselut, Antoine
%Y Li, Xiang
%Y Lin, Bill Yuchen
%Y Shwartz, Vered
%Y Majumder, Bodhisattwa Prasad
%Y Lal, Yash Kumar
%Y Rudinger, Rachel
%Y Ren, Xiang
%Y Tandon, Niket
%Y Zouhar, Vilém
%S Proceedings of the First Workshop on Commonsense Representation and Reasoning (CSRR 2022)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F nguyen-razniewski-2022-materialized
%X Starting from the COMET methodology by Bosselut et al. (2019), generating commonsense knowledge directly from pre-trained language models has recently received significant attention. Surprisingly, up to now no materialized resource of commonsense knowledge generated this way is publicly available. This paper fills this gap, and uses the materialized resources to perform a detailed analysis of the potential of this approach in terms of precision and recall. Furthermore, we identify common problem cases, and outline use cases enabled by materialized resources. We posit that the availability of these resources is important for the advancement of the field, as it enables an off-the-shelf use of the resulting knowledge, as well as further analyses on its strengths and weaknesses.
%R 10.18653/v1/2022.csrr-1.5
%U https://aclanthology.org/2022.csrr-1.5
%U https://doi.org/10.18653/v1/2022.csrr-1.5
%P 36-42
Markdown (Informal)
[Materialized Knowledge Bases from Commonsense Transformers](https://aclanthology.org/2022.csrr-1.5) (Nguyen & Razniewski, CSRR 2022)
ACL