@inproceedings{ruiz-dolz-lawrence-2023-detecting,
title = "Detecting Argumentative Fallacies in the Wild: Problems and Limitations of Large Language Models",
author = "Ruiz-Dolz, Ramon and
Lawrence, John",
editor = "Alshomary, Milad and
Chen, Chung-Chi and
Muresan, Smaranda and
Park, Joonsuk and
Romberg, Julia",
booktitle = "Proceedings of the 10th Workshop on Argument Mining",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.argmining-1.1",
doi = "10.18653/v1/2023.argmining-1.1",
pages = "1--10",
abstract = "Previous work on the automatic identification of fallacies in natural language text has typically approached the problem in constrained experimental setups that make it difficult to understand the applicability and usefulness of the proposals in the real world. In this paper, we present the first analysis of the limitations that these data-driven approaches could show in real situations. For that purpose, we first create a validation corpus consisting of natural language argumentation schemes. Second, we provide new empirical results to the emerging task of identifying fallacies in natural language text. Third, we analyse the errors observed outside of the testing data domains considering the new validation corpus. Finally, we point out some important limitations observed in our analysis that should be taken into account in future research in this topic. Specifically, if we want to deploy these systems in the Wild.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ruiz-dolz-lawrence-2023-detecting">
<titleInfo>
<title>Detecting Argumentative Fallacies in the Wild: Problems and Limitations of Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ramon</namePart>
<namePart type="family">Ruiz-Dolz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">John</namePart>
<namePart type="family">Lawrence</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 10th Workshop on Argument Mining</title>
</titleInfo>
<name type="personal">
<namePart type="given">Milad</namePart>
<namePart type="family">Alshomary</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chung-Chi</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joonsuk</namePart>
<namePart type="family">Park</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Romberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Previous work on the automatic identification of fallacies in natural language text has typically approached the problem in constrained experimental setups that make it difficult to understand the applicability and usefulness of the proposals in the real world. In this paper, we present the first analysis of the limitations that these data-driven approaches could show in real situations. For that purpose, we first create a validation corpus consisting of natural language argumentation schemes. Second, we provide new empirical results to the emerging task of identifying fallacies in natural language text. Third, we analyse the errors observed outside of the testing data domains considering the new validation corpus. Finally, we point out some important limitations observed in our analysis that should be taken into account in future research in this topic. Specifically, if we want to deploy these systems in the Wild.</abstract>
<identifier type="citekey">ruiz-dolz-lawrence-2023-detecting</identifier>
<identifier type="doi">10.18653/v1/2023.argmining-1.1</identifier>
<location>
<url>https://aclanthology.org/2023.argmining-1.1</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>1</start>
<end>10</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Detecting Argumentative Fallacies in the Wild: Problems and Limitations of Large Language Models
%A Ruiz-Dolz, Ramon
%A Lawrence, John
%Y Alshomary, Milad
%Y Chen, Chung-Chi
%Y Muresan, Smaranda
%Y Park, Joonsuk
%Y Romberg, Julia
%S Proceedings of the 10th Workshop on Argument Mining
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F ruiz-dolz-lawrence-2023-detecting
%X Previous work on the automatic identification of fallacies in natural language text has typically approached the problem in constrained experimental setups that make it difficult to understand the applicability and usefulness of the proposals in the real world. In this paper, we present the first analysis of the limitations that these data-driven approaches could show in real situations. For that purpose, we first create a validation corpus consisting of natural language argumentation schemes. Second, we provide new empirical results to the emerging task of identifying fallacies in natural language text. Third, we analyse the errors observed outside of the testing data domains considering the new validation corpus. Finally, we point out some important limitations observed in our analysis that should be taken into account in future research in this topic. Specifically, if we want to deploy these systems in the Wild.
%R 10.18653/v1/2023.argmining-1.1
%U https://aclanthology.org/2023.argmining-1.1
%U https://doi.org/10.18653/v1/2023.argmining-1.1
%P 1-10
Markdown (Informal)
[Detecting Argumentative Fallacies in the Wild: Problems and Limitations of Large Language Models](https://aclanthology.org/2023.argmining-1.1) (Ruiz-Dolz & Lawrence, ArgMining-WS 2023)
ACL

Ramon Ruiz-Dolz and John Lawrence. 2023. [Detecting Argumentative Fallacies in the Wild: Problems and Limitations of Large Language Models](https://aclanthology.org/2023.argmining-1.1). In *Proceedings of the 10th Workshop on Argument Mining*, pages 1–10, Singapore. Association for Computational Linguistics.