@inproceedings{li-etal-2024-reason,
title = "Reason from Fallacy: Enhancing Large Language Models{'} Logical Reasoning through Logical Fallacy Understanding",
author = "Li, Yanda and
Wang, Dixuan and
Liang, Jiaqing and
Jiang, Guochao and
He, Qianyu and
Xiao, Yanghua and
Yang, Deqing",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-naacl.192",
doi = "10.18653/v1/2024.findings-naacl.192",
pages = "3053--3066",
abstract = "Large Language Models (LLMs) have demonstrated good performance in many reasoning tasks, but they still struggle with some complicated reasoning tasks including logical reasoning. One non-negligible reason for LLMs{'} suboptimal performance on logical reasoning is their overlooking of understanding logical fallacies correctly. To evaluate LLMs{'} capability of logical fallacy understanding (LFU), we propose five concrete tasks from three cognitive dimensions of WHAT, WHY, and HOW in this paper. Towards these LFU tasks, we have successfully constructed a new dataset LFUD based on GPT-4 accompanied by a little human effort. Our extensive experiments justify that our LFUD can be used not only to evaluate LLMs{'} LFU capability, but also to fine-tune LLMs to obtain significantly enhanced performance on logical reasoning.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="li-etal-2024-reason">
    <titleInfo>
      <title>Reason from Fallacy: Enhancing Large Language Models’ Logical Reasoning through Logical Fallacy Understanding</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yanda</namePart>
      <namePart type="family">Li</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dixuan</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jiaqing</namePart>
      <namePart type="family">Liang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Guochao</namePart>
      <namePart type="family">Jiang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Qianyu</namePart>
      <namePart type="family">He</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yanghua</namePart>
      <namePart type="family">Xiao</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Deqing</namePart>
      <namePart type="family">Yang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: NAACL 2024</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Kevin</namePart>
        <namePart type="family">Duh</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Helena</namePart>
        <namePart type="family">Gomez</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Steven</namePart>
        <namePart type="family">Bethard</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Mexico City, Mexico</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Large Language Models (LLMs) have demonstrated good performance on many reasoning tasks, but they still struggle with some complicated reasoning tasks, including logical reasoning. One non-negligible reason for LLMs’ suboptimal performance on logical reasoning is that they overlook the correct understanding of logical fallacies. To evaluate LLMs’ capability of logical fallacy understanding (LFU), in this paper we propose five concrete tasks spanning the three cognitive dimensions of WHAT, WHY, and HOW. For these LFU tasks, we construct a new dataset, LFUD, based on GPT-4 with a small amount of human effort. Our extensive experiments show that LFUD can be used not only to evaluate LLMs’ LFU capability, but also to fine-tune LLMs for significantly enhanced performance on logical reasoning.</abstract>
<identifier type="citekey">li-etal-2024-reason</identifier>
<identifier type="doi">10.18653/v1/2024.findings-naacl.192</identifier>
<location>
<url>https://aclanthology.org/2024.findings-naacl.192</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>3053</start>
<end>3066</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reason from Fallacy: Enhancing Large Language Models’ Logical Reasoning through Logical Fallacy Understanding
%A Li, Yanda
%A Wang, Dixuan
%A Liang, Jiaqing
%A Jiang, Guochao
%A He, Qianyu
%A Xiao, Yanghua
%A Yang, Deqing
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Findings of the Association for Computational Linguistics: NAACL 2024
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F li-etal-2024-reason
%X Large Language Models (LLMs) have demonstrated good performance on many reasoning tasks, but they still struggle with some complicated reasoning tasks, including logical reasoning. One non-negligible reason for LLMs’ suboptimal performance on logical reasoning is that they overlook the correct understanding of logical fallacies. To evaluate LLMs’ capability of logical fallacy understanding (LFU), in this paper we propose five concrete tasks spanning the three cognitive dimensions of WHAT, WHY, and HOW. For these LFU tasks, we construct a new dataset, LFUD, based on GPT-4 with a small amount of human effort. Our extensive experiments show that LFUD can be used not only to evaluate LLMs’ LFU capability, but also to fine-tune LLMs for significantly enhanced performance on logical reasoning.
%R 10.18653/v1/2024.findings-naacl.192
%U https://aclanthology.org/2024.findings-naacl.192
%U https://doi.org/10.18653/v1/2024.findings-naacl.192
%P 3053-3066
Markdown (Informal)
[Reason from Fallacy: Enhancing Large Language Models’ Logical Reasoning through Logical Fallacy Understanding](https://aclanthology.org/2024.findings-naacl.192) (Li et al., Findings 2024)
ACL
Yanda Li, Dixuan Wang, Jiaqing Liang, Guochao Jiang, Qianyu He, Yanghua Xiao, and Deqing Yang. 2024. Reason from Fallacy: Enhancing Large Language Models’ Logical Reasoning through Logical Fallacy Understanding. In Findings of the Association for Computational Linguistics: NAACL 2024, pages 3053–3066, Mexico City, Mexico. Association for Computational Linguistics.