@inproceedings{cai-etal-2025-dr,
title = "{D}r.{ECI}: Infusing Large Language Models with Causal Knowledge for Decomposed Reasoning in Event Causality Identification",
author = "Cai, Ruichu and
Yu, Shengyin and
Zhang, Jiahao and
Chen, Wei and
Xu, Boyan and
Zhang, Keli",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.628/",
pages = "9346--9375",
abstract = "Despite the demonstrated potential of Large Language Models (LLMs) in diverse NLP tasks, their causal reasoning capability appears inadequate when evaluated within the context of the event causality identification (ECI) task. The ECI tasks pose significant complexity for LLMs and necessitate comprehensive causal priors for accurate identification. To improve the performance of LLMs for causal reasoning, we propose a multi-agent Decomposed reasoning framework for Event Causality Identification, designated as \textit{Dr.ECI}. In the discovery stage, \textit{Dr.ECI} incorporates specialized agents such as \textit{Causal Explorer} and \textit{Mediator Detector}, which capture implicit causality and indirect causality more effectively. In the reasoning stage, \textit{Dr.ECI} introduces the agents \textit{Direct Reasoner} and \textit{Indirect Reasoner}, which leverage the knowledge of the generalized causal structure specific to the ECI. Extensive evaluations demonstrate the state-of-the-art performance of \textit{Dr.ECI} comparing with baselines based on LLMs and supervised training. Our implementation will be open-sourced at \url{https://github.com/DMIRLAB-Group/Dr.ECI}."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="cai-etal-2025-dr">
<titleInfo>
<title>Dr.ECI: Infusing Large Language Models with Causal Knowledge for Decomposed Reasoning in Event Causality Identification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ruichu</namePart>
<namePart type="family">Cai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shengyin</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jiahao</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Boyan</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Keli</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 31st International Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Owen</namePart>
<namePart type="family">Rambow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leo</namePart>
<namePart type="family">Wanner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marianna</namePart>
<namePart type="family">Apidianaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hend</namePart>
<namePart type="family">Al-Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="family">Di Eugenio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Schockaert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Despite the demonstrated potential of Large Language Models (LLMs) in diverse NLP tasks, their causal reasoning capability appears inadequate when evaluated within the context of the event causality identification (ECI) task. The ECI tasks pose significant complexity for LLMs and necessitate comprehensive causal priors for accurate identification. To improve the performance of LLMs for causal reasoning, we propose a multi-agent Decomposed reasoning framework for Event Causality Identification, designated as Dr.ECI. In the discovery stage, Dr.ECI incorporates specialized agents such as Causal Explorer and Mediator Detector, which capture implicit causality and indirect causality more effectively. In the reasoning stage, Dr.ECI introduces the agents Direct Reasoner and Indirect Reasoner, which leverage the knowledge of the generalized causal structure specific to the ECI. Extensive evaluations demonstrate the state-of-the-art performance of Dr.ECI comparing with baselines based on LLMs and supervised training. Our implementation will be open-sourced at https://github.com/DMIRLAB-Group/Dr.ECI.</abstract>
<identifier type="citekey">cai-etal-2025-dr</identifier>
<location>
<url>https://aclanthology.org/2025.coling-main.628/</url>
</location>
<part>
<date>2025-01</date>
<extent unit="page">
<start>9346</start>
<end>9375</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Dr.ECI: Infusing Large Language Models with Causal Knowledge for Decomposed Reasoning in Event Causality Identification
%A Cai, Ruichu
%A Yu, Shengyin
%A Zhang, Jiahao
%A Chen, Wei
%A Xu, Boyan
%A Zhang, Keli
%Y Rambow, Owen
%Y Wanner, Leo
%Y Apidianaki, Marianna
%Y Al-Khalifa, Hend
%Y Di Eugenio, Barbara
%Y Schockaert, Steven
%S Proceedings of the 31st International Conference on Computational Linguistics
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F cai-etal-2025-dr
%X Despite the demonstrated potential of Large Language Models (LLMs) in diverse NLP tasks, their causal reasoning capability appears inadequate when evaluated within the context of the event causality identification (ECI) task. The ECI tasks pose significant complexity for LLMs and necessitate comprehensive causal priors for accurate identification. To improve the performance of LLMs for causal reasoning, we propose a multi-agent Decomposed reasoning framework for Event Causality Identification, designated as Dr.ECI. In the discovery stage, Dr.ECI incorporates specialized agents such as Causal Explorer and Mediator Detector, which capture implicit causality and indirect causality more effectively. In the reasoning stage, Dr.ECI introduces the agents Direct Reasoner and Indirect Reasoner, which leverage the knowledge of the generalized causal structure specific to the ECI. Extensive evaluations demonstrate the state-of-the-art performance of Dr.ECI comparing with baselines based on LLMs and supervised training. Our implementation will be open-sourced at https://github.com/DMIRLAB-Group/Dr.ECI.
%U https://aclanthology.org/2025.coling-main.628/
%P 9346-9375
Markdown (Informal)
[Dr.ECI: Infusing Large Language Models with Causal Knowledge for Decomposed Reasoning in Event Causality Identification](https://aclanthology.org/2025.coling-main.628/) (Cai et al., COLING 2025)
ACL