@inproceedings{zhou-etal-2025-enhancing,
title = "Enhancing Extractive Question Answering in Multiparty Dialogues with Logical Inference Memory Network",
author = "Zhou, Shu and
Zhao, Rui and
Zhou, Zhengda and
Yi, Haohan and
Zheng, Xuhui and
Wang, Hao",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.583/",
pages = "8725--8738",
abstract = "Multiparty dialogue question answering (QA) in machine reading comprehension (MRC) is a challenging task due to its complex information flow interactions and logical QA inference. Existing models typically handle such QA tasks by decoupling dialogue information at both speaker and utterance levels. However, few of them consider the logical inference relations in multiparty dialogue QA, leading to suboptimal QA performance. To address this issue, this paper proposes a memory network with logical inference (LIMN) for extractive QA in multiparty dialogues. LIMN introduces an inference module, which is pretrained by incorporating plain QA articles as external knowledge. It generates logical inference-aware representations from latent space for multiparty dialogues. To further model complex interactions among logical dialogue contexts, questions and key-utterance information, a key-utterance-based interaction method is proposed for leverage. Moreover, a multitask learning strategy is adopted for robust MRC. Extensive experiments were conducted on Molweni and FriendsQA benchmarks, which included 25k and 10k questions, respectively. Comparative results showed that LIMN achieves state-of-the-art results on both benchmarks, demonstrating the enhancement of logical QA inference in multiparty dialogue QA tasks."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhou-etal-2025-enhancing">
<titleInfo>
<title>Enhancing Extractive Question Answering in Multiparty Dialogues with Logical Inference Memory Network</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shu</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rui</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhengda</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haohan</namePart>
<namePart type="family">Yi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xuhui</namePart>
<namePart type="family">Zheng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hao</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 31st International Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Owen</namePart>
<namePart type="family">Rambow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leo</namePart>
<namePart type="family">Wanner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marianna</namePart>
<namePart type="family">Apidianaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hend</namePart>
<namePart type="family">Al-Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="given">Di</namePart>
<namePart type="family">Eugenio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Schockaert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Multiparty dialogue question answering (QA) in machine reading comprehension (MRC) is a challenging task due to its complex information flow interactions and logical QA inference. Existing models typically handle such QA tasks by decoupling dialogue information at both speaker and utterance levels. However, few of them consider the logical inference relations in multiparty dialogue QA, leading to suboptimal QA performance. To address this issue, this paper proposes a memory network with logical inference (LIMN) for extractive QA in multiparty dialogues. LIMN introduces an inference module, which is pretrained by incorporating plain QA articles as external knowledge. It generates logical inference-aware representations from latent space for multiparty dialogues. To further model complex interactions among logical dialogue contexts, questions and key-utterance information, a key-utterance-based interaction method is proposed for leverage. Moreover, a multitask learning strategy is adopted for robust MRC. Extensive experiments were conducted on Molweni and FriendsQA benchmarks, which included 25k and 10k questions, respectively. Comparative results showed that LIMN achieves state-of-the-art results on both benchmarks, demonstrating the enhancement of logical QA inference in multiparty dialogue QA tasks.</abstract>
<identifier type="citekey">zhou-etal-2025-enhancing</identifier>
<location>
<url>https://aclanthology.org/2025.coling-main.583/</url>
</location>
<part>
<date>2025-01</date>
<extent unit="page">
<start>8725</start>
<end>8738</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Enhancing Extractive Question Answering in Multiparty Dialogues with Logical Inference Memory Network
%A Zhou, Shu
%A Zhao, Rui
%A Zhou, Zhengda
%A Yi, Haohan
%A Zheng, Xuhui
%A Wang, Hao
%Y Rambow, Owen
%Y Wanner, Leo
%Y Apidianaki, Marianna
%Y Al-Khalifa, Hend
%Y Di Eugenio, Barbara
%Y Schockaert, Steven
%S Proceedings of the 31st International Conference on Computational Linguistics
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F zhou-etal-2025-enhancing
%X Multiparty dialogue question answering (QA) in machine reading comprehension (MRC) is a challenging task due to its complex information flow interactions and logical QA inference. Existing models typically handle such QA tasks by decoupling dialogue information at both speaker and utterance levels. However, few of them consider the logical inference relations in multiparty dialogue QA, leading to suboptimal QA performance. To address this issue, this paper proposes a memory network with logical inference (LIMN) for extractive QA in multiparty dialogues. LIMN introduces an inference module, which is pretrained by incorporating plain QA articles as external knowledge. It generates logical inference-aware representations from latent space for multiparty dialogues. To further model complex interactions among logical dialogue contexts, questions and key-utterance information, a key-utterance-based interaction method is proposed for leverage. Moreover, a multitask learning strategy is adopted for robust MRC. Extensive experiments were conducted on Molweni and FriendsQA benchmarks, which included 25k and 10k questions, respectively. Comparative results showed that LIMN achieves state-of-the-art results on both benchmarks, demonstrating the enhancement of logical QA inference in multiparty dialogue QA tasks.
%U https://aclanthology.org/2025.coling-main.583/
%P 8725-8738
Markdown (Informal)
[Enhancing Extractive Question Answering in Multiparty Dialogues with Logical Inference Memory Network](https://aclanthology.org/2025.coling-main.583/) (Zhou et al., COLING 2025)
ACL
Shu Zhou, Rui Zhao, Zhengda Zhou, Haohan Yi, Xuhui Zheng, and Hao Wang. 2025. Enhancing Extractive Question Answering in Multiparty Dialogues with Logical Inference Memory Network. In Proceedings of the 31st International Conference on Computational Linguistics, pages 8725–8738, Abu Dhabi, UAE. Association for Computational Linguistics.