@inproceedings{kadam-ferraro-2025-tag,
title = "{TAG}{--}{EQA}: Text{--}And{--}Graph for Event Question Answering via Structured Prompting Strategies",
author = "Kadam, Maithili Sanjay and
Ferraro, Francis",
editor = "Frermann, Lea and
Stevenson, Mark",
booktitle = "Proceedings of the 14th Joint Conference on Lexical and Computational Semantics (*SEM 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.starsem-1.24/",
pages = "304--315",
ISBN = "979-8-89176-340-1",
abstract = "Large language models (LLMs) excel at general language tasks but often struggle with event-based questions{---}especially those requiring causal or temporal reasoning. We introduce TAG-EQA (Text-And-Graph for Event Question Answering), a prompting framework that injects causal event graphs into LLM inputs by converting structured relations into natural-language statements. TAG-EQA spans nine prompting configurations, combining three strategies (zero-shot, few-shot, chain-of-thought) with three input modalities (text-only, graph-only, text+graph), enabling a systematic analysis of when and how structured knowledge aids inference. On the TORQUESTRA benchmark, TAG-EQA improves accuracy by {\textasciitilde}5{\%} on average over text-only baselines, with gains up to {\textasciitilde}12{\%} in zero-shot settings and {\textasciitilde}18{\%} when graph-augmented CoT prompting is effective. While performance varies by model and configuration, our findings show that causal graphs can enhance event reasoning in LLMs without fine-tuning, offering a flexible way to encode structure in prompt-based QA."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kadam-ferraro-2025-tag">
    <titleInfo>
      <title>TAG–EQA: Text–And–Graph for Event Question Answering via Structured Prompting Strategies</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Maithili</namePart>
      <namePart type="given">Sanjay</namePart>
      <namePart type="family">Kadam</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Francis</namePart>
      <namePart type="family">Ferraro</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 14th Joint Conference on Lexical and Computational Semantics (*SEM 2025)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lea</namePart>
        <namePart type="family">Frermann</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mark</namePart>
        <namePart type="family">Stevenson</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Suzhou, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-340-1</identifier>
    </relatedItem>
    <abstract>Large language models (LLMs) excel at general language tasks but often struggle with event-based questions—especially those requiring causal or temporal reasoning. We introduce TAG-EQA (Text-And-Graph for Event Question Answering), a prompting framework that injects causal event graphs into LLM inputs by converting structured relations into natural-language statements. TAG-EQA spans nine prompting configurations, combining three strategies (zero-shot, few-shot, chain-of-thought) with three input modalities (text-only, graph-only, text+graph), enabling a systematic analysis of when and how structured knowledge aids inference. On the TORQUESTRA benchmark, TAG-EQA improves accuracy by ~5% on average over text-only baselines, with gains up to ~12% in zero-shot settings and ~18% when graph-augmented CoT prompting is effective. While performance varies by model and configuration, our findings show that causal graphs can enhance event reasoning in LLMs without fine-tuning, offering a flexible way to encode structure in prompt-based QA.</abstract>
    <identifier type="citekey">kadam-ferraro-2025-tag</identifier>
    <location>
      <url>https://aclanthology.org/2025.starsem-1.24/</url>
    </location>
    <part>
      <date>2025-11</date>
      <extent unit="page">
        <start>304</start>
        <end>315</end>
      </extent>
    </part>
  </mods>
</modsCollection>

%0 Conference Proceedings
%T TAG–EQA: Text–And–Graph for Event Question Answering via Structured Prompting Strategies
%A Kadam, Maithili Sanjay
%A Ferraro, Francis
%Y Frermann, Lea
%Y Stevenson, Mark
%S Proceedings of the 14th Joint Conference on Lexical and Computational Semantics (*SEM 2025)
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-340-1
%F kadam-ferraro-2025-tag
%X Large language models (LLMs) excel at general language tasks but often struggle with event-based questions—especially those requiring causal or temporal reasoning. We introduce TAG-EQA (Text-And-Graph for Event Question Answering), a prompting framework that injects causal event graphs into LLM inputs by converting structured relations into natural-language statements. TAG-EQA spans nine prompting configurations, combining three strategies (zero-shot, few-shot, chain-of-thought) with three input modalities (text-only, graph-only, text+graph), enabling a systematic analysis of when and how structured knowledge aids inference. On the TORQUESTRA benchmark, TAG-EQA improves accuracy by ~5% on average over text-only baselines, with gains up to ~12% in zero-shot settings and ~18% when graph-augmented CoT prompting is effective. While performance varies by model and configuration, our findings show that causal graphs can enhance event reasoning in LLMs without fine-tuning, offering a flexible way to encode structure in prompt-based QA.
%U https://aclanthology.org/2025.starsem-1.24/
%P 304-315

Markdown (Informal)
[TAG–EQA: Text–And–Graph for Event Question Answering via Structured Prompting Strategies](https://aclanthology.org/2025.starsem-1.24/) (Kadam & Ferraro, *SEM 2025)
ACL
Maithili Sanjay Kadam and Francis Ferraro. 2025. TAG–EQA: Text–And–Graph for Event Question Answering via Structured Prompting Strategies. In Proceedings of the 14th Joint Conference on Lexical and Computational Semantics (*SEM 2025), pages 304–315, Suzhou, China. Association for Computational Linguistics.
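For readers skimming the abstract above, the following is a minimal, hypothetical Python sketch of the graph-to-text prompting idea it describes: causal edges are verbalized as natural-language statements and combined with the passage and question under one of the nine strategy × modality configurations. The edge schema, relation names, templates, function names, and example passage are illustrative assumptions for this sketch, not the paper's actual implementation or data.

```python
# Hypothetical sketch (not the paper's code): verbalize a causal event graph as
# natural-language statements and assemble a prompt for one of the nine
# strategy x modality configurations mentioned in the abstract.

from itertools import product

# Illustrative edge schema: (head_event, relation, tail_event).
EXAMPLE_GRAPH = [
    ("heavy rainfall", "causes", "river flooding"),
    ("river flooding", "precedes", "road closures"),
]


def graph_to_statements(edges):
    """Convert structured relations into plain sentences."""
    templates = {
        "causes": "{0} causes {1}.",
        "precedes": "{0} happens before {1}.",
    }
    return " ".join(templates[rel].format(head, tail) for head, rel, tail in edges)


def build_prompt(question, passage, edges, strategy="zero-shot", modality="text+graph"):
    """Compose the model input for a given strategy and input modality."""
    parts = []
    if strategy == "few-shot":
        # Worked question-answer demonstrations would be prepended here.
        parts.append("Example: Q: <demonstration question> A: <demonstration answer>")
    if modality in ("text-only", "text+graph"):
        parts.append(f"Passage: {passage}")
    if modality in ("graph-only", "text+graph"):
        parts.append(f"Causal relations: {graph_to_statements(edges)}")
    parts.append(f"Question: {question}")
    if strategy == "chain-of-thought":
        parts.append("Let's reason step by step before answering.")
    else:
        parts.append("Answer:")
    return "\n".join(parts)


if __name__ == "__main__":
    # Enumerate all nine configurations (3 strategies x 3 modalities).
    for strategy, modality in product(
        ["zero-shot", "few-shot", "chain-of-thought"],
        ["text-only", "graph-only", "text+graph"],
    ):
        print(f"--- {strategy} / {modality} ---")
        print(build_prompt(
            question="What led to the road closures?",
            passage="After days of heavy rainfall, the river flooded and roads were closed.",
            edges=EXAMPLE_GRAPH,
            strategy=strategy,
            modality=modality,
        ))
```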