@inproceedings{nararatwong-etal-2022-kiqa,
title = "{KIQA}: Knowledge-Infused Question Answering Model for Financial Table-Text Data",
author = "Nararatwong, Rungsiman and
Kertkeidkachorn, Natthawut and
Ichise, Ryutaro",
editor = "Agirre, Eneko and
Apidianaki, Marianna and
Vuli{\'c}, Ivan",
booktitle = "Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures",
month = may,
year = "2022",
address = "Dublin, Ireland and Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.deelio-1.6/",
doi = "10.18653/v1/2022.deelio-1.6",
pages = "53--61",
abstract = "While entity retrieval models continue to advance their capabilities, our understanding of their wide-ranging applications is limited, especially in domain-specific settings. We highlighted this issue by using recent general-domain entity-linking models, LUKE and GENRE, to inject external knowledge into a question-answering (QA) model for a financial QA task with a hybrid tabular-textual dataset. We found that both models improved the baseline model by 1.57{\%} overall and 8.86{\%} on textual data. Nonetheless, the challenge remains as they still struggle to handle tabular inputs. We subsequently conducted a comprehensive attention-weight analysis, revealing how LUKE utilizes external knowledge supplied by GENRE. The analysis also elaborates how the injection of symbolic knowledge can be helpful and what needs further improvement, paving the way for future research on this challenging QA task and advancing our understanding of how a language model incorporates external knowledge."
}