@inproceedings{marjanovic-etal-2024-dynamicqa,
    title = "{DYNAMICQA}: Tracing Internal Knowledge Conflicts in Language Models",
    author = "Marjanovic, Sara Vera and
    Yu, Haeun and
    Atanasova, Pepa and
    Maistro, Maria and
    Lioma, Christina and
    Augenstein, Isabelle",
    editor = "Al-Onaizan, Yaser and
    Bansal, Mohit and
    Chen, Yun-Nung",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-emnlp.838/",
    doi = "10.18653/v1/2024.findings-emnlp.838",
    pages = "14346--14360",
    abstract = "Knowledge-intensive language understanding tasks require Language Models (LMs) to integrate relevant context, mitigating their inherent weaknesses, such as incomplete or outdated knowledge. However, conflicting knowledge can be present in the LM's parameters, termed intra-memory conflict, which can affect a model's propensity to accept contextual knowledge. To study the effect of intra-memory conflict on LM's ability to accept the relevant context, we utilise two knowledge conflict measures and a novel dataset containing inherently conflicting data, DYNAMICQA. This dataset includes facts with a temporal dynamic nature where facts can change over time and disputable dynamic facts, which can change depending on the viewpoint. DYNAMICQA is the first to include real-world knowledge conflicts and provide context to study the link between the different types of knowledge conflicts. We also evaluate several measures on their ability to reflect the presence of intra-memory conflict: semantic entropy and a novel coherent persuasion score. With our extensive experiments, we verify that LMs show a greater degree of intra-memory conflict with dynamic facts compared to facts that have a single truth value. Further, we reveal that facts with intra-memory conflict are harder to update with context, suggesting that retrieval-augmented generation will struggle with the most commonly adapted facts."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="marjanovic-etal-2024-dynamicqa">
<titleInfo>
<title>DYNAMICQA: Tracing Internal Knowledge Conflicts in Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sara</namePart>
<namePart type="given">Vera</namePart>
<namePart type="family">Marjanovic</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Haeun</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pepa</namePart>
<namePart type="family">Atanasova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria</namePart>
<namePart type="family">Maistro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christina</namePart>
<namePart type="family">Lioma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isabelle</namePart>
<namePart type="family">Augenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Knowledge-intensive language understanding tasks require Language Models (LMs) to integrate relevant context, mitigating their inherent weaknesses, such as incomplete or outdated knowledge. However, conflicting knowledge can be present in the LM’s parameters, termed intra-memory conflict, which can affect a model’s propensity to accept contextual knowledge. To study the effect of intra-memory conflict on LM’s ability to accept the relevant context, we utilise two knowledge conflict measures and a novel dataset containing inherently conflicting data, DYNAMICQA. This dataset includes facts with a temporal dynamic nature where facts can change over time and disputable dynamic facts, which can change depending on the viewpoint. DYNAMICQA is the first to include real-world knowledge conflicts and provide context to study the link between the different types of knowledge conflicts. We also evaluate several measures on their ability to reflect the presence of intra-memory conflict: semantic entropy and a novel coherent persuasion score. With our extensive experiments, we verify that LMs show a greater degree of intra-memory conflict with dynamic facts compared to facts that have a single truth value. Further, we reveal that facts with intra-memory conflict are harder to update with context, suggesting that retrieval-augmented generation will struggle with the most commonly adapted facts.</abstract>
<identifier type="citekey">marjanovic-etal-2024-dynamicqa</identifier>
<identifier type="doi">10.18653/v1/2024.findings-emnlp.838</identifier>
<location>
<url>https://aclanthology.org/2024.findings-emnlp.838/</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>14346</start>
<end>14360</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T DYNAMICQA: Tracing Internal Knowledge Conflicts in Language Models
%A Marjanovic, Sara Vera
%A Yu, Haeun
%A Atanasova, Pepa
%A Maistro, Maria
%A Lioma, Christina
%A Augenstein, Isabelle
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Findings of the Association for Computational Linguistics: EMNLP 2024
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F marjanovic-etal-2024-dynamicqa
%X Knowledge-intensive language understanding tasks require Language Models (LMs) to integrate relevant context, mitigating their inherent weaknesses, such as incomplete or outdated knowledge. However, conflicting knowledge can be present in the LM’s parameters, termed intra-memory conflict, which can affect a model’s propensity to accept contextual knowledge. To study the effect of intra-memory conflict on LM’s ability to accept the relevant context, we utilise two knowledge conflict measures and a novel dataset containing inherently conflicting data, DYNAMICQA. This dataset includes facts with a temporal dynamic nature where facts can change over time and disputable dynamic facts, which can change depending on the viewpoint. DYNAMICQA is the first to include real-world knowledge conflicts and provide context to study the link between the different types of knowledge conflicts. We also evaluate several measures on their ability to reflect the presence of intra-memory conflict: semantic entropy and a novel coherent persuasion score. With our extensive experiments, we verify that LMs show a greater degree of intra-memory conflict with dynamic facts compared to facts that have a single truth value. Further, we reveal that facts with intra-memory conflict are harder to update with context, suggesting that retrieval-augmented generation will struggle with the most commonly adapted facts.
%R 10.18653/v1/2024.findings-emnlp.838
%U https://aclanthology.org/2024.findings-emnlp.838/
%U https://doi.org/10.18653/v1/2024.findings-emnlp.838
%P 14346-14360
Markdown (Informal)
[DYNAMICQA: Tracing Internal Knowledge Conflicts in Language Models](https://aclanthology.org/2024.findings-emnlp.838/) (Marjanovic et al., Findings 2024)
ACL
- Sara Vera Marjanovic, Haeun Yu, Pepa Atanasova, Maria Maistro, Christina Lioma, and Isabelle Augenstein. 2024. DYNAMICQA: Tracing Internal Knowledge Conflicts in Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 14346–14360, Miami, Florida, USA. Association for Computational Linguistics.