BibTeX
@inproceedings{moskvichev-mai-2023-narrativexl,
    title = "{N}arrative{XL}: a Large-scale Dataset for Long-Term Memory Models",
    author = "Moskvichev, Arsenii  and
      Mai, Ky-Vinh",
    editor = "Bouamor, Houda  and
      Pino, Juan  and
      Bali, Kalika",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
    month = dec,
    year = "2023",
    address = "Singapore",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-emnlp.1005",
    doi = "10.18653/v1/2023.findings-emnlp.1005",
    pages = "15058--15072",
    abstract = "We propose a new large-scale (nearly a million questions) ultra-long-context (more than 50,000 words average document length) reading comprehension dataset. Using GPT 3.5, we summarized each scene in 1,500 hand-curated fiction books from Project Gutenberg, which resulted in approximately 150 scene-level summaries per book. After that, we created a number of reading comprehension questions based on these summaries, including three types of multiple-choice scene recognition questions, as well as free-form narrative reconstruction questions. With 990,595 total questions, our dataset is an order of magnitude larger than the closest alternatives. Crucially, most questions have a known {``}retention demand{''}, indicating how long-term of a memory is needed to answer them, which should aid long-term memory performance evaluation. We validate our data in four small-scale experiments: one with human labelers, and three with existing language models. We show that our questions 1) adequately represent the source material 2) can be used to diagnose a model{'}s memory capacity 3) are not trivial for modern language models even when the memory demand does not exceed those models{'} context lengths. Lastly, we provide our code which can be used to further expand the dataset with minimal human labor.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="moskvichev-mai-2023-narrativexl">
    <titleInfo>
        <title>NarrativeXL: a Large-scale Dataset for Long-Term Memory Models</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Arsenii</namePart>
        <namePart type="family">Moskvichev</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Ky-Vinh</namePart>
        <namePart type="family">Mai</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2023-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Findings of the Association for Computational Linguistics: EMNLP 2023</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Houda</namePart>
            <namePart type="family">Bouamor</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Juan</namePart>
            <namePart type="family">Pino</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Kalika</namePart>
            <namePart type="family">Bali</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Singapore</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We propose a new large-scale (nearly a million questions) ultra-long-context (more than 50,000 words average document length) reading comprehension dataset. Using GPT 3.5, we summarized each scene in 1,500 hand-curated fiction books from Project Gutenberg, which resulted in approximately 150 scene-level summaries per book. After that, we created a number of reading comprehension questions based on these summaries, including three types of multiple-choice scene recognition questions, as well as free-form narrative reconstruction questions. With 990,595 total questions, our dataset is an order of magnitude larger than the closest alternatives. Crucially, most questions have a known “retention demand”, indicating how long-term of a memory is needed to answer them, which should aid long-term memory performance evaluation. We validate our data in four small-scale experiments: one with human labelers, and three with existing language models. We show that our questions 1) adequately represent the source material 2) can be used to diagnose a model’s memory capacity 3) are not trivial for modern language models even when the memory demand does not exceed those models’ context lengths. Lastly, we provide our code which can be used to further expand the dataset with minimal human labor.</abstract>
    <identifier type="citekey">moskvichev-mai-2023-narrativexl</identifier>
    <identifier type="doi">10.18653/v1/2023.findings-emnlp.1005</identifier>
    <location>
        <url>https://aclanthology.org/2023.findings-emnlp.1005</url>
    </location>
    <part>
        <date>2023-12</date>
        <extent unit="page">
            <start>15058</start>
            <end>15072</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T NarrativeXL: a Large-scale Dataset for Long-Term Memory Models
%A Moskvichev, Arsenii
%A Mai, Ky-Vinh
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Findings of the Association for Computational Linguistics: EMNLP 2023
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F moskvichev-mai-2023-narrativexl
%X We propose a new large-scale (nearly a million questions) ultra-long-context (more than 50,000 words average document length) reading comprehension dataset. Using GPT 3.5, we summarized each scene in 1,500 hand-curated fiction books from Project Gutenberg, which resulted in approximately 150 scene-level summaries per book. After that, we created a number of reading comprehension questions based on these summaries, including three types of multiple-choice scene recognition questions, as well as free-form narrative reconstruction questions. With 990,595 total questions, our dataset is an order of magnitude larger than the closest alternatives. Crucially, most questions have a known “retention demand”, indicating how long-term of a memory is needed to answer them, which should aid long-term memory performance evaluation. We validate our data in four small-scale experiments: one with human labelers, and three with existing language models. We show that our questions 1) adequately represent the source material 2) can be used to diagnose a model’s memory capacity 3) are not trivial for modern language models even when the memory demand does not exceed those models’ context lengths. Lastly, we provide our code which can be used to further expand the dataset with minimal human labor.
%R 10.18653/v1/2023.findings-emnlp.1005
%U https://aclanthology.org/2023.findings-emnlp.1005
%U https://doi.org/10.18653/v1/2023.findings-emnlp.1005
%P 15058-15072
Markdown (Informal)
[NarrativeXL: a Large-scale Dataset for Long-Term Memory Models](https://aclanthology.org/2023.findings-emnlp.1005) (Moskvichev & Mai, Findings 2023)
ACL
Arsenii Moskvichev and Ky-Vinh Mai. 2023. NarrativeXL: a Large-scale Dataset for Long-Term Memory Models. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 15058–15072, Singapore. Association for Computational Linguistics.