BibTeX
@inproceedings{yam-paek-2024-baby,
title = "What should Baby Models read? Exploring Sample-Efficient Data Composition on Model Performance",
author = "Yam, Hong Meng and
Paek, Nathan",
editor = "Hu, Michael Y. and
Mueller, Aaron and
Ross, Candace and
Williams, Adina and
Linzen, Tal and
Zhuang, Chengxu and
Choshen, Leshem and
Cotterell, Ryan and
Warstadt, Alex and
Wilcox, Ethan Gotlieb",
booktitle = "The 2nd BabyLM Challenge at the 28th Conference on Computational Natural Language Learning",
month = nov,
year = "2024",
address = "Miami, FL, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.conll-babylm.25/",
pages = "284--291",
abstract = "We explore the impact of pre-training data composition on the performance of small language models in a sample-efficient setting. Using datasets capped at 10 million words, we evaluate several data sources{---}including child-directed speech (CHILDES), classic fiction (Gutenberg), a mixed dataset (Mix), and synthetic TinyStories{---}across different model sizes ranging from 18 million to 705 million parameters. Our experiments show that smaller models (e.g., GPT2-18M and GPT2-44M) benefit from training on diverse datasets like Mix, achieving better performance on linguistic benchmarks. In contrast, larger models (e.g., GPT2-97M, GPT2-705M, and LLaMA-360M) perform better when trained on more complex and rich datasets like Gutenberg. Models trained on the CHILDES and TinyStories datasets underperformed across all model sizes. These findings suggest that the optimal dataset for sample-efficient training depends on the model size, and that neither child-directed speech nor simplified stories are optimal for small language models of all sizes. We highlight the importance of considering both dataset composition and model capacity for effective sample-efficient language model training."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yam-paek-2024-baby">
<titleInfo>
<title>What should Baby Models read? Exploring Sample-Efficient Data Composition on Model Performance</title>
</titleInfo>
<name type="personal">
<namePart type="given">Hong</namePart>
<namePart type="given">Meng</namePart>
<namePart type="family">Yam</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nathan</namePart>
<namePart type="family">Paek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>The 2nd BabyLM Challenge at the 28th Conference on Computational Natural Language Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="given">Y</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aaron</namePart>
<namePart type="family">Mueller</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Candace</namePart>
<namePart type="family">Ross</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Adina</namePart>
<namePart type="family">Williams</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tal</namePart>
<namePart type="family">Linzen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chengxu</namePart>
<namePart type="family">Zhuang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leshem</namePart>
<namePart type="family">Choshen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ryan</namePart>
<namePart type="family">Cotterell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alex</namePart>
<namePart type="family">Warstadt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ethan</namePart>
<namePart type="given">Gotlieb</namePart>
<namePart type="family">Wilcox</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, FL, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We explore the impact of pre-training data composition on the performance of small language models in a sample-efficient setting. Using datasets capped at 10 million words, we evaluate several data sources—including child-directed speech (CHILDES), classic fiction (Gutenberg), a mixed dataset (Mix), and synthetic TinyStories—across different model sizes ranging from 18 million to 705 million parameters. Our experiments show that smaller models (e.g., GPT2-18M and GPT2-44M) benefit from training on diverse datasets like Mix, achieving better performance on linguistic benchmarks. In contrast, larger models (e.g., GPT2-97M, GPT2-705M, and LLaMA-360M) perform better when trained on more complex and rich datasets like Gutenberg. Models trained on the CHILDES and TinyStories datasets underperformed across all model sizes. These findings suggest that the optimal dataset for sample-efficient training depends on the model size, and that neither child-directed speech nor simplified stories are optimal for small language models of all sizes. We highlight the importance of considering both dataset composition and model capacity for effective sample-efficient language model training.</abstract>
<identifier type="citekey">yam-paek-2024-baby</identifier>
<location>
<url>https://aclanthology.org/2024.conll-babylm.25/</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>284</start>
<end>291</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T What should Baby Models read? Exploring Sample-Efficient Data Composition on Model Performance
%A Yam, Hong Meng
%A Paek, Nathan
%Y Hu, Michael Y.
%Y Mueller, Aaron
%Y Ross, Candace
%Y Williams, Adina
%Y Linzen, Tal
%Y Zhuang, Chengxu
%Y Choshen, Leshem
%Y Cotterell, Ryan
%Y Warstadt, Alex
%Y Wilcox, Ethan Gotlieb
%S The 2nd BabyLM Challenge at the 28th Conference on Computational Natural Language Learning
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, FL, USA
%F yam-paek-2024-baby
%X We explore the impact of pre-training data composition on the performance of small language models in a sample-efficient setting. Using datasets capped at 10 million words, we evaluate several data sources—including child-directed speech (CHILDES), classic fiction (Gutenberg), a mixed dataset (Mix), and synthetic TinyStories—across different model sizes ranging from 18 million to 705 million parameters. Our experiments show that smaller models (e.g., GPT2-18M and GPT2-44M) benefit from training on diverse datasets like Mix, achieving better performance on linguistic benchmarks. In contrast, larger models (e.g., GPT2-97M, GPT2-705M, and LLaMA-360M) perform better when trained on more complex and rich datasets like Gutenberg. Models trained on the CHILDES and TinyStories datasets underperformed across all model sizes. These findings suggest that the optimal dataset for sample-efficient training depends on the model size, and that neither child-directed speech nor simplified stories are optimal for small language models of all sizes. We highlight the importance of considering both dataset composition and model capacity for effective sample-efficient language model training.
%U https://aclanthology.org/2024.conll-babylm.25/
%P 284-291
Markdown (Informal)
[What should Baby Models read? Exploring Sample-Efficient Data Composition on Model Performance](https://aclanthology.org/2024.conll-babylm.25/) (Yam & Paek, CoNLL-BabyLM 2024)
ACL
Hong Meng Yam and Nathan Paek. 2024. What should Baby Models read? Exploring Sample-Efficient Data Composition on Model Performance. In The 2nd BabyLM Challenge at the 28th Conference on Computational Natural Language Learning, pages 284–291, Miami, FL, USA. Association for Computational Linguistics.