@inproceedings{johnson-etal-2025-large,
title = "Do large language models solve verbal analogies like children do?",
author = "Johnson, Tamar and
ter Veen, Mathilde and
Choenni, Rochelle and
van der Maas, Han and
Shutova, Ekaterina and
Stevenson, Claire E.",
editor = "Boleda, Gemma and
Roth, Michael",
booktitle = "Proceedings of the 29th Conference on Computational Natural Language Learning",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.conll-1.40/",
doi = "10.18653/v1/2025.conll-1.40",
pages = "627--639",
ISBN = "979-8-89176-271-8",
abstract = "Analogy-making lies at the heart of human cognition. Adults solve analogies such as $\textit{horse belongs to stable like chicken belongs to …?}$ by mapping relations ($\textit{kept in}$) and answering $\textit{chicken coop}$. In contrast, young children often use association, e.g., answering $\textit{egg}$. This paper investigates whether large language models (LLMs) solve verbal analogies in A:B::C:? form using associations, similar to what children do. We use verbal analogies extracted from an online learning environment, where 14,006 7-12 year-olds from the Netherlands solved 872 analogies in Dutch. The eight tested LLMs performed at or above the level of children, with some models approaching adult performance estimates. However, when we control for solving by association, this picture changes. We conclude that the LLMs we tested rely heavily on association, like young children do. However, LLMs make different errors than children, and association doesn{'}t fully explain their superior performance on this children{'}s verbal analogy task. Future work will investigate whether LLMs{'} associations and errors are more similar to adult relational reasoning."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="johnson-etal-2025-large">
<titleInfo>
<title>Do large language models solve verbal analogies like children do?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Tamar</namePart>
<namePart type="family">Johnson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mathilde</namePart>
<namePart type="family">ter Veen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rochelle</namePart>
<namePart type="family">Choenni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Han</namePart>
<namePart type="family">van der Maas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Claire</namePart>
<namePart type="given">E</namePart>
<namePart type="family">Stevenson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 29th Conference on Computational Natural Language Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gemma</namePart>
<namePart type="family">Boleda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Roth</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-271-8</identifier>
</relatedItem>
<abstract>Analogy-making lies at the heart of human cognition. Adults solve analogies such as horse belongs to stable like chicken belongs to …? by mapping relations (kept in) and answering chicken coop. In contrast, young children often use association, e.g., answering egg. This paper investigates whether large language models (LLMs) solve verbal analogies in A:B::C:? form using associations, similar to what children do. We use verbal analogies extracted from an online learning environment, where 14,006 7-12 year-olds from the Netherlands solved 872 analogies in Dutch. The eight tested LLMs performed at or above the level of children, with some models approaching adult performance estimates. However, when we control for solving by association, this picture changes. We conclude that the LLMs we tested rely heavily on association, like young children do. However, LLMs make different errors than children, and association doesn’t fully explain their superior performance on this children’s verbal analogy task. Future work will investigate whether LLMs’ associations and errors are more similar to adult relational reasoning.</abstract>
<identifier type="citekey">johnson-etal-2025-large</identifier>
<identifier type="doi">10.18653/v1/2025.conll-1.40</identifier>
<location>
<url>https://aclanthology.org/2025.conll-1.40/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>627</start>
<end>639</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Do large language models solve verbal analogies like children do?
%A Johnson, Tamar
%A ter Veen, Mathilde
%A Choenni, Rochelle
%A van der Maas, Han
%A Shutova, Ekaterina
%A Stevenson, Claire E.
%Y Boleda, Gemma
%Y Roth, Michael
%S Proceedings of the 29th Conference on Computational Natural Language Learning
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-271-8
%F johnson-etal-2025-large
%X Analogy-making lies at the heart of human cognition. Adults solve analogies such as horse belongs to stable like chicken belongs to …? by mapping relations (kept in) and answering chicken coop. In contrast, young children often use association, e.g., answering egg. This paper investigates whether large language models (LLMs) solve verbal analogies in A:B::C:? form using associations, similar to what children do. We use verbal analogies extracted from an online learning environment, where 14,006 7-12 year-olds from the Netherlands solved 872 analogies in Dutch. The eight tested LLMs performed at or above the level of children, with some models approaching adult performance estimates. However, when we control for solving by association, this picture changes. We conclude that the LLMs we tested rely heavily on association, like young children do. However, LLMs make different errors than children, and association doesn’t fully explain their superior performance on this children’s verbal analogy task. Future work will investigate whether LLMs’ associations and errors are more similar to adult relational reasoning.
%R 10.18653/v1/2025.conll-1.40
%U https://aclanthology.org/2025.conll-1.40/
%U https://doi.org/10.18653/v1/2025.conll-1.40
%P 627-639
Markdown (Informal)
[Do large language models solve verbal analogies like children do?](https://aclanthology.org/2025.conll-1.40/) (Johnson et al., CoNLL 2025)
ACL
- Tamar Johnson, Mathilde ter Veen, Rochelle Choenni, Han van der Maas, Ekaterina Shutova, and Claire E. Stevenson. 2025. Do large language models solve verbal analogies like children do? In Proceedings of the 29th Conference on Computational Natural Language Learning, pages 627–639, Vienna, Austria. Association for Computational Linguistics.