@inproceedings{yoo-etal-2025-imagine,
title = "Imagine to Hear: Auditory Knowledge Generation can be an Effective Assistant for Language Models",
author = "Yoo, Suho and
Ok, Hyunjong and
Lee, Jaeho",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.730/",
doi = "10.18653/v1/2025.findings-acl.730",
pages = "14182--14193",
ISBN = "979-8-89176-256-5",
abstract = "Language models pretrained on text-only corpora often struggle with tasks that require auditory commonsense knowledge. Previous work addresses this problem by augmenting the language model to retrieve knowledge from external audio databases. This approach has several limitations, such as the potential lack of relevant audio in databases and the high costs associated with constructing the databases. To address these issues, we propose Imagine to Hear, a novel approach that dynamically generates auditory knowledge using generative models. Our framework detects multiple audio-related textual spans from the given prompt and generates corresponding auditory knowledge. We develop several mechanisms to efficiently process multiple auditory knowledge, including a CLAP-based rejection sampler and a language-audio fusion module. Our experiments show that our method achieves state-of-the-art performance on AuditoryBench without relying on external databases, highlighting the effectiveness of our generation-based approach."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yoo-etal-2025-imagine">
<titleInfo>
<title>Imagine to Hear: Auditory Knowledge Generation can be an Effective Assistant for Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Suho</namePart>
<namePart type="family">Yoo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hyunjong</namePart>
<namePart type="family">Ok</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jaeho</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-256-5</identifier>
</relatedItem>
<abstract>Language models pretrained on text-only corpora often struggle with tasks that require auditory commonsense knowledge. Previous work addresses this problem by augmenting the language model to retrieve knowledge from external audio databases. This approach has several limitations, such as the potential lack of relevant audio in databases and the high costs associated with constructing the databases. To address these issues, we propose Imagine to Hear, a novel approach that dynamically generates auditory knowledge using generative models. Our framework detects multiple audio-related textual spans from the given prompt and generates corresponding auditory knowledge. We develop several mechanisms to efficiently process multiple auditory knowledge, including a CLAP-based rejection sampler and a language-audio fusion module. Our experiments show that our method achieves state-of-the-art performance on AuditoryBench without relying on external databases, highlighting the effectiveness of our generation-based approach.</abstract>
<identifier type="citekey">yoo-etal-2025-imagine</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.730</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.730/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>14182</start>
<end>14193</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Imagine to Hear: Auditory Knowledge Generation can be an Effective Assistant for Language Models
%A Yoo, Suho
%A Ok, Hyunjong
%A Lee, Jaeho
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F yoo-etal-2025-imagine
%X Language models pretrained on text-only corpora often struggle with tasks that require auditory commonsense knowledge. Previous work addresses this problem by augmenting the language model to retrieve knowledge from external audio databases. This approach has several limitations, such as the potential lack of relevant audio in databases and the high costs associated with constructing the databases. To address these issues, we propose Imagine to Hear, a novel approach that dynamically generates auditory knowledge using generative models. Our framework detects multiple audio-related textual spans from the given prompt and generates corresponding auditory knowledge. We develop several mechanisms to efficiently process multiple auditory knowledge, including a CLAP-based rejection sampler and a language-audio fusion module. Our experiments show that our method achieves state-of-the-art performance on AuditoryBench without relying on external databases, highlighting the effectiveness of our generation-based approach.
%R 10.18653/v1/2025.findings-acl.730
%U https://aclanthology.org/2025.findings-acl.730/
%U https://doi.org/10.18653/v1/2025.findings-acl.730
%P 14182-14193
Markdown (Informal)
[Imagine to Hear: Auditory Knowledge Generation can be an Effective Assistant for Language Models](https://aclanthology.org/2025.findings-acl.730/) (Yoo et al., Findings 2025)
ACL
Suho Yoo, Hyunjong Ok, and Jaeho Lee. 2025. Imagine to Hear: Auditory Knowledge Generation can be an Effective Assistant for Language Models. In Findings of the Association for Computational Linguistics: ACL 2025, pages 14182–14193, Vienna, Austria. Association for Computational Linguistics.