@inproceedings{wu-liu-2025-embodiment,
title = "Embodiment in Multimodal Semantics: Comparing Sensory, Emotional, and Visual Features in {C}hinese Color Metaphors",
author = "Wu, Yufeng and
Liu, Meichun",
editor = "Chang, Kai-Wei and
Lu, Ke-Han and
Yang, Chih-Kai and
Tam, Zhi-Rui and
Chang, Wen-Yu and
Wang, Chung-Che",
booktitle = "Proceedings of the 37th Conference on Computational Linguistics and Speech Processing (ROCLING 2025)",
month = nov,
year = "2025",
address = "National Taiwan University, Taipei City, Taiwan",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.rocling-main.26/",
pages = "249--257",
isbn = "979-8-89176-379-1",
abstract = "This study examines how sensory-motor experience, emotional valence and arousal, and visual image statistics contribute to multimodal alignment in Chinese color metaphors. Using 184 metaphorical lexemes from six basic color terms, we combined textual data from the Chinese Corpus Internet (CCI 3.0) with image sets from Baidu, embedding both with Chinese-CLIP and measuring alignment via cosine similarity. Sensory-motor ratings, particularly effector exclusivity and tactile strength, correlated negatively with alignment, while emotional valence showed strong positive correlations and visual features such as color variability and entropy contributed positively. Regression and importance analyses confirmed emotion as the most reliable predictor, with sensory ratings offering little explanatory power. The findings indicate that affective salience and perceptual richness, rather than generalized sensory norms, are central to the embodied grounding of metaphorical words in multimodal contexts."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wu-liu-2025-embodiment">
<titleInfo>
<title>Embodiment in Multimodal Semantics: Comparing Sensory, Emotional, and Visual Features in Chinese Color Metaphors</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yufeng</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Meichun</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 37th Conference on Computational Linguistics and Speech Processing (ROCLING 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kai-Wei</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ke-Han</namePart>
<namePart type="family">Lu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chih-Kai</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhi-Rui</namePart>
<namePart type="family">Tam</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wen-Yu</namePart>
<namePart type="family">Chang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chung-Che</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">National Taiwan University, Taipei City, Taiwan</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-379-1</identifier>
</relatedItem>
<abstract>This study examines how sensory-motor experience, emotional valence and arousal, and visual image statistics contribute to multimodal alignment in Chinese color metaphors. Using 184 metaphorical lexemes from six basic color terms, we combined textual data from the Chinese Corpus Internet (CCI 3.0) with image sets from Baidu, embedding both with Chinese-CLIP and measuring alignment via cosine similarity. Sensory-motor ratings, particularly effector exclusivity and tactile strength, correlated negatively with alignment, while emotional valence showed strong positive correlations and visual features such as color variability and entropy contributed positively. Regression and importance analyses confirmed emotion as the most reliable predictor, with sensory ratings offering little explanatory power. The findings indicate that affective salience and perceptual richness, rather than generalized sensory norms, are central to the embodied grounding of metaphorical words in multimodal contexts.</abstract>
<identifier type="citekey">wu-liu-2025-embodiment</identifier>
<location>
<url>https://aclanthology.org/2025.rocling-main.26/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>249</start>
<end>257</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Embodiment in Multimodal Semantics: Comparing Sensory, Emotional, and Visual Features in Chinese Color Metaphors
%A Wu, Yufeng
%A Liu, Meichun
%Y Chang, Kai-Wei
%Y Lu, Ke-Han
%Y Yang, Chih-Kai
%Y Tam, Zhi-Rui
%Y Chang, Wen-Yu
%Y Wang, Chung-Che
%S Proceedings of the 37th Conference on Computational Linguistics and Speech Processing (ROCLING 2025)
%D 2025
%8 November
%I Association for Computational Linguistics
%C National Taiwan University, Taipei City, Taiwan
%@ 979-8-89176-379-1
%F wu-liu-2025-embodiment
%X This study examines how sensory-motor experience, emotional valence and arousal, and visual image statistics contribute to multimodal alignment in Chinese color metaphors. Using 184 metaphorical lexemes from six basic color terms, we combined textual data from the Chinese Corpus Internet (CCI 3.0) with image sets from Baidu, embedding both with Chinese-CLIP and measuring alignment via cosine similarity. Sensory-motor ratings, particularly effector exclusivity and tactile strength, correlated negatively with alignment, while emotional valence showed strong positive correlations and visual features such as color variability and entropy contributed positively. Regression and importance analyses confirmed emotion as the most reliable predictor, with sensory ratings offering little explanatory power. The findings indicate that affective salience and perceptual richness, rather than generalized sensory norms, are central to the embodied grounding of metaphorical words in multimodal contexts.
%U https://aclanthology.org/2025.rocling-main.26/
%P 249-257
Markdown (Informal)
[Embodiment in Multimodal Semantics: Comparing Sensory, Emotional, and Visual Features in Chinese Color Metaphors](https://aclanthology.org/2025.rocling-main.26/) (Wu & Liu, ROCLING 2025)
ACL