@inproceedings{khaliq-etal-2024-comparison,
title = "Comparison of Image Generation Models for Abstract and Concrete Event Descriptions",
author = "Khaliq, Mohammed and
Frassinelli, Diego and
Schulte Im Walde, Sabine",
editor = "Ghosh, Debanjan and
Muresan, Smaranda and
Feldman, Anna and
Chakrabarty, Tuhin and
Liu, Emmy",
booktitle = "Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.figlang-1.3",
doi = "10.18653/v1/2024.figlang-1.3",
pages = "15--21",
abstract = "With the advent of diffusion-based image generation models such as DALL-E, Stable Diffusion and Midjourney, high quality images can be easily generated using textual inputs. It is unclear, however, to what extent the generated images resemble human mental representations, especially regarding abstract event knowledge. We analyse the capability of four state-of-the-art models in generating images of verb-object event pairs when we systematically manipulate the degrees of abstractness of both the verbs and the object nouns. Human judgements assess the generated images and demonstrate that DALL-E is strongest for event pairs with concrete nouns (e.g., {``}pour water{''}; {``}believe person{''}), while Midjourney is preferred for event pairs with abstract nouns (e.g., {``}raise awareness{''}; {``}remain mystery{''}), irrespective of the concreteness of the verb. Across models, humans were most unsatisfied with images of events pairs that combined concrete verbs with abstract direct-object nouns (e.g., {``}speak truth{''}), and an additional ad-hoc annotation contributes this to its potential for figurative language.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="khaliq-etal-2024-comparison">
<titleInfo>
<title>Comparison of Image Generation Models for Abstract and Concrete Event Descriptions</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mohammed</namePart>
<namePart type="family">Khaliq</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Diego</namePart>
<namePart type="family">Frassinelli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sabine</namePart>
<namePart type="family">Schulte Im Walde</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Debanjan</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Feldman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tuhin</namePart>
<namePart type="family">Chakrabarty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Emmy</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>With the advent of diffusion-based image generation models such as DALL-E, Stable Diffusion and Midjourney, high-quality images can be easily generated using textual inputs. It is unclear, however, to what extent the generated images resemble human mental representations, especially regarding abstract event knowledge. We analyse the capability of four state-of-the-art models in generating images of verb-object event pairs when we systematically manipulate the degrees of abstractness of both the verbs and the object nouns. Human judgements assess the generated images and demonstrate that DALL-E is strongest for event pairs with concrete nouns (e.g., “pour water”; “believe person”), while Midjourney is preferred for event pairs with abstract nouns (e.g., “raise awareness”; “remain mystery”), irrespective of the concreteness of the verb. Across models, humans were most unsatisfied with images of event pairs that combined concrete verbs with abstract direct-object nouns (e.g., “speak truth”), and an additional ad-hoc annotation attributes this to their potential for figurative language.</abstract>
<identifier type="citekey">khaliq-etal-2024-comparison</identifier>
<identifier type="doi">10.18653/v1/2024.figlang-1.3</identifier>
<location>
<url>https://aclanthology.org/2024.figlang-1.3</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>15</start>
<end>21</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Comparison of Image Generation Models for Abstract and Concrete Event Descriptions
%A Khaliq, Mohammed
%A Frassinelli, Diego
%A Schulte Im Walde, Sabine
%Y Ghosh, Debanjan
%Y Muresan, Smaranda
%Y Feldman, Anna
%Y Chakrabarty, Tuhin
%Y Liu, Emmy
%S Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico (Hybrid)
%F khaliq-etal-2024-comparison
%X With the advent of diffusion-based image generation models such as DALL-E, Stable Diffusion and Midjourney, high-quality images can be easily generated using textual inputs. It is unclear, however, to what extent the generated images resemble human mental representations, especially regarding abstract event knowledge. We analyse the capability of four state-of-the-art models in generating images of verb-object event pairs when we systematically manipulate the degrees of abstractness of both the verbs and the object nouns. Human judgements assess the generated images and demonstrate that DALL-E is strongest for event pairs with concrete nouns (e.g., “pour water”; “believe person”), while Midjourney is preferred for event pairs with abstract nouns (e.g., “raise awareness”; “remain mystery”), irrespective of the concreteness of the verb. Across models, humans were most unsatisfied with images of event pairs that combined concrete verbs with abstract direct-object nouns (e.g., “speak truth”), and an additional ad-hoc annotation attributes this to their potential for figurative language.
%R 10.18653/v1/2024.figlang-1.3
%U https://aclanthology.org/2024.figlang-1.3
%U https://doi.org/10.18653/v1/2024.figlang-1.3
%P 15-21
Markdown (Informal)
[Comparison of Image Generation Models for Abstract and Concrete Event Descriptions](https://aclanthology.org/2024.figlang-1.3) (Khaliq et al., Fig-Lang-WS 2024)
ACL
Mohammed Khaliq, Diego Frassinelli, and Sabine Schulte Im Walde. 2024. Comparison of Image Generation Models for Abstract and Concrete Event Descriptions. In Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024), pages 15–21, Mexico City, Mexico (Hybrid). Association for Computational Linguistics.