BibTeX
@inproceedings{wasi-islam-2024-cogergllm,
    title = "{C}og{E}rg{LLM}: Exploring Large Language Model Systems Design Perspective Using Cognitive Ergonomics",
    author = "Wasi, Azmine Toushik and
      Islam, Mst Rafia",
    editor = "Peled-Cohen, Lotem and
      Calderon, Nitay and
      Lissak, Shir and
      Reichart, Roi",
    booktitle = "Proceedings of the 1st Workshop on NLP for Science (NLP4Science)",
    month = nov,
    year = "2024",
    address = "Miami, FL, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.nlp4science-1.22",
    pages = "249--258",
    abstract = "Integrating cognitive ergonomics with LLMs is crucial for improving safety, reliability, and user satisfaction in human-AI interactions. Current LLM designs often lack this integration, resulting in systems that may not fully align with human cognitive capabilities and limitations. This oversight exacerbates biases in LLM outputs and leads to suboptimal user experiences due to inconsistent application of user-centered design principles. Researchers are increasingly leveraging NLP, particularly LLMs, to model and understand human behavior across social sciences, psychology, psychiatry, health, and neuroscience. Our position paper explores the need to integrate cognitive ergonomics into LLM design, providing a comprehensive framework and practical guidelines for ethical development. By addressing these challenges, we aim to advance safer, more reliable, and ethically sound human-AI interactions.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="wasi-islam-2024-cogergllm">
    <titleInfo>
      <title>CogErgLLM: Exploring Large Language Model Systems Design Perspective Using Cognitive Ergonomics</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Azmine</namePart>
      <namePart type="given">Toushik</namePart>
      <namePart type="family">Wasi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Mst</namePart>
      <namePart type="given">Rafia</namePart>
      <namePart type="family">Islam</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on NLP for Science (NLP4Science)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lotem</namePart>
        <namePart type="family">Peled-Cohen</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nitay</namePart>
        <namePart type="family">Calderon</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shir</namePart>
        <namePart type="family">Lissak</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Roi</namePart>
        <namePart type="family">Reichart</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Miami, FL, USA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Integrating cognitive ergonomics with LLMs is crucial for improving safety, reliability, and user satisfaction in human-AI interactions. Current LLM designs often lack this integration, resulting in systems that may not fully align with human cognitive capabilities and limitations. This oversight exacerbates biases in LLM outputs and leads to suboptimal user experiences due to inconsistent application of user-centered design principles. Researchers are increasingly leveraging NLP, particularly LLMs, to model and understand human behavior across social sciences, psychology, psychiatry, health, and neuroscience. Our position paper explores the need to integrate cognitive ergonomics into LLM design, providing a comprehensive framework and practical guidelines for ethical development. By addressing these challenges, we aim to advance safer, more reliable, and ethically sound human-AI interactions.</abstract>
    <identifier type="citekey">wasi-islam-2024-cogergllm</identifier>
    <location>
      <url>https://aclanthology.org/2024.nlp4science-1.22</url>
    </location>
    <part>
      <date>2024-11</date>
      <extent unit="page">
        <start>249</start>
        <end>258</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T CogErgLLM: Exploring Large Language Model Systems Design Perspective Using Cognitive Ergonomics
%A Wasi, Azmine Toushik
%A Islam, Mst Rafia
%Y Peled-Cohen, Lotem
%Y Calderon, Nitay
%Y Lissak, Shir
%Y Reichart, Roi
%S Proceedings of the 1st Workshop on NLP for Science (NLP4Science)
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, FL, USA
%F wasi-islam-2024-cogergllm
%X Integrating cognitive ergonomics with LLMs is crucial for improving safety, reliability, and user satisfaction in human-AI interactions. Current LLM designs often lack this integration, resulting in systems that may not fully align with human cognitive capabilities and limitations. This oversight exacerbates biases in LLM outputs and leads to suboptimal user experiences due to inconsistent application of user-centered design principles. Researchers are increasingly leveraging NLP, particularly LLMs, to model and understand human behavior across social sciences, psychology, psychiatry, health, and neuroscience. Our position paper explores the need to integrate cognitive ergonomics into LLM design, providing a comprehensive framework and practical guidelines for ethical development. By addressing these challenges, we aim to advance safer, more reliable, and ethically sound human-AI interactions.
%U https://aclanthology.org/2024.nlp4science-1.22
%P 249-258
Markdown (Informal)
[CogErgLLM: Exploring Large Language Model Systems Design Perspective Using Cognitive Ergonomics](https://aclanthology.org/2024.nlp4science-1.22) (Wasi & Islam, NLP4Science 2024)
ACL
Azmine Toushik Wasi and Mst Rafia Islam. 2024. CogErgLLM: Exploring Large Language Model Systems Design Perspective Using Cognitive Ergonomics. In Proceedings of the 1st Workshop on NLP for Science (NLP4Science), pages 249–258, Miami, FL, USA. Association for Computational Linguistics.