@inproceedings{ivchenko-etal-2025-patients,
title = "Where Patients Slow Down: Surprisal, Uncertainty, and Simplification in {F}rench Clinical Reading",
author = "Ivchenko, Oksana and
Qazi, Alamgir Munir and
Abdul Nasir, Jamal",
editor = "Acarturk, Cengiz and
Nasir, Jamal and
Can, Burcu and
Coltekin, Cagr{\i}",
booktitle = "Proceedings of the First International Workshop on Gaze Data and Natural Language Processing",
month = sep,
year = "2025",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, BULGARIA",
url = "https://aclanthology.org/2025.gaze4nlp-1.7/",
pages = "52--57",
abstract = "This eye-tracking study links language-model surprisal and contextual entropy to how 23 non-expert adults read French health texts. Participants read seven texts (clinical case, medical, general), each available in an Original and Simplified version. Surprisal and entropy were computed with eight autoregressive models (82M{--}8B parameters), and four complementary eye-tracking measures were analyzed. Surprisal correlates positively with early reading measures, peaking in the smallest GPT-2 models (r {\ensuremath{\approx}} 0.26) and weakening with model size. Entropy shows the opposite pattern, with negative correlations strongest in the 7B-8B models (r {\ensuremath{\approx}} {\ensuremath{-}}0.13), consistent with a skim-when-uncertain strategy. Surprisal effects are largest in Clinical Original passages and drop by {\ensuremath{\sim}}20{\%} after simplification, whereas entropy effects are stable across domain and version. These findings expose a scaling paradox {--} where different model sizes are optimal for different cognitive signals {--} and suggest that French plain-language editing should focus on rewriting high-surprisal passages to reduce processing difficulty, and on avoiding high-entropy contexts for critical information."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ivchenko-etal-2025-patients">
<titleInfo>
<title>Where Patients Slow Down: Surprisal, Uncertainty, and Simplification in French Clinical Reading</title>
</titleInfo>
<name type="personal">
<namePart type="given">Oksana</namePart>
<namePart type="family">Ivchenko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alamgir</namePart>
<namePart type="given">Munir</namePart>
<namePart type="family">Qazi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jamal</namePart>
<namePart type="family">Abdul Nasir</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First International Workshop on Gaze Data and Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Cengiz</namePart>
<namePart type="family">Acarturk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jamal</namePart>
<namePart type="family">Nasir</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Burcu</namePart>
<namePart type="family">Can</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cagrı</namePart>
<namePart type="family">Coltekin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, BULGARIA</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This eye-tracking study links language-model surprisal and contextual entropy to how 23 non-expert adults read French health texts. Participants read seven texts (clinical case, medical, general), each available in an Original and Simplified version. Surprisal and entropy were computed with eight autoregressive models (82M–8B parameters), and four complementary eye-tracking measures were analyzed. Surprisal correlates positively with early reading measures, peaking in the smallest GPT-2 models (r ≈ 0.26) and weakening with model size. Entropy shows the opposite pattern, with negative correlations strongest in the 7B–8B models (r ≈ −0.13), consistent with a skim-when-uncertain strategy. Surprisal effects are largest in Clinical Original passages and drop by ~20% after simplification, whereas entropy effects are stable across domain and version. These findings expose a scaling paradox – where different model sizes are optimal for different cognitive signals – and suggest that French plain-language editing should focus on rewriting high-surprisal passages to reduce processing difficulty, and on avoiding high-entropy contexts for critical information.</abstract>
<identifier type="citekey">ivchenko-etal-2025-patients</identifier>
<location>
<url>https://aclanthology.org/2025.gaze4nlp-1.7/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>52</start>
<end>57</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Where Patients Slow Down: Surprisal, Uncertainty, and Simplification in French Clinical Reading
%A Ivchenko, Oksana
%A Qazi, Alamgir Munir
%A Abdul Nasir, Jamal
%Y Acarturk, Cengiz
%Y Nasir, Jamal
%Y Can, Burcu
%Y Coltekin, Cagrı
%S Proceedings of the First International Workshop on Gaze Data and Natural Language Processing
%D 2025
%8 September
%I INCOMA Ltd., Shoumen, BULGARIA
%C Varna, Bulgaria
%F ivchenko-etal-2025-patients
%X This eye-tracking study links language-model surprisal and contextual entropy to how 23 non-expert adults read French health texts. Participants read seven texts (clinical case, medical, general), each available in an Original and Simplified version. Surprisal and entropy were computed with eight autoregressive models (82M–8B parameters), and four complementary eye-tracking measures were analyzed. Surprisal correlates positively with early reading measures, peaking in the smallest GPT-2 models (r ≈ 0.26) and weakening with model size. Entropy shows the opposite pattern, with negative correlations strongest in the 7B–8B models (r ≈ −0.13), consistent with a skim-when-uncertain strategy. Surprisal effects are largest in Clinical Original passages and drop by ~20% after simplification, whereas entropy effects are stable across domain and version. These findings expose a scaling paradox – where different model sizes are optimal for different cognitive signals – and suggest that French plain-language editing should focus on rewriting high-surprisal passages to reduce processing difficulty, and on avoiding high-entropy contexts for critical information.
%U https://aclanthology.org/2025.gaze4nlp-1.7/
%P 52-57
Markdown (Informal)
[Where Patients Slow Down: Surprisal, Uncertainty, and Simplification in French Clinical Reading](https://aclanthology.org/2025.gaze4nlp-1.7/) (Ivchenko et al., Gaze4NLP 2025)
ACL
Oksana Ivchenko, Alamgir Munir Qazi, and Jamal Abdul Nasir. 2025. [Where Patients Slow Down: Surprisal, Uncertainty, and Simplification in French Clinical Reading](https://aclanthology.org/2025.gaze4nlp-1.7/). In *Proceedings of the First International Workshop on Gaze Data and Natural Language Processing*, pages 52–57, Varna, Bulgaria. INCOMA Ltd., Shoumen, BULGARIA.
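
As an informal illustration of the method described in the abstract, the sketch below shows one way per-token surprisal and contextual (next-token) entropy can be extracted from an autoregressive language model with the Hugging Face transformers library. The model name, the French example sentence, and the use of bits (log base 2) are assumptions for demonstration, not details taken from the paper.

```python
import math

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical stand-in model; the paper evaluates eight autoregressive models
# (82M-8B parameters), but no specific checkpoint is assumed here.
MODEL_NAME = "gpt2"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()


def surprisal_and_entropy(text: str):
    """Per-token surprisal and contextual (next-token) entropy, in bits."""
    enc = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**enc).logits            # (1, seq_len, vocab_size)
    log_probs = torch.log_softmax(logits, -1)   # natural-log probabilities

    ids = enc["input_ids"][0]
    rows = []
    # The distribution at position i-1 predicts the token observed at position i.
    for i in range(1, ids.shape[0]):
        lp = log_probs[0, i - 1]                               # ln p(w | context)
        surprisal = -lp[ids[i]].item() / math.log(2)           # -log2 p(w_i | context)
        entropy = -(lp.exp() * lp).sum().item() / math.log(2)  # H of next-token distribution
        rows.append((tokenizer.decode(ids[i].item()), surprisal, entropy))
    return rows


# Example (hypothetical French sentence, not one of the study's stimuli):
for token, s, h in surprisal_and_entropy("Le patient présente une fièvre persistante."):
    print(f"{token!r:>16}  surprisal={s:5.2f} bits  entropy={h:5.2f} bits")
```

In practice, such subword-level values would typically be aggregated to the word or region level used in eye-tracking analyses; that aggregation step is omitted from this sketch.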