@inproceedings{haihong-etal-2024-assessing,
title = "Assessing Essay Fluency with Large Language Models",
author = "Wu, Haihong and
Ao, Chang and
Ni, Shiwen",
editor = "Hongfei, Lin and
Hongye, Tan and
Bin, Li",
booktitle = "Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 3: Evaluations)",
month = jul,
year = "2024",
address = "Taiyuan, China",
publisher = "Chinese Information Processing Society of China",
url = "https://aclanthology.org/2024.ccl-3.29/",
pages = "262--268",
language = "eng",
abstract = "``With the development of education and the widespread use of the internet, the scale of essay evaluation has increased, making the cost and efficiency of manual grading a significant challenge. To address this, The Twenty-third China National Conference on Computational Linguistics (CCL2024) established evaluation contest for essay fluency. This competition has three tracks corresponding to three sub-tasks. This paper conducts a detailed analysis of different tasks, employing the BERT model as well as the latest popular large language models Qwen to address these sub-tasks. As a result, our overall scores for the three tasks reached 37.26, 42.48, and 47.64.''"
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="haihong-etal-2024-assessing">
<titleInfo>
<title>Assessing Essay Fluency with Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Haihong</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chang</namePart>
<namePart type="family">Ao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shiwen</namePart>
<namePart type="family">Ni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">eng</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 3: Evaluations)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lin</namePart>
<namePart type="family">Hongfei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tan</namePart>
<namePart type="family">Hongye</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Li</namePart>
<namePart type="family">Bin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Chinese Information Processing Society of China</publisher>
<place>
<placeTerm type="text">Taiyuan, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>“With the development of education and the widespread use of the internet, the scale of essay evaluation has increased, making the cost and efficiency of manual grading a significant challenge. To address this, The Twenty-third China National Conference on Computational Linguistics (CCL2024) established evaluation contest for essay fluency. This competition has three tracks corresponding to three sub-tasks. This paper conducts a detailed analysis of different tasks, employing the BERT model as well as the latest popular large language models Qwen to address these sub-tasks. As a result, our overall scores for the three tasks reached 37.26, 42.48, and 47.64.”</abstract>
<identifier type="citekey">haihong-etal-2024-assessing</identifier>
<location>
<url>https://aclanthology.org/2024.ccl-3.29/</url>
</location>
<part>
<date>2024-07</date>
<extent unit="page">
<start>262</start>
<end>268</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Assessing Essay Fluency with Large Language Models
%A Wu, Haihong
%A Ao, Chang
%A Ni, Shiwen
%Y Hongfei, Lin
%Y Hongye, Tan
%Y Bin, Li
%S Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 3: Evaluations)
%D 2024
%8 July
%I Chinese Information Processing Society of China
%C Taiyuan, China
%G eng
%F haihong-etal-2024-assessing
%X “With the development of education and the widespread use of the internet, the scale of essay evaluation has increased, making the cost and efficiency of manual grading a significant challenge. To address this, The Twenty-third China National Conference on Computational Linguistics (CCL2024) established evaluation contest for essay fluency. This competition has three tracks corresponding to three sub-tasks. This paper conducts a detailed analysis of different tasks, employing the BERT model as well as the latest popular large language models Qwen to address these sub-tasks. As a result, our overall scores for the three tasks reached 37.26, 42.48, and 47.64.”
%U https://aclanthology.org/2024.ccl-3.29/
%P 262-268
Markdown (Informal)
[Assessing Essay Fluency with Large Language Models](https://aclanthology.org/2024.ccl-3.29/) (Wu et al., CCL 2024)
ACL
- Haihong Wu, Chang Ao, and Shiwen Ni. 2024. Assessing Essay Fluency with Large Language Models. In Proceedings of the 23rd Chinese National Conference on Computational Linguistics (Volume 3: Evaluations), pages 262–268, Taiyuan, China. Chinese Information Processing Society of China.