@inproceedings{poon-etal-2024-shot,
title = "Few-shot Question Generation for Reading Comprehension",
author = "Poon, Yin and
Lee, John Sie Yuen and
{Yuylam@hkmu.edu.hk} and
{Wlsuen@hkmu.edu.hk} and
{Eong@hkmu.edu.hk} and
{Skwchu@hkmu.edu.hk}",
editor = "Wong, Kam-Fai and
Zhang, Min and
Xu, Ruifeng and
Li, Jing and
Wei, Zhongyu and
Gui, Lin and
Liang, Bin and
Zhao, Runcong",
booktitle = "Proceedings of the 10th SIGHAN Workshop on Chinese Language Processing (SIGHAN-10)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.sighan-1.3",
pages = "21--27",
abstract = "According to the internationally recognized PIRLS (Progress in International Reading Literacy Study) assessment standards, reading comprehension questions should require not only information retrieval, but also higher-order processes such as inferencing, interpreting and evaluation. However, these kinds of questions are often not available in large quantities for training question generation models. This paper investigates whether pre-trained Large Language Models (LLMs) can produce higher-order questions. Human assessment on a Chinese dataset shows that few-shot LLM prompting generates more usable and higher-order questions than two competitive neural baselines.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="poon-etal-2024-shot">
<titleInfo>
<title>Few-shot Question Generation for Reading Comprehension</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yin</namePart>
<namePart type="family">Poon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">John</namePart>
<namePart type="given">Sie</namePart>
<namePart type="given">Yuen</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yuylam@hkmu.edu.hk</namePart>
<namePart type="family">Yuylam@hkmu.edu.hk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wlsuen@hkmu.edu.hk</namePart>
<namePart type="family">Wlsuen@hkmu.edu.hk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eong@hkmu.edu.hk</namePart>
<namePart type="family">Eong@hkmu.edu.hk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Skwchu@hkmu.edu.hk</namePart>
<namePart type="family">Skwchu@hkmu.edu.hk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 10th SIGHAN Workshop on Chinese Language Processing (SIGHAN-10)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kam-Fai</namePart>
<namePart type="family">Wong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruifeng</namePart>
<namePart type="family">Xu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jing</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhongyu</namePart>
<namePart type="family">Wei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lin</namePart>
<namePart type="family">Gui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bin</namePart>
<namePart type="family">Liang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Runcong</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>According to the internationally recognized PIRLS (Progress in International Reading Literacy Study) assessment standards, reading comprehension questions should require not only information retrieval, but also higher-order processes such as inferencing, interpreting and evaluating. However, these kinds of questions are often not available in large quantities for training question generation models. This paper investigates whether pre-trained Large Language Models (LLMs) can produce higher-order questions. Human assessment on a Chinese dataset shows that few-shot LLM prompting generates more usable and higher-order questions than two competitive neural baselines.</abstract>
<identifier type="citekey">poon-etal-2024-shot</identifier>
<location>
<url>https://aclanthology.org/2024.sighan-1.3</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>21</start>
<end>27</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Few-shot Question Generation for Reading Comprehension
%A Poon, Yin
%A Lee, John Sie Yuen
%A Yuylam@hkmu.edu.hk
%A Wlsuen@hkmu.edu.hk
%A Eong@hkmu.edu.hk
%A Skwchu@hkmu.edu.hk
%Y Wong, Kam-Fai
%Y Zhang, Min
%Y Xu, Ruifeng
%Y Li, Jing
%Y Wei, Zhongyu
%Y Gui, Lin
%Y Liang, Bin
%Y Zhao, Runcong
%S Proceedings of the 10th SIGHAN Workshop on Chinese Language Processing (SIGHAN-10)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F poon-etal-2024-shot
%X According to the internationally recognized PIRLS (Progress in International Reading Literacy Study) assessment standards, reading comprehension questions should require not only information retrieval, but also higher-order processes such as inferencing, interpreting and evaluating. However, these kinds of questions are often not available in large quantities for training question generation models. This paper investigates whether pre-trained Large Language Models (LLMs) can produce higher-order questions. Human assessment on a Chinese dataset shows that few-shot LLM prompting generates more usable and higher-order questions than two competitive neural baselines.
%U https://aclanthology.org/2024.sighan-1.3
%P 21-27
Markdown (Informal)
[Few-shot Question Generation for Reading Comprehension](https://aclanthology.org/2024.sighan-1.3) (Poon et al., SIGHAN-WS 2024)
ACL
- Yin Poon, John Sie Yuen Lee, Yuylam@hkmu.edu.hk, Wlsuen@hkmu.edu.hk, Eong@hkmu.edu.hk, and Skwchu@hkmu.edu.hk. 2024. Few-shot Question Generation for Reading Comprehension. In Proceedings of the 10th SIGHAN Workshop on Chinese Language Processing (SIGHAN-10), pages 21–27, Bangkok, Thailand. Association for Computational Linguistics.