@inproceedings{nagumothu-etal-2022-pie,
title = "{PIE}-{QG}: Paraphrased Information Extraction for Unsupervised Question Generation from Small Corpora",
author = "Nagumothu, Dinesh and
Ofoghi, Bahadorreza and
Huang, Guangyan and
Eklund, Peter",
editor = "Fokkens, Antske and
Srikumar, Vivek",
booktitle = "Proceedings of the 26th Conference on Computational Natural Language Learning (CoNLL)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.conll-1.24/",
doi = "10.18653/v1/2022.conll-1.24",
pages = "350--359",
abstract = "Supervised Question Answering systems (QA systems) rely on domain-specific human-labeled data for training. Unsupervised QA systems generate their own question-answer training pairs, typically using secondary knowledge sources to achieve this outcome. Our approach (called PIE-QG) uses Open Information Extraction (OpenIE) to generate synthetic training questions from paraphrased passages and uses the question-answer pairs as training data for a language model for a state-of-the-art QA system based on BERT. Triples in the form of {\ensuremath{<}}subject, predicate, object{\ensuremath{>}} are extracted from each passage, and questions are formed with subjects (or objects) and predicates while objects (or subjects) are considered as answers. Experimenting on five extractive QA datasets demonstrates that our technique achieves on-par performance with existing state-of-the-art QA systems with the benefit of being trained on an order of magnitude fewer documents and without any recourse to external reference data sources."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nagumothu-etal-2022-pie">
<titleInfo>
<title>PIE-QG: Paraphrased Information Extraction for Unsupervised Question Generation from Small Corpora</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dinesh</namePart>
<namePart type="family">Nagumothu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bahadorreza</namePart>
<namePart type="family">Ofoghi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guangyan</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peter</namePart>
<namePart type="family">Eklund</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 26th Conference on Computational Natural Language Learning (CoNLL)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Antske</namePart>
<namePart type="family">Fokkens</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Supervised Question Answering systems (QA systems) rely on domain-specific human-labeled data for training. Unsupervised QA systems generate their own question-answer training pairs, typically using secondary knowledge sources to achieve this outcome. Our approach (called PIE-QG) uses Open Information Extraction (OpenIE) to generate synthetic training questions from paraphrased passages and uses the question-answer pairs as training data for a language model for a state-of-the-art QA system based on BERT. Triples in the form of &lt;subject, predicate, object&gt; are extracted from each passage, and questions are formed with subjects (or objects) and predicates while objects (or subjects) are considered as answers. Experimenting on five extractive QA datasets demonstrates that our technique achieves on-par performance with existing state-of-the-art QA systems with the benefit of being trained on an order of magnitude fewer documents and without any recourse to external reference data sources.</abstract>
<identifier type="citekey">nagumothu-etal-2022-pie</identifier>
<identifier type="doi">10.18653/v1/2022.conll-1.24</identifier>
<location>
<url>https://aclanthology.org/2022.conll-1.24/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>350</start>
<end>359</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T PIE-QG: Paraphrased Information Extraction for Unsupervised Question Generation from Small Corpora
%A Nagumothu, Dinesh
%A Ofoghi, Bahadorreza
%A Huang, Guangyan
%A Eklund, Peter
%Y Fokkens, Antske
%Y Srikumar, Vivek
%S Proceedings of the 26th Conference on Computational Natural Language Learning (CoNLL)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F nagumothu-etal-2022-pie
%X Supervised Question Answering systems (QA systems) rely on domain-specific human-labeled data for training. Unsupervised QA systems generate their own question-answer training pairs, typically using secondary knowledge sources to achieve this outcome. Our approach (called PIE-QG) uses Open Information Extraction (OpenIE) to generate synthetic training questions from paraphrased passages and uses the question-answer pairs as training data for a language model for a state-of-the-art QA system based on BERT. Triples in the form of <subject, predicate, object> are extracted from each passage, and questions are formed with subjects (or objects) and predicates while objects (or subjects) are considered as answers. Experimenting on five extractive QA datasets demonstrates that our technique achieves on-par performance with existing state-of-the-art QA systems with the benefit of being trained on an order of magnitude fewer documents and without any recourse to external reference data sources.
%R 10.18653/v1/2022.conll-1.24
%U https://aclanthology.org/2022.conll-1.24/
%U https://doi.org/10.18653/v1/2022.conll-1.24
%P 350-359
Markdown (Informal)
[PIE-QG: Paraphrased Information Extraction for Unsupervised Question Generation from Small Corpora](https://aclanthology.org/2022.conll-1.24/) (Nagumothu et al., CoNLL 2022)
ACL