@inproceedings{andrews-witteveen-2019-unsupervised,
    title = "Unsupervised Natural Question Answering with a Small Model",
    author = "Andrews, Martin and
      Witteveen, Sam",
    editor = "Thorne, James and
      Vlachos, Andreas and
      Cocarascu, Oana and
      Christodoulopoulos, Christos and
      Mittal, Arpit",
    booktitle = "Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)",
    month = nov,
    year = "2019",
    address = "Hong Kong, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-6606",
    doi = "10.18653/v1/D19-6606",
    pages = "34--38",
    abstract = "The recent demonstration of the power of huge language models such as GPT-2 to memorise the answers to factoid questions raises questions about the extent to which knowledge is being embedded directly within these large models. This short paper describes an architecture through which much smaller models can also answer such questions - by making use of {`}raw{'} external knowledge. The contribution of this work is that the methods presented here rely on unsupervised learning techniques, complementing the unsupervised training of the Language Model. The goal of this line of research is to be able to add knowledge explicitly, without extensive training.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="andrews-witteveen-2019-unsupervised">
    <titleInfo>
      <title>Unsupervised Natural Question Answering with a Small Model</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Martin</namePart>
      <namePart type="family">Andrews</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sam</namePart>
      <namePart type="family">Witteveen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">James</namePart>
        <namePart type="family">Thorne</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Andreas</namePart>
        <namePart type="family">Vlachos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Oana</namePart>
        <namePart type="family">Cocarascu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Christos</namePart>
        <namePart type="family">Christodoulopoulos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Arpit</namePart>
        <namePart type="family">Mittal</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Hong Kong, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>The recent demonstration of the power of huge language models such as GPT-2 to memorise the answers to factoid questions raises questions about the extent to which knowledge is being embedded directly within these large models. This short paper describes an architecture through which much smaller models can also answer such questions - by making use of ‘raw’ external knowledge. The contribution of this work is that the methods presented here rely on unsupervised learning techniques, complementing the unsupervised training of the Language Model. The goal of this line of research is to be able to add knowledge explicitly, without extensive training.</abstract>
    <identifier type="citekey">andrews-witteveen-2019-unsupervised</identifier>
    <identifier type="doi">10.18653/v1/D19-6606</identifier>
    <location>
      <url>https://aclanthology.org/D19-6606</url>
    </location>
    <part>
      <date>2019-11</date>
      <extent unit="page">
        <start>34</start>
        <end>38</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Unsupervised Natural Question Answering with a Small Model
%A Andrews, Martin
%A Witteveen, Sam
%Y Thorne, James
%Y Vlachos, Andreas
%Y Cocarascu, Oana
%Y Christodoulopoulos, Christos
%Y Mittal, Arpit
%S Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong, China
%F andrews-witteveen-2019-unsupervised
%X The recent demonstration of the power of huge language models such as GPT-2 to memorise the answers to factoid questions raises questions about the extent to which knowledge is being embedded directly within these large models. This short paper describes an architecture through which much smaller models can also answer such questions - by making use of ‘raw’ external knowledge. The contribution of this work is that the methods presented here rely on unsupervised learning techniques, complementing the unsupervised training of the Language Model. The goal of this line of research is to be able to add knowledge explicitly, without extensive training.
%R 10.18653/v1/D19-6606
%U https://aclanthology.org/D19-6606
%U https://doi.org/10.18653/v1/D19-6606
%P 34-38
Markdown (Informal)
[Unsupervised Natural Question Answering with a Small Model](https://aclanthology.org/D19-6606) (Andrews & Witteveen, 2019)
ACL
Martin Andrews and Sam Witteveen. 2019. Unsupervised Natural Question Answering with a Small Model. In Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER), pages 34–38, Hong Kong, China. Association for Computational Linguistics.