@inproceedings{liu-etal-2020-uncertainty,
  title     = {Uncertainty Modeling for Machine Comprehension Systems using Efficient {Bayesian} Neural Networks},
  author    = {Liu, Zhengyuan and
               Krishnaswamy, Pavitra and
               Aw, Ai Ti and
               Chen, Nancy},
  editor    = {Clifton, Ann and
               Napoles, Courtney},
  booktitle = {Proceedings of the 28th International Conference on Computational Linguistics: Industry Track},
  month     = dec,
  year      = {2020},
  address   = {Online},
  publisher = {International Committee on Computational Linguistics},
  url       = {https://aclanthology.org/2020.coling-industry.21},
  doi       = {10.18653/v1/2020.coling-industry.21},
  pages     = {228--235},
  abstract  = {While neural approaches have achieved significant improvement in machine comprehension tasks, models often work as a black-box, resulting in lower interpretability, which requires special attention in domains such as healthcare or education. Quantifying uncertainty helps pave the way towards more interpretable neural networks. In classification and regression tasks, Bayesian neural networks have been effective in estimating model uncertainty. However, inference time increases linearly due to the required sampling process in Bayesian neural networks. Thus speed becomes a bottleneck in tasks with high system complexity such as question-answering or dialogue generation. In this work, we propose a hybrid neural architecture to quantify model uncertainty using Bayesian weight approximation but boosts up the inference speed by 80{\%} relative at test time, and apply it for a clinical dialogue comprehension task. The proposed approach is also used to enable active learning so that an updated model can be trained more optimally with new incoming data by selecting samples that are not well-represented in the current training scheme.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-etal-2020-uncertainty">
<titleInfo>
<title>Uncertainty Modeling for Machine Comprehension Systems using Efficient Bayesian Neural Networks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhengyuan</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pavitra</namePart>
<namePart type="family">Krishnaswamy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ai</namePart>
<namePart type="given">Ti</namePart>
<namePart type="family">Aw</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nancy</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 28th International Conference on Computational Linguistics: Industry Track</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ann</namePart>
<namePart type="family">Clifton</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Courtney</namePart>
<namePart type="family">Napoles</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>International Committee on Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>While neural approaches have achieved significant improvement in machine comprehension tasks, models often work as a black-box, resulting in lower interpretability, which requires special attention in domains such as healthcare or education. Quantifying uncertainty helps pave the way towards more interpretable neural networks. In classification and regression tasks, Bayesian neural networks have been effective in estimating model uncertainty. However, inference time increases linearly due to the required sampling process in Bayesian neural networks. Thus speed becomes a bottleneck in tasks with high system complexity such as question-answering or dialogue generation. In this work, we propose a hybrid neural architecture to quantify model uncertainty using Bayesian weight approximation but boosts up the inference speed by 80% relative at test time, and apply it for a clinical dialogue comprehension task. The proposed approach is also used to enable active learning so that an updated model can be trained more optimally with new incoming data by selecting samples that are not well-represented in the current training scheme.</abstract>
<identifier type="citekey">liu-etal-2020-uncertainty</identifier>
<identifier type="doi">10.18653/v1/2020.coling-industry.21</identifier>
<location>
<url>https://aclanthology.org/2020.coling-industry.21</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>228</start>
<end>235</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Uncertainty Modeling for Machine Comprehension Systems using Efficient Bayesian Neural Networks
%A Liu, Zhengyuan
%A Krishnaswamy, Pavitra
%A Aw, Ai Ti
%A Chen, Nancy
%Y Clifton, Ann
%Y Napoles, Courtney
%S Proceedings of the 28th International Conference on Computational Linguistics: Industry Track
%D 2020
%8 December
%I International Committee on Computational Linguistics
%C Online
%F liu-etal-2020-uncertainty
%X While neural approaches have achieved significant improvement in machine comprehension tasks, models often work as a black-box, resulting in lower interpretability, which requires special attention in domains such as healthcare or education. Quantifying uncertainty helps pave the way towards more interpretable neural networks. In classification and regression tasks, Bayesian neural networks have been effective in estimating model uncertainty. However, inference time increases linearly due to the required sampling process in Bayesian neural networks. Thus speed becomes a bottleneck in tasks with high system complexity such as question-answering or dialogue generation. In this work, we propose a hybrid neural architecture to quantify model uncertainty using Bayesian weight approximation but boosts up the inference speed by 80% relative at test time, and apply it for a clinical dialogue comprehension task. The proposed approach is also used to enable active learning so that an updated model can be trained more optimally with new incoming data by selecting samples that are not well-represented in the current training scheme.
%R 10.18653/v1/2020.coling-industry.21
%U https://aclanthology.org/2020.coling-industry.21
%U https://doi.org/10.18653/v1/2020.coling-industry.21
%P 228-235
Markdown (Informal)
[Uncertainty Modeling for Machine Comprehension Systems using Efficient Bayesian Neural Networks](https://aclanthology.org/2020.coling-industry.21) (Liu et al., COLING 2020)
ACL