@inproceedings{molla-2018-macquarie,
title = "{M}acquarie {U}niversity at {B}io{ASQ} 6b: Deep learning and deep reinforcement learning for query-based summarisation",
author = "Moll{\'a}, Diego",
editor = "Kakadiaris, Ioannis A. and
Paliouras, George and
Krithara, Anastasia",
booktitle = "Proceedings of the 6th {B}io{ASQ} Workshop A challenge on large-scale biomedical semantic indexing and question answering",
month = nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-5303",
doi = "10.18653/v1/W18-5303",
pages = "22--29",
abstract = "This paper describes Macquarie University{'}s contribution to the BioASQ Challenge (BioASQ 6b, Phase B). We focused on the extraction of the ideal answers, and the task was approached as an instance of query-based multi-document summarisation. In particular, this paper focuses on the experiments related to the deep learning and reinforcement learning approaches used in the submitted runs. The best run used a deep learning model under a regression-based framework. The deep learning architecture used features derived from the output of LSTM chains on word embeddings, plus features based on similarity with the query, and sentence position. The reinforcement learning approach was a proof-of-concept prototype that trained a global policy using REINFORCE. The global policy was implemented as a neural network that used tf.idf features encoding the candidate sentence, question, and context.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="molla-2018-macquarie">
<titleInfo>
<title>Macquarie University at BioASQ 6b: Deep learning and deep reinforcement learning for query-based summarisation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Diego</namePart>
<namePart type="family">Mollá</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 6th BioASQ Workshop A challenge on large-scale biomedical semantic indexing and question answering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ioannis</namePart>
<namePart type="given">A</namePart>
<namePart type="family">Kakadiaris</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">George</namePart>
<namePart type="family">Paliouras</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anastasia</namePart>
<namePart type="family">Krithara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes Macquarie University’s contribution to the BioASQ Challenge (BioASQ 6b, Phase B). We focused on the extraction of the ideal answers, and the task was approached as an instance of query-based multi-document summarisation. In particular, this paper focuses on the experiments related to the deep learning and reinforcement learning approaches used in the submitted runs. The best run used a deep learning model under a regression-based framework. The deep learning architecture used features derived from the output of LSTM chains on word embeddings, plus features based on similarity with the query, and sentence position. The reinforcement learning approach was a proof-of-concept prototype that trained a global policy using REINFORCE. The global policy was implemented as a neural network that used tf.idf features encoding the candidate sentence, question, and context.</abstract>
<identifier type="citekey">molla-2018-macquarie</identifier>
<identifier type="doi">10.18653/v1/W18-5303</identifier>
<location>
<url>https://aclanthology.org/W18-5303</url>
</location>
<part>
<date>2018-11</date>
<extent unit="page">
<start>22</start>
<end>29</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Macquarie University at BioASQ 6b: Deep learning and deep reinforcement learning for query-based summarisation
%A Mollá, Diego
%Y Kakadiaris, Ioannis A.
%Y Paliouras, George
%Y Krithara, Anastasia
%S Proceedings of the 6th BioASQ Workshop A challenge on large-scale biomedical semantic indexing and question answering
%D 2018
%8 November
%I Association for Computational Linguistics
%C Brussels, Belgium
%F molla-2018-macquarie
%X This paper describes Macquarie University’s contribution to the BioASQ Challenge (BioASQ 6b, Phase B). We focused on the extraction of the ideal answers, and the task was approached as an instance of query-based multi-document summarisation. In particular, this paper focuses on the experiments related to the deep learning and reinforcement learning approaches used in the submitted runs. The best run used a deep learning model under a regression-based framework. The deep learning architecture used features derived from the output of LSTM chains on word embeddings, plus features based on similarity with the query, and sentence position. The reinforcement learning approach was a proof-of-concept prototype that trained a global policy using REINFORCE. The global policy was implemented as a neural network that used tf.idf features encoding the candidate sentence, question, and context.
%R 10.18653/v1/W18-5303
%U https://aclanthology.org/W18-5303
%U https://doi.org/10.18653/v1/W18-5303
%P 22-29
Markdown (Informal)
[Macquarie University at BioASQ 6b: Deep learning and deep reinforcement learning for query-based summarisation](https://aclanthology.org/W18-5303) (Mollá, BioASQ 2018)
ACL
Diego Mollá. 2018. Macquarie University at BioASQ 6b: Deep learning and deep reinforcement learning for query-based summarisation. In Proceedings of the 6th BioASQ Workshop A challenge on large-scale biomedical semantic indexing and question answering, pages 22–29, Brussels, Belgium. Association for Computational Linguistics.
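
For readers who want a concrete picture of the reinforcement-learning approach summarised in the abstract, the following is a minimal, illustrative sketch only: it is not the authors' code, and the toy data, toy reward, variable names, and feature construction are all assumptions. It shows the general pattern of training a global sentence-selection policy with REINFORCE over tf.idf features of the candidate sentence, the question, and the surrounding context.

# Hypothetical sketch of REINFORCE for query-based extractive summarisation
# with tf.idf features. Not the paper's implementation; all data and names
# below are illustrative assumptions.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

rng = np.random.default_rng(0)

# Toy question and candidate sentences standing in for one BioASQ item.
question = "what features does the query based summarisation model use"
candidates = [
    "lstm chains over word embeddings provide sentence representations",
    "the workshop was held in brussels in november",
    "sentence position and query similarity are additional features",
    "reinforce trains a global policy over tf idf features",
]
# Hypothetical relevance labels, used only to define a toy reward
# (in practice the reward would be ROUGE against the ideal answer).
relevant = np.array([1, 0, 1, 1])

# tf.idf features: concatenate candidate, question, and a crude context vector.
vec = TfidfVectorizer().fit(candidates + [question])
sent_X = vec.transform(candidates).toarray()
q_x = vec.transform([question]).toarray()[0]
ctx_x = sent_X.mean(axis=0)
X = np.hstack([sent_X,
               np.tile(q_x, (len(candidates), 1)),
               np.tile(ctx_x, (len(candidates), 1))])

# Global logistic policy: independent selection probability per sentence.
w = np.zeros(X.shape[1])

def reward(selection):
    """Toy F1 between selected and relevant sentences (ROUGE stand-in)."""
    tp = np.sum(selection * relevant)
    if selection.sum() == 0 or tp == 0:
        return 0.0
    prec, rec = tp / selection.sum(), tp / relevant.sum()
    return 2 * prec * rec / (prec + rec)

lr, baseline = 0.5, 0.0
for step in range(200):
    probs = 1.0 / (1.0 + np.exp(-X @ w))                    # action probabilities
    sel = (rng.random(len(probs)) < probs).astype(float)    # sample a selection
    r = reward(sel)
    baseline = 0.9 * baseline + 0.1 * r                     # running baseline
    # REINFORCE update: gradient of the log-likelihood of the sampled
    # actions, weighted by the advantage (reward minus baseline).
    grad = X.T @ (sel - probs)
    w += lr * (r - baseline) * grad

print("selection probabilities after training:",
      np.round(1.0 / (1.0 + np.exp(-X @ w)), 2))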