@inproceedings{chia-etal-2020-red,
title = "Red Dragon {AI} at {T}ext{G}raphs 2020 Shared Task : {LIT} : {LSTM}-Interleaved Transformer for Multi-Hop Explanation Ranking",
author = "Chia, Yew Ken and
Witteveen, Sam and
Andrews, Martin",
editor = "Ustalov, Dmitry and
Somasundaran, Swapna and
Panchenko, Alexander and
Malliaros, Fragkiskos D. and
Hulpu{\textcommabelow{s}}, Ioana and
Jansen, Peter and
Jana, Abhik",
booktitle = "Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs)",
month = dec,
year = "2020",
address = "Barcelona, Spain (Online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.textgraphs-1.14",
doi = "10.18653/v1/2020.textgraphs-1.14",
pages = "115--120",
abstract = "Explainable question answering for science questions is a challenging task that requires multi-hop inference over a large set of fact sentences. To counter the limitations of methods that view each query-document pair in isolation, we propose the LSTM-Interleaved Transformer which incorporates cross-document interactions for improved multi-hop ranking. The LIT architecture can leverage prior ranking positions in the re-ranking setting. Our model is competitive on the current leaderboard for the TextGraphs 2020 shared task, achieving a test-set MAP of 0.5607, and would have gained third place had we submitted before the competition deadline. Our code implementation is made available at [\url{https://github.com/mdda/worldtree_corpus/tree/textgraphs_2020}](\url{https://github.com/mdda/worldtree_corpus/tree/textgraphs_2020}).",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chia-etal-2020-red">
<titleInfo>
<title>Red Dragon AI at TextGraphs 2020 Shared Task : LIT : LSTM-Interleaved Transformer for Multi-Hop Explanation Ranking</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yew</namePart>
<namePart type="given">Ken</namePart>
<namePart type="family">Chia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sam</namePart>
<namePart type="family">Witteveen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Martin</namePart>
<namePart type="family">Andrews</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dmitry</namePart>
<namePart type="family">Ustalov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Swapna</namePart>
<namePart type="family">Somasundaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="family">Panchenko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fragkiskos</namePart>
<namePart type="given">D</namePart>
<namePart type="family">Malliaros</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ioana</namePart>
<namePart type="family">Hulpu\textcommabelows</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peter</namePart>
<namePart type="family">Jansen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhik</namePart>
<namePart type="family">Jana</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Barcelona, Spain (Online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Explainable question answering for science questions is a challenging task that requires multi-hop inference over a large set of fact sentences. To counter the limitations of methods that view each query-document pair in isolation, we propose the LSTM-Interleaved Transformer which incorporates cross-document interactions for improved multi-hop ranking. The LIT architecture can leverage prior ranking positions in the re-ranking setting. Our model is competitive on the current leaderboard for the TextGraphs 2020 shared task, achieving a test-set MAP of 0.5607, and would have gained third place had we submitted before the competition deadline. Our code implementation is made available at https://github.com/mdda/worldtree_corpus/tree/textgraphs_2020.</abstract>
<identifier type="citekey">chia-etal-2020-red</identifier>
<identifier type="doi">10.18653/v1/2020.textgraphs-1.14</identifier>
<location>
<url>https://aclanthology.org/2020.textgraphs-1.14</url>
</location>
<part>
<date>2020-12</date>
<extent unit="page">
<start>115</start>
<end>120</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Red Dragon AI at TextGraphs 2020 Shared Task : LIT : LSTM-Interleaved Transformer for Multi-Hop Explanation Ranking
%A Chia, Yew Ken
%A Witteveen, Sam
%A Andrews, Martin
%Y Ustalov, Dmitry
%Y Somasundaran, Swapna
%Y Panchenko, Alexander
%Y Malliaros, Fragkiskos D.
%Y Hulpuș, Ioana
%Y Jansen, Peter
%Y Jana, Abhik
%S Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs)
%D 2020
%8 December
%I Association for Computational Linguistics
%C Barcelona, Spain (Online)
%F chia-etal-2020-red
%X Explainable question answering for science questions is a challenging task that requires multi-hop inference over a large set of fact sentences. To counter the limitations of methods that view each query-document pair in isolation, we propose the LSTM-Interleaved Transformer which incorporates cross-document interactions for improved multi-hop ranking. The LIT architecture can leverage prior ranking positions in the re-ranking setting. Our model is competitive on the current leaderboard for the TextGraphs 2020 shared task, achieving a test-set MAP of 0.5607, and would have gained third place had we submitted before the competition deadline. Our code implementation is made available at https://github.com/mdda/worldtree_corpus/tree/textgraphs_2020.
%R 10.18653/v1/2020.textgraphs-1.14
%U https://aclanthology.org/2020.textgraphs-1.14
%U https://doi.org/10.18653/v1/2020.textgraphs-1.14
%P 115-120
Markdown (Informal)
[Red Dragon AI at TextGraphs 2020 Shared Task : LIT : LSTM-Interleaved Transformer for Multi-Hop Explanation Ranking](https://aclanthology.org/2020.textgraphs-1.14) (Chia et al., TextGraphs 2020)
ACL
Yew Ken Chia, Sam Witteveen, and Martin Andrews. 2020. Red Dragon AI at TextGraphs 2020 Shared Task : LIT : LSTM-Interleaved Transformer for Multi-Hop Explanation Ranking. In Proceedings of the Graph-based Methods for Natural Language Processing (TextGraphs), pages 115–120, Barcelona, Spain (Online). Association for Computational Linguistics.
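
For readers skimming this record, the abstract's core idea — recurrent layers that run *across* the ranked list of candidate documents, interleaved with transformer layers that run *within* each query-document pair — can be illustrated with a short sketch. The following is a hypothetical PyTorch illustration based only on the abstract's description, not the authors' released code (which lives at the GitHub URL in the record); the class name, layer sizes, mean-pooling, and residual mixing are all assumptions.

```python
# Hypothetical sketch of an "LSTM-interleaved transformer" re-ranker, based
# only on the abstract above; NOT the authors' implementation (see the repo
# linked in the record). All names and sizes here are illustrative choices.
import torch
import torch.nn as nn

class LSTMInterleavedEncoder(nn.Module):
    def __init__(self, d_model=256, nhead=4, num_blocks=2):
        super().__init__()
        # Transformer layers model tokens within one query+candidate pair.
        self.attn_layers = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model, nhead, batch_first=True)
            for _ in range(num_blocks)
        ])
        # BiLSTM layers recur across the ranked list of candidates, giving
        # each candidate a view of its neighbours (cross-document interaction).
        self.lstm_layers = nn.ModuleList([
            nn.LSTM(d_model, d_model // 2, batch_first=True, bidirectional=True)
            for _ in range(num_blocks)
        ])
        self.score = nn.Linear(d_model, 1)  # one relevance score per candidate

    def forward(self, x):
        # x: (num_candidates, seq_len, d_model) embeddings of query+candidate
        # pairs, ordered by the prior ranking so list position is meaningful.
        for attn, lstm in zip(self.attn_layers, self.lstm_layers):
            x = attn(x)                               # within-document attention
            pooled = x.mean(dim=1)                    # (num_candidates, d_model)
            mixed, _ = lstm(pooled.unsqueeze(0))      # recur over the candidate axis
            x = x + mixed.squeeze(0).unsqueeze(1)     # broadcast back to tokens
        return self.score(x.mean(dim=1)).squeeze(-1)  # (num_candidates,) scores

# Tiny smoke test: 8 candidates, 16 tokens each.
model = LSTMInterleavedEncoder()
scores = model(torch.randn(8, 16, 256))
print(scores.shape)  # torch.Size([8])
```

Feeding the candidates in their prior ranking order is what would let the cross-list LSTM exploit ranking positions in the re-ranking setting, as the abstract describes.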