@inproceedings{matsubara-etal-2022-ensemble,
title = "Ensemble Transformer for Efficient and Accurate Ranking Tasks: an Application to Question Answering Systems",
author = "Matsubara, Yoshitomo and
Soldaini, Luca and
Lind, Eric and
Moschitti, Alessandro",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.537",
pages = "7259--7272",
abstract = "Large transformer models can highly improve Answer Sentence Selection (AS2) tasks, but their high computational costs prevent their use in many real-world applications. In this paper, we explore the following research question: How can we make the AS2 models more accurate without significantly increasing their model complexity? To address the question, we propose a Multiple Heads Student architecture (named CERBERUS), an efficient neural network designed to distill an ensemble of large transformers into a single smaller model. CERBERUS consists of two components: a stack of transformer layers that is used to encode inputs, and a set of ranking heads; unlike traditional distillation technique, each of them is trained by distilling a different large transformer architecture in a way that preserves the diversity of the ensemble members. The resulting model captures the knowledge of heterogeneous transformer models by using just a few extra parameters. We show the effectiveness of CERBERUS on three English datasets for AS2; our proposed approach outperforms all single-model distillations we consider, rivaling the state-of-the-art large AS2 models that have 2.7{\mbox{$\times$}} more parameters and run 2.5{\mbox{$\times$}} slower. Code for our model is available at https://github.com/amazon-research/wqa-cerberus.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="matsubara-etal-2022-ensemble">
    <titleInfo>
      <title>Ensemble Transformer for Efficient and Accurate Ranking Tasks: an Application to Question Answering Systems</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yoshitomo</namePart>
      <namePart type="family">Matsubara</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Luca</namePart>
      <namePart type="family">Soldaini</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Eric</namePart>
      <namePart type="family">Lind</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Alessandro</namePart>
      <namePart type="family">Moschitti</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Large transformer models can greatly improve Answer Sentence Selection (AS2) tasks, but their high computational costs prevent their use in many real-world applications. In this paper, we explore the following research question: How can we make the AS2 models more accurate without significantly increasing their model complexity? To address the question, we propose a Multiple Heads Student architecture (named CERBERUS), an efficient neural network designed to distill an ensemble of large transformers into a single smaller model. CERBERUS consists of two components: a stack of transformer layers that is used to encode inputs, and a set of ranking heads; unlike in traditional distillation techniques, each head is trained by distilling a different large transformer architecture in a way that preserves the diversity of the ensemble members. The resulting model captures the knowledge of heterogeneous transformer models by using just a few extra parameters. We show the effectiveness of CERBERUS on three English datasets for AS2; our proposed approach outperforms all single-model distillations we consider, rivaling the state-of-the-art large AS2 models that have 2.7× more parameters and run 2.5× slower. Code for our model is available at https://github.com/amazon-research/wqa-cerberus.</abstract>
    <identifier type="citekey">matsubara-etal-2022-ensemble</identifier>
    <location>
      <url>https://aclanthology.org/2022.findings-emnlp.537</url>
    </location>
    <part>
      <date>2022-12</date>
      <extent unit="page">
        <start>7259</start>
        <end>7272</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Ensemble Transformer for Efficient and Accurate Ranking Tasks: an Application to Question Answering Systems
%A Matsubara, Yoshitomo
%A Soldaini, Luca
%A Lind, Eric
%A Moschitti, Alessandro
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F matsubara-etal-2022-ensemble
%X Large transformer models can greatly improve Answer Sentence Selection (AS2) tasks, but their high computational costs prevent their use in many real-world applications. In this paper, we explore the following research question: How can we make the AS2 models more accurate without significantly increasing their model complexity? To address the question, we propose a Multiple Heads Student architecture (named CERBERUS), an efficient neural network designed to distill an ensemble of large transformers into a single smaller model. CERBERUS consists of two components: a stack of transformer layers that is used to encode inputs, and a set of ranking heads; unlike in traditional distillation techniques, each head is trained by distilling a different large transformer architecture in a way that preserves the diversity of the ensemble members. The resulting model captures the knowledge of heterogeneous transformer models by using just a few extra parameters. We show the effectiveness of CERBERUS on three English datasets for AS2; our proposed approach outperforms all single-model distillations we consider, rivaling the state-of-the-art large AS2 models that have 2.7× more parameters and run 2.5× slower. Code for our model is available at https://github.com/amazon-research/wqa-cerberus.
%U https://aclanthology.org/2022.findings-emnlp.537
%P 7259-7272
Markdown (Informal)
[Ensemble Transformer for Efficient and Accurate Ranking Tasks: an Application to Question Answering Systems](https://aclanthology.org/2022.findings-emnlp.537) (Matsubara et al., Findings 2022)
ACL
Yoshitomo Matsubara, Luca Soldaini, Eric Lind, and Alessandro Moschitti. 2022. Ensemble Transformer for Efficient and Accurate Ranking Tasks: an Application to Question Answering Systems. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 7259–7272, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
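
The abstract above describes CERBERUS as a multi-head student: one small shared transformer encoder feeds several lightweight ranking heads, and each head is distilled from a different large teacher so the ensemble's diversity survives compression. The following is a minimal, hypothetical PyTorch sketch of that structure; the class names, mean pooling, and MSE distillation objective are assumptions made here for illustration, not the authors' released implementation (see the wqa-cerberus repository linked in the records above for that).

```python
# Hypothetical sketch of a multi-head student in the spirit of the abstract:
# a shared small encoder plus one ranking head per teacher, each head distilled
# against its own teacher's scores. Names and objectives are illustrative only.
import torch
import torch.nn as nn
import torch.nn.functional as F


class MultiHeadStudent(nn.Module):
    """A compact shared encoder followed by one ranking head per teacher."""

    def __init__(self, encoder: nn.Module, hidden_size: int, num_teachers: int):
        super().__init__()
        self.encoder = encoder  # small transformer stack shared by all heads
        self.heads = nn.ModuleList(
            [nn.Linear(hidden_size, 1) for _ in range(num_teachers)]
        )

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        # embeddings: (batch, seq_len, hidden) token representations of a
        # question/candidate-answer pair (tokenization and embedding omitted).
        encoded = self.encoder(embeddings)   # (batch, seq_len, hidden)
        pooled = encoded.mean(dim=1)         # simple mean pooling (assumption)
        # One scalar ranking score per head, stacked as (batch, num_teachers).
        return torch.cat([head(pooled) for head in self.heads], dim=-1)


def distillation_loss(student_scores: torch.Tensor,
                      teacher_scores: torch.Tensor) -> torch.Tensor:
    # Each head regresses toward the scores of *its own* teacher, which is what
    # keeps the heads, and hence the distilled ensemble, diverse. MSE is just
    # one plausible objective for this sketch.
    return F.mse_loss(student_scores, teacher_scores)


if __name__ == "__main__":
    hidden, num_teachers = 64, 3
    encoder = nn.TransformerEncoder(
        nn.TransformerEncoderLayer(d_model=hidden, nhead=4, batch_first=True),
        num_layers=2,
    )
    student = MultiHeadStudent(encoder, hidden, num_teachers)

    x = torch.randn(8, 16, hidden)                 # toy batch of encoded QA pairs
    teacher_scores = torch.randn(8, num_teachers)  # scores from 3 distinct teachers
    loss = distillation_loss(student(x), teacher_scores)
    loss.backward()
    print(f"distillation loss: {loss.item():.4f}")
```

At inference time, a natural choice under these assumptions is to combine the per-head scores (for example, by averaging) into a single ranking score per candidate answer, so the student behaves like a cheap stand-in for the full teacher ensemble.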