@inproceedings{ye-etal-2022-eliciting,
title = "Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts",
author = "Ye, Qinyuan and
Zha, Juan and
Ren, Xiang",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-emnlp.189",
doi = "10.18653/v1/2022.findings-emnlp.189",
pages = "2567--2592",
abstract = "Recent works suggest that transformer models are capable of multi-tasking on diverse NLP tasks and adapt to new tasks efficiently. However, the potential of these multi-task models may be limited as they use the same set of parameters for all tasks. In contrast, humans tackle tasks in a more flexible way, by making proper presumptions on what skills and knowledge are relevant and executing only the necessary computations. Inspired by this, we propose to use task-level mixture-of-expert models, which has a collection of transformer layers (i.e., experts) and a router component to choose among these experts dynamically and flexibly. We find that these models help improve the average performance gain (ARG) metric by 2.6{\%} when adapting to unseen tasks in few-shot settings, and by 5.6{\%} in zero-shot generalization settings. Further, we show that the learned routing decisions and experts partly rediscover human categorization of NLP tasks {--} certain experts are strongly associated with extractive tasks, some with classification tasks, and some with tasks requiring world knowledge.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ye-etal-2022-eliciting">
<titleInfo>
<title>Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts</title>
</titleInfo>
<name type="personal">
<namePart type="given">Qinyuan</namePart>
<namePart type="family">Ye</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Zha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiang</namePart>
<namePart type="family">Ren</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Goldberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zornitsa</namePart>
<namePart type="family">Kozareva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent works suggest that transformer models are capable of multi-tasking on diverse NLP tasks and adapting to new tasks efficiently. However, the potential of these multi-task models may be limited as they use the same set of parameters for all tasks. In contrast, humans tackle tasks in a more flexible way, by making proper presumptions on what skills and knowledge are relevant and executing only the necessary computations. Inspired by this, we propose to use task-level mixture-of-experts models, which have a collection of transformer layers (i.e., experts) and a router component that chooses among these experts dynamically and flexibly. We find that these models help improve the average relative gain (ARG) metric by 2.6% when adapting to unseen tasks in few-shot settings, and by 5.6% in zero-shot generalization settings. Further, we show that the learned routing decisions and experts partly rediscover human categorization of NLP tasks – certain experts are strongly associated with extractive tasks, some with classification tasks, and some with tasks requiring world knowledge.</abstract>
<identifier type="citekey">ye-etal-2022-eliciting</identifier>
<identifier type="doi">10.18653/v1/2022.findings-emnlp.189</identifier>
<location>
<url>https://aclanthology.org/2022.findings-emnlp.189</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>2567</start>
<end>2592</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts
%A Ye, Qinyuan
%A Zha, Juan
%A Ren, Xiang
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F ye-etal-2022-eliciting
%X Recent works suggest that transformer models are capable of multi-tasking on diverse NLP tasks and adapting to new tasks efficiently. However, the potential of these multi-task models may be limited as they use the same set of parameters for all tasks. In contrast, humans tackle tasks in a more flexible way, by making proper presumptions on what skills and knowledge are relevant and executing only the necessary computations. Inspired by this, we propose to use task-level mixture-of-experts models, which have a collection of transformer layers (i.e., experts) and a router component that chooses among these experts dynamically and flexibly. We find that these models help improve the average relative gain (ARG) metric by 2.6% when adapting to unseen tasks in few-shot settings, and by 5.6% in zero-shot generalization settings. Further, we show that the learned routing decisions and experts partly rediscover human categorization of NLP tasks – certain experts are strongly associated with extractive tasks, some with classification tasks, and some with tasks requiring world knowledge.
%R 10.18653/v1/2022.findings-emnlp.189
%U https://aclanthology.org/2022.findings-emnlp.189
%U https://doi.org/10.18653/v1/2022.findings-emnlp.189
%P 2567-2592
Markdown (Informal)
[Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts](https://aclanthology.org/2022.findings-emnlp.189) (Ye et al., Findings 2022)
ACL
Qinyuan Ye, Juan Zha, and Xiang Ren. 2022. [Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts](https://aclanthology.org/2022.findings-emnlp.189). In *Findings of the Association for Computational Linguistics: EMNLP 2022*, pages 2567–2592, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
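
For readers who want a concrete picture of the architecture the abstract describes (a pool of expert transformer layers plus a router that selects among them per task), the following is a minimal, hypothetical PyTorch sketch. The class name `TaskLevelMoE`, the learned task embedding, the soft routing over experts, and all dimensions are illustrative assumptions for exposition, not the authors' implementation or hyperparameters.

```python
# Hypothetical sketch of a task-level mixture-of-experts layer: each expert is a
# full transformer layer, and a router maps a task identity to routing weights
# over the experts. This is NOT the paper's code; names and details are assumed.
import torch
import torch.nn as nn


class TaskLevelMoE(nn.Module):
    def __init__(self, d_model: int, n_heads: int, n_experts: int, n_tasks: int):
        super().__init__()
        # Each expert is an independent transformer encoder layer.
        self.experts = nn.ModuleList(
            [nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True)
             for _ in range(n_experts)]
        )
        # The router scores experts from a learned per-task embedding.
        self.task_embedding = nn.Embedding(n_tasks, d_model)
        self.router = nn.Linear(d_model, n_experts)

    def forward(self, x: torch.Tensor, task_id: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq_len, d_model); task_id: (batch,) integer task identities
        logits = self.router(self.task_embedding(task_id))   # (batch, n_experts)
        weights = torch.softmax(logits, dim=-1)               # soft routing weights
        expert_outputs = torch.stack(
            [expert(x) for expert in self.experts], dim=1
        )                                                      # (batch, n_experts, seq, d_model)
        # Combine expert outputs, weighted per example by its task's routing scores.
        return torch.einsum("be,besd->bsd", weights, expert_outputs)


if __name__ == "__main__":
    layer = TaskLevelMoE(d_model=64, n_heads=4, n_experts=3, n_tasks=10)
    hidden = torch.randn(2, 5, 64)        # (batch, seq_len, d_model)
    task_ids = torch.tensor([0, 7])       # one task id per example
    print(layer(hidden, task_ids).shape)  # torch.Size([2, 5, 64])
```

A hard variant would replace the softmax combination with a top-1 choice per task (e.g., routing each task's inputs through only its highest-scoring expert), which is closer to selecting a discrete expert per task; the soft version above is just the simplest differentiable sketch.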