@inproceedings{calvo-etal-2025-beyond,
title = "Beyond instruction-conditioning, {M}o{TE}: Mixture of Task Experts for Multi-task Embedding Models",
author = "Calvo, Miguel Romero and
Ding, Shuoyang and
Barrett, Corey D and
Dinu, Georgiana and
Karypis, George",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.1168/",
doi = "10.18653/v1/2025.findings-acl.1168",
pages = "22731--22746",
ISBN = "979-8-89176-256-5",
abstract = "Dense embeddings are fundamental to modern machine learning systems, powering Retrieval-Augmented Generation (RAG), information retrieval, and representation learning. While instruction-conditioning has become the dominant approach for embedding specialization, its direct application to low-capacity models imposes fundamental representational constraints that limit the performance gains derived from specialization. In this paper, we analyze these limitations and introduce the Mixture of Task Experts (MoTE) transformer block, which leverages task-specialized parameters trained with Task-Aware Contrastive Learning () to enhance the model{'}s ability to generate specialized embeddings. Empirical results show that MoTE achieves 64{\%} higher performance gains in retrieval datasets ($+3.27\rightarrow +5.21$) and 43{\%} higher performance gains across all datasets ($+1.81\rightarrow 2.60$). Critically, these gains are achieved without altering instructions, training data, inference time, or number of active parameters."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="calvo-etal-2025-beyond">
    <titleInfo>
      <title>Beyond instruction-conditioning, MoTE: Mixture of Task Experts for Multi-task Embedding Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Miguel</namePart>
      <namePart type="given">Romero</namePart>
      <namePart type="family">Calvo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Shuoyang</namePart>
      <namePart type="family">Ding</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Corey</namePart>
      <namePart type="given">D</namePart>
      <namePart type="family">Barrett</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Georgiana</namePart>
      <namePart type="family">Dinu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">George</namePart>
      <namePart type="family">Karypis</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: ACL 2025</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Wanxiang</namePart>
        <namePart type="family">Che</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Joyce</namePart>
        <namePart type="family">Nabende</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ekaterina</namePart>
        <namePart type="family">Shutova</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohammad</namePart>
        <namePart type="given">Taher</namePart>
        <namePart type="family">Pilehvar</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Vienna, Austria</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-256-5</identifier>
    </relatedItem>
    <abstract>Dense embeddings are fundamental to modern machine learning systems, powering Retrieval-Augmented Generation (RAG), information retrieval, and representation learning. While instruction-conditioning has become the dominant approach for embedding specialization, its direct application to low-capacity models imposes fundamental representational constraints that limit the performance gains derived from specialization. In this paper, we analyze these limitations and introduce the Mixture of Task Experts (MoTE) transformer block, which leverages task-specialized parameters trained with Task-Aware Contrastive Learning () to enhance the model’s ability to generate specialized embeddings. Empirical results show that MoTE achieves 64% higher performance gains in retrieval datasets (+3.27\rightarrow +5.21) and 43% higher performance gains across all datasets (+1.81\rightarrow 2.60). Critically, these gains are achieved without altering instructions, training data, inference time, or number of active parameters.</abstract>
    <identifier type="citekey">calvo-etal-2025-beyond</identifier>
    <identifier type="doi">10.18653/v1/2025.findings-acl.1168</identifier>
    <location>
      <url>https://aclanthology.org/2025.findings-acl.1168/</url>
    </location>
    <part>
      <date>2025-07</date>
      <extent unit="page">
        <start>22731</start>
        <end>22746</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Beyond instruction-conditioning, MoTE: Mixture of Task Experts for Multi-task Embedding Models
%A Calvo, Miguel Romero
%A Ding, Shuoyang
%A Barrett, Corey D.
%A Dinu, Georgiana
%A Karypis, George
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F calvo-etal-2025-beyond
%X Dense embeddings are fundamental to modern machine learning systems, powering Retrieval-Augmented Generation (RAG), information retrieval, and representation learning. While instruction-conditioning has become the dominant approach for embedding specialization, its direct application to low-capacity models imposes fundamental representational constraints that limit the performance gains derived from specialization. In this paper, we analyze these limitations and introduce the Mixture of Task Experts (MoTE) transformer block, which leverages task-specialized parameters trained with Task-Aware Contrastive Learning () to enhance the model’s ability to generate specialized embeddings. Empirical results show that MoTE achieves 64% higher performance gains in retrieval datasets (+3.27\rightarrow +5.21) and 43% higher performance gains across all datasets (+1.81\rightarrow 2.60). Critically, these gains are achieved without altering instructions, training data, inference time, or number of active parameters.
%R 10.18653/v1/2025.findings-acl.1168
%U https://aclanthology.org/2025.findings-acl.1168/
%U https://doi.org/10.18653/v1/2025.findings-acl.1168
%P 22731-22746
Markdown (Informal)
[Beyond instruction-conditioning, MoTE: Mixture of Task Experts for Multi-task Embedding Models](https://aclanthology.org/2025.findings-acl.1168/) (Calvo et al., Findings 2025)
ACL
Miguel Romero Calvo, Shuoyang Ding, Corey D Barrett, Georgiana Dinu, and George Karypis. 2025. Beyond instruction-conditioning, MoTE: Mixture of Task Experts for Multi-task Embedding Models. In Findings of the Association for Computational Linguistics: ACL 2025, pages 22731–22746, Vienna, Austria. Association for Computational Linguistics.
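
The abstract above describes MoTE as a transformer block that routes through task-specialized parameters while keeping the number of active parameters unchanged. As a rough illustration of that general idea only (not the paper's implementation), the sketch below shows a shared attention layer followed by hard, task-id-based routing to one of several feed-forward experts; all module names, dimensions, and the routing scheme are assumptions made for illustration.

```python
# Hypothetical sketch of a mixture-of-task-experts transformer block.
# NOT the paper's MoTE implementation; names, sizes, and the hard task-id
# routing are illustrative assumptions only.
import torch
import torch.nn as nn


class TaskExpertFFN(nn.Module):
    """One feed-forward expert per task; only the selected expert runs."""

    def __init__(self, d_model: int, d_ff: int, num_tasks: int):
        super().__init__()
        self.experts = nn.ModuleList(
            nn.Sequential(nn.Linear(d_model, d_ff), nn.GELU(), nn.Linear(d_ff, d_model))
            for _ in range(num_tasks)
        )

    def forward(self, x: torch.Tensor, task_id: int) -> torch.Tensor:
        # Hard routing on a known task id: per forward pass, the active
        # parameter count matches a single dense FFN.
        return self.experts[task_id](x)


class MoTEBlock(nn.Module):
    """Transformer block with shared attention and task-routed FFN experts."""

    def __init__(self, d_model: int = 384, n_heads: int = 6,
                 d_ff: int = 1536, num_tasks: int = 4):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.ffn = TaskExpertFFN(d_model, d_ff, num_tasks)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, x: torch.Tensor, task_id: int) -> torch.Tensor:
        h, _ = self.attn(x, x, x)          # shared self-attention
        x = self.norm1(x + h)
        x = self.norm2(x + self.ffn(x, task_id))  # task-specialized FFN
        return x


if __name__ == "__main__":
    block = MoTEBlock()
    tokens = torch.randn(2, 16, 384)              # (batch, sequence, hidden)
    retrieval_embeds = block(tokens, task_id=0)   # e.g. a "retrieval" expert
    print(retrieval_embeds.shape)                 # torch.Size([2, 16, 384])
```

Because exactly one expert runs per input in this sketch, per-example compute and active parameters match a single dense FFN, which mirrors the property the abstract highlights (gains without changing inference time or active parameter count).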