@inproceedings{jia-etal-2025-mma,
title = "{MMA}: Cross-Domain Knowledge Integration via Mixture of Multi-Domain Agents",
author = "Jia, Kehang and
Li, Juntao and
Liang, Xiaobo and
Xiao, Yisheng and
Yang, Yixuan and
Zhang, Min",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.707/",
pages = "13145--13160",
ISBN = "979-8-89176-335-7",
abstract = "Rather than merely to retain previously acquired generalization, achieving synergistic improvements between generalization and domain specialization in foundation models remains a significant challenge in both pre-training and post-training. As an alternative, we propose a test-time cross-domain knowledge integration method, Mixture of Multi-domain Agents (MMA), which dynamically combines the outputs of general-purpose and domain-specific models to enhance their performance on complex, domain{-}specific tasks. MMA formulates the integration process as a search problem, using Monte Carlo Tree Search (MCTS) to find the path that optimally harmonizes the respective strengths of different models in generalization and domain-specific knowledge. In addition, We design specific action spaces to control the knowledge integration between multiple models, and cross-inspection reward is introduced to fairly score strategies in different domains. Experiments in diverse domains show that MMA can effectively combine the strengths of different models to enhance their performance. For instance, in legal tests, the average performance of all tasks increased from 42.57{\%} to 53.68{\%}. In financial tests, it improved from 56.01{\%} to 62.68{\%}."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jia-etal-2025-mma">
<titleInfo>
<title>MMA: Cross-Domain Knowledge Integration via Mixture of Multi-Domain Agents</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kehang</namePart>
<namePart type="family">Jia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juntao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiaobo</namePart>
<namePart type="family">Liang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yisheng</namePart>
<namePart type="family">Xiao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yixuan</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Beyond merely retaining previously acquired generalization, achieving synergistic improvements between generalization and domain specialization in foundation models remains a significant challenge in both pre-training and post-training. As an alternative, we propose a test-time cross-domain knowledge integration method, Mixture of Multi-domain Agents (MMA), which dynamically combines the outputs of general-purpose and domain-specific models to enhance their performance on complex, domain-specific tasks. MMA formulates the integration process as a search problem, using Monte Carlo Tree Search (MCTS) to find the path that optimally harmonizes the respective strengths of different models in generalization and domain-specific knowledge. In addition, we design specific action spaces to control knowledge integration between multiple models and introduce a cross-inspection reward to fairly score strategies in different domains. Experiments in diverse domains show that MMA can effectively combine the strengths of different models to enhance their performance. For instance, in legal tests, the average performance across all tasks increased from 42.57% to 53.68%; in financial tests, it improved from 56.01% to 62.68%.</abstract>
<identifier type="citekey">jia-etal-2025-mma</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.707/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>13145</start>
<end>13160</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T MMA: Cross-Domain Knowledge Integration via Mixture of Multi-Domain Agents
%A Jia, Kehang
%A Li, Juntao
%A Liang, Xiaobo
%A Xiao, Yisheng
%A Yang, Yixuan
%A Zhang, Min
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F jia-etal-2025-mma
%X Beyond merely retaining previously acquired generalization, achieving synergistic improvements between generalization and domain specialization in foundation models remains a significant challenge in both pre-training and post-training. As an alternative, we propose a test-time cross-domain knowledge integration method, Mixture of Multi-domain Agents (MMA), which dynamically combines the outputs of general-purpose and domain-specific models to enhance their performance on complex, domain-specific tasks. MMA formulates the integration process as a search problem, using Monte Carlo Tree Search (MCTS) to find the path that optimally harmonizes the respective strengths of different models in generalization and domain-specific knowledge. In addition, we design specific action spaces to control knowledge integration between multiple models and introduce a cross-inspection reward to fairly score strategies in different domains. Experiments in diverse domains show that MMA can effectively combine the strengths of different models to enhance their performance. For instance, in legal tests, the average performance across all tasks increased from 42.57% to 53.68%; in financial tests, it improved from 56.01% to 62.68%.
%U https://aclanthology.org/2025.findings-emnlp.707/
%P 13145-13160
Markdown (Informal)
[MMA: Cross-Domain Knowledge Integration via Mixture of Multi-Domain Agents](https://aclanthology.org/2025.findings-emnlp.707/) (Jia et al., Findings 2025)
ACL
Kehang Jia, Juntao Li, Xiaobo Liang, Yisheng Xiao, Yixuan Yang, and Min Zhang. 2025. MMA: Cross-Domain Knowledge Integration via Mixture of Multi-Domain Agents. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 13145–13160, Suzhou, China. Association for Computational Linguistics.
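
For readers who want a concrete picture of the test-time search the abstract describes, the sketch below is a minimal, hypothetical illustration only: a toy MCTS loop that decides, step by step, which agent (a general-purpose or a domain-specific model) refines the current draft, scored by a stand-in reward. The names `general_model`, `domain_model`, `cross_inspection_reward`, the two-action space, and all hyperparameters are placeholder assumptions and do not reproduce the paper's actual action spaces, reward design, or implementation.

```python
import math
import random

# Stand-in components (assumptions; MMA defines its own agents and reward).
def general_model(question, draft):
    # Placeholder for a general-purpose LLM refining the current draft.
    return draft + " [general step]"

def domain_model(question, draft):
    # Placeholder for a domain-specific LLM (e.g., legal or finance) step.
    return draft + " [domain step]"

def cross_inspection_reward(question, answer):
    # Placeholder score; in MMA, models inspect each other's outputs.
    return random.random() + 0.1 * answer.count("[domain step]")

ACTIONS = {"general": general_model, "domain": domain_model}

class Node:
    def __init__(self, draft, depth=0, parent=None):
        self.draft, self.depth, self.parent = draft, depth, parent
        self.children = {}            # action name -> child Node
        self.visits, self.value = 0, 0.0

    def ucb_child(self, c=1.4):
        # UCB1: balance average reward against exploration.
        return max(
            self.children.values(),
            key=lambda n: n.value / (n.visits + 1e-9)
            + c * math.sqrt(math.log(self.visits + 1) / (n.visits + 1e-9)),
        )

def all_nodes(node):
    yield node
    for child in node.children.values():
        yield from all_nodes(child)

def mcts_integrate(question, iterations=50, max_depth=3):
    root = Node(draft="")
    for _ in range(iterations):
        # Selection: descend through fully expanded nodes via UCB.
        node = root
        while node.children and len(node.children) == len(ACTIONS):
            node = node.ucb_child()
        # Expansion: let one untried agent refine the draft (bounded depth).
        untried = [a for a in ACTIONS if a not in node.children]
        if untried and node.depth < max_depth:
            action = random.choice(untried)
            child = Node(ACTIONS[action](question, node.draft),
                         depth=node.depth + 1, parent=node)
            node.children[action] = child
            node = child
        # Evaluation: score the current draft with the stub reward.
        reward = cross_inspection_reward(question, node.draft)
        # Backpropagation: update visit counts and values up to the root.
        while node is not None:
            node.visits += 1
            node.value += reward
            node = node.parent
    # Return the draft with the best average reward found in the tree.
    best = max((n for n in all_nodes(root) if n.parent is not None),
               key=lambda n: n.value / (n.visits + 1e-9))
    return best.draft

if __name__ == "__main__":
    print(mcts_integrate("What is the statute of limitations for fraud?"))
```

In the paper, the reward comes from cross-inspection between models and the action space controls finer-grained knowledge integration; both are reduced to stubs here so the search loop itself stays readable.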