BibTeX
@inproceedings{gonzalez-gutierrez-quattoni-2025-domain,
title = "Domain Pre-training Impact on Representations",
author = "Gonzalez-Gutierrez, Cesar and
Quattoni, Ariadna",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.1201/",
doi = "10.18653/v1/2025.findings-emnlp.1201",
pages = "22033--22049",
ISBN = "979-8-89176-335-7",
abstract = "This empirical study analyzes how the choice of pre-training corpus affects the quality of learned transformer representations. We focus specifically on the representation quality achieved through pre-training alone. Our experiments demonstrate that pre-training on a small, specialized corpus can produce effective representations, and that the effectiveness of combining a generic and a specialized corpus depends on the distributional similarity between the target task and the specialized corpus."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gonzalez-gutierrez-quattoni-2025-domain">
<titleInfo>
<title>Domain Pre-training Impact on Representations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Cesar</namePart>
<namePart type="family">Gonzalez-Gutierrez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ariadna</namePart>
<namePart type="family">Quattoni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>This empirical study analyzes how the choice of pre-training corpus affects the quality of learned transformer representations. We focus specifically on the representation quality achieved through pre-training alone. Our experiments demonstrate that pre-training on a small, specialized corpus can produce effective representations, and that the effectiveness of combining a generic and a specialized corpus depends on the distributional similarity between the target task and the specialized corpus.</abstract>
<identifier type="citekey">gonzalez-gutierrez-quattoni-2025-domain</identifier>
<identifier type="doi">10.18653/v1/2025.findings-emnlp.1201</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.1201/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>22033</start>
<end>22049</end>
</extent>
</part>
</mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Domain Pre-training Impact on Representations
%A Gonzalez-Gutierrez, Cesar
%A Quattoni, Ariadna
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F gonzalez-gutierrez-quattoni-2025-domain
%X This empirical study analyzes how the choice of pre-training corpus affects the quality of learned transformer representations. We focus specifically on the representation quality achieved through pre-training alone. Our experiments demonstrate that pre-training on a small, specialized corpus can produce effective representations, and that the effectiveness of combining a generic and a specialized corpus depends on the distributional similarity between the target task and the specialized corpus.
%R 10.18653/v1/2025.findings-emnlp.1201
%U https://aclanthology.org/2025.findings-emnlp.1201/
%U https://doi.org/10.18653/v1/2025.findings-emnlp.1201
%P 22033-22049

Markdown (Informal)
[Domain Pre-training Impact on Representations](https://aclanthology.org/2025.findings-emnlp.1201/) (Gonzalez-Gutierrez & Quattoni, Findings 2025)

ACL
- Cesar Gonzalez-Gutierrez and Ariadna Quattoni. 2025. Domain Pre-training Impact on Representations. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 22033–22049, Suzhou, China. Association for Computational Linguistics.