@inproceedings{turk-etal-2025-clac,
title = "{CL}a{C} at {DISRPT} 2025: Hierarchical Adapters for Cross-Framework Multi-lingual Discourse Relation Classification",
author = "Turk, Nawar and
Comitogianni, Daniele and
Kosseim, Leila",
editor = "Braud, Chlo{\'e} and
Liu, Yang Janet and
Muller, Philippe and
Zeldes, Amir and
Li, Chuyuan",
booktitle = "Proceedings of the 4th Shared Task on Discourse Relation Parsing and Treebanking (DISRPT 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.disrpt-1.3/",
pages = "36--47",
ISBN = "979-8-89176-344-9",
abstract = "We present our submission to Task 3 (Discourse Relation Classification) of the DISRPT 2025 shared task. Task 3 introduces a unified set of 17 discourse relation labels across 39 corpora in 16 languages and six discourse frameworks, posing significant multilingual and cross{-}formalism challenges. We first benchmark the task by fine{-}tuning multilingual BERT{-}based models (mBERT, XLM{-}RoBERTa{-}Base, and XLM{-}RoBERTa{-}Large) with two argument{-}ordering strategies and progressive unfreezing ratios to establish strong baselines. We then evaluate prompt{-}based large language models (namely Claude Opus 4.0) in zero{-}shot and few{-}shot settings to understand how LLMs respond to the newly proposed unified labels. Finally, we introduce HiDAC, a Hierarchical Dual{-}Adapter Contrastive learning model. Results show that while larger transformer models achieve higher accuracy, the improvements are modest, and that unfreezing the top 75{\%} of encoder layers yields performance comparable to full fine{-}tuning while training far fewer parameters. Prompt{-}based models lag significantly behind fine{-}tuned transformers, and HiDAC achieves the highest overall accuracy (67.5{\%}) while remaining more parameter{-}efficient than full fine{-}tuning."
}