@inproceedings{lalitha-devi-etal-2025-secorel,
title = "{S}e{C}o{R}el: Multilingual Discourse Analysis in {DISRPT} 2025",
author = "Lalitha Devi, Sobha and
Rk Rao, Pattabhi and
Sundar Ram, Vijay",
editor = "Braud, Chlo{\'e} and
Liu, Yang Janet and
Muller, Philippe and
Zeldes, Amir and
Li, Chuyuan",
booktitle = "Proceedings of the 4th Shared Task on Discourse Relation Parsing and Treebanking (DISRPT 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.disrpt-1.6/",
pages = "79--86",
ISBN = "979-8-89176-344-9",
abstract = "The work presented here describes our participation in DISRPT 2025 shared task in three tasks, Task1: Discourse Unit Segmentation across Formalisms, Task 2: Discourse Connective Identification across Languages and Task 3: Discourse Relation Classification across Formalisms. We have fine-tuned XLM-RoBERTa, a language model to address these three tasks. We have come up with one single multilingual language model for each task. Our system handles data in both the formats .conllu and .tok and different discourse formalisms. We have obtained encouraging results. The performance on test data in the three tasks is similar to the results obtained for the development data."
}
Markdown (informal):
[SeCoRel: Multilingual Discourse Analysis in DISRPT 2025](https://aclanthology.org/2025.disrpt-1.6/) (Lalitha Devi et al., DISRPT 2025)

ACL:
Sobha Lalitha Devi, Pattabhi Rk Rao, and Vijay Sundar Ram. 2025. SeCoRel: Multilingual Discourse Analysis in DISRPT 2025. In Proceedings of the 4th Shared Task on Discourse Relation Parsing and Treebanking (DISRPT 2025), pages 79–86, Suzhou, China. Association for Computational Linguistics.
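
The abstract describes fine-tuning XLM-RoBERTa as a single multilingual model per task. The sketch below is a minimal, hypothetical illustration of that kind of setup for Task 1 (discourse unit segmentation framed as token classification); it is not the authors' released code, and the label set, subword alignment scheme, and hyperparameters are assumptions for illustration only.

```python
# Minimal sketch (assumptions, not the authors' code): fine-tune XLM-RoBERTa
# as a token classifier for discourse unit segmentation with BIO-style tags.
from transformers import (
    AutoModelForTokenClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

LABELS = ["O", "B-SEG"]  # hypothetical tag set: segment-initial vs. other tokens

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForTokenClassification.from_pretrained(
    "xlm-roberta-base", num_labels=len(LABELS)
)

def encode(words, tags, max_length=256):
    """Tokenize a pre-split sentence and align word-level tags to subword pieces."""
    enc = tokenizer(
        words,
        is_split_into_words=True,
        truncation=True,
        max_length=max_length,
    )
    labels, previous = [], None
    for word_id in enc.word_ids():
        if word_id is None or word_id == previous:
            labels.append(-100)  # ignore special tokens and continuation pieces
        else:
            labels.append(LABELS.index(tags[word_id]))
        previous = word_id
    enc["labels"] = labels
    return enc

# train_dataset / dev_dataset would be built by applying encode() to sentences
# read from the shared task's .tok / .conllu files (omitted here).
# trainer = Trainer(
#     model=model,
#     args=TrainingArguments(output_dir="secorel-seg", num_train_epochs=3),
#     train_dataset=train_dataset,
#     eval_dataset=dev_dataset,
# )
# trainer.train()
```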