@inproceedings{avila-crego-2025-systran,
title = "{SYSTRAN} @ {IWSLT} 2025 Low-resource track",
author = "Avila, Marko and
Crego, Josep",
editor = "Salesky, Elizabeth and
Federico, Marcello and
Anastasopoulos, Antonis",
booktitle = "Proceedings of the 22nd International Conference on Spoken Language Translation (IWSLT 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria (in-person and online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.iwslt-1.33/",
doi = "10.18653/v1/2025.iwslt-1.33",
pages = "324--332",
ISBN = "979-8-89176-272-5",
abstract = "SYSTRAN submitted systems for one language pair in the 2025 Low-Resource Language Track. Our main contribution lies in the tight coupling and light fine-tuning of an ASR encoder (Whisper) with a neural machine translation decoder (NLLB), forming an efficient speech translation pipeline. We present the modeling strategies and optimizations implemented to build a system that, unlike large-scale end-to-end models, performs effectively under constraints of limited training data and computational resources. This approach enables the development of high-quality speech translation in low-resource settings, while ensuring both efficiency and scalability. We also conduct a comparative analysis of our proposed system against various paradigms, including a cascaded Whisper+NLLB setup and direct end-to-end fine-tuning of Whisper."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="avila-crego-2025-systran">
    <titleInfo>
      <title>SYSTRAN @ IWSLT 2025 Low-resource track</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Marko</namePart>
      <namePart type="family">Avila</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Josep</namePart>
      <namePart type="family">Crego</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 22nd International Conference on Spoken Language Translation (IWSLT 2025)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Elizabeth</namePart>
        <namePart type="family">Salesky</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marcello</namePart>
        <namePart type="family">Federico</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Antonis</namePart>
        <namePart type="family">Anastasopoulos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Vienna, Austria (in-person and online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-272-5</identifier>
    </relatedItem>
    <abstract>SYSTRAN submitted systems for one language pair in the 2025 Low-Resource Language Track. Our main contribution lies in the tight coupling and light fine-tuning of an ASR encoder (Whisper) with a neural machine translation decoder (NLLB), forming an efficient speech translation pipeline. We present the modeling strategies and optimizations implemented to build a system that, unlike large-scale end-to-end models, performs effectively under constraints of limited training data and computational resources. This approach enables the development of high-quality speech translation in low-resource settings, while ensuring both efficiency and scalability. We also conduct a comparative analysis of our proposed system against various paradigms, including a cascaded Whisper+NLLB setup and direct end-to-end fine-tuning of Whisper.</abstract>
    <identifier type="citekey">avila-crego-2025-systran</identifier>
    <identifier type="doi">10.18653/v1/2025.iwslt-1.33</identifier>
    <location>
      <url>https://aclanthology.org/2025.iwslt-1.33/</url>
    </location>
    <part>
      <date>2025-07</date>
      <extent unit="page">
        <start>324</start>
        <end>332</end>
      </extent>
    </part>
  </mods>
</modsCollection>

%0 Conference Proceedings
%T SYSTRAN @ IWSLT 2025 Low-resource track
%A Avila, Marko
%A Crego, Josep
%Y Salesky, Elizabeth
%Y Federico, Marcello
%Y Anastasopoulos, Antonis
%S Proceedings of the 22nd International Conference on Spoken Language Translation (IWSLT 2025)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria (in-person and online)
%@ 979-8-89176-272-5
%F avila-crego-2025-systran
%X SYSTRAN submitted systems for one language pair in the 2025 Low-Resource Language Track. Our main contribution lies in the tight coupling and light fine-tuning of an ASR encoder (Whisper) with a neural machine translation decoder (NLLB), forming an efficient speech translation pipeline. We present the modeling strategies and optimizations implemented to build a system that, unlike large-scale end-to-end models, performs effectively under constraints of limited training data and computational resources. This approach enables the development of high-quality speech translation in low-resource settings, while ensuring both efficiency and scalability. We also conduct a comparative analysis of our proposed system against various paradigms, including a cascaded Whisper+NLLB setup and direct end-to-end fine-tuning of Whisper.
%R 10.18653/v1/2025.iwslt-1.33
%U https://aclanthology.org/2025.iwslt-1.33/
%U https://doi.org/10.18653/v1/2025.iwslt-1.33
%P 324-332
Markdown (Informal)

[SYSTRAN @ IWSLT 2025 Low-resource track](https://aclanthology.org/2025.iwslt-1.33/) (Avila & Crego, IWSLT 2025)

ACL

Marko Avila and Josep Crego. 2025. SYSTRAN @ IWSLT 2025 Low-resource track. In Proceedings of the 22nd International Conference on Spoken Language Translation (IWSLT 2025), pages 324–332, Vienna, Austria (in-person and online). Association for Computational Linguistics.
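
The abstract describes tightly coupling a Whisper ASR encoder with an NLLB translation decoder. A minimal sketch of that general idea follows, assuming the Hugging Face `transformers` API; the model checkpoints and the linear bridge between hidden sizes are illustrative assumptions, not the authors' exact configuration.

```python
# Sketch: feed Whisper speech-encoder states into an NLLB seq2seq model
# in place of its text encoder. Illustrative only, not the paper's setup.
import torch.nn as nn
from transformers import WhisperModel, AutoModelForSeq2SeqLM


class WhisperNLLB(nn.Module):
    def __init__(self,
                 asr_name="openai/whisper-small",
                 mt_name="facebook/nllb-200-distilled-600M"):
        super().__init__()
        # Keep only Whisper's encoder; its decoder is discarded.
        self.encoder = WhisperModel.from_pretrained(asr_name).encoder
        self.nllb = AutoModelForSeq2SeqLM.from_pretrained(mt_name)
        # Bridge the (generally different) hidden sizes of the two models.
        self.proj = nn.Linear(self.encoder.config.d_model,
                              self.nllb.config.d_model)

    def forward(self, input_features, labels):
        # input_features: log-mel spectrogram from WhisperFeatureExtractor.
        speech_states = self.encoder(input_features).last_hidden_state
        bridged = self.proj(speech_states)
        # Pass speech representations as precomputed encoder outputs so the
        # NLLB decoder cross-attends to them; labels yield the training loss.
        return self.nllb(encoder_outputs=(bridged,), labels=labels)
```

Under the "light fine-tuning" the abstract mentions, one would presumably update only the bridging projection and a small subset of the pretrained parameters, keeping training feasible with limited data and compute.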