@inproceedings{sahoo-etal-2024-nlip,
title = "{NLIP}{\_}{L}ab-{IITH} Low-Resource {MT} System for {WMT}24 {I}ndic {MT} Shared Task",
author = "Sahoo, Pramit and
Brahma, Maharaj and
Desarkar, Maunendra Sankar",
editor = "Haddow, Barry and
Kocmi, Tom and
Koehn, Philipp and
Monz, Christof",
booktitle = "Proceedings of the Ninth Conference on Machine Translation",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.wmt-1.70",
pages = "781--787",
abstract = "In this paper, we describe our system for the WMT 24 shared task of Low-Resource Indic Language Translation. We consider eng↔{as, kha, lus, mni} as participating language pairs. In this shared task, we explore the fine-tuning of a pre-trained model motivated by the pre-trained objective of aligning embeddings closer by alignment augmentation (Lin et al., 2020) for 22 scheduled Indian languages. Our primary system is based on language-specific fine-tuning on a pre-trained model. We achieve chrF2 scores of 50.6, 42.3, 54.9, and 66.3 on the official public test set for eng→as, eng→kha, eng→lus, eng→mni, respectively. We also explore multilingual training with/without language grouping and layer-freezing.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sahoo-etal-2024-nlip">
<titleInfo>
<title>NLIP_Lab-IITH Low-Resource MT System for WMT24 Indic MT Shared Task</title>
</titleInfo>
<name type="personal">
<namePart type="given">Pramit</namePart>
<namePart type="family">Sahoo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maharaj</namePart>
<namePart type="family">Brahma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maunendra</namePart>
<namePart type="given">Sankar</namePart>
<namePart type="family">Desarkar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Ninth Conference on Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Barry</namePart>
<namePart type="family">Haddow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tom</namePart>
<namePart type="family">Kocmi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Philipp</namePart>
<namePart type="family">Koehn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christof</namePart>
<namePart type="family">Monz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we describe our system for the WMT 24 shared task of Low-Resource Indic Language Translation. We consider eng↔as, kha, lus, mni as participating language pairs. In this shared task, we explore the fine-tuning of a pre-trained model motivated by the pre-trained objective of aligning embeddings closer by alignment augmentation (Lin et al., 2020) for 22 scheduled Indian languages. Our primary system is based on language-specific fine-tuning on a pre-trained model. We achieve chrF2 scores of 50.6, 42.3, 54.9, and 66.3 on the official public test set for eng→as, eng→kha, eng→lus, eng→mni, respectively. We also explore multilingual training with/without language grouping and layer-freezing.</abstract>
<identifier type="citekey">sahoo-etal-2024-nlip</identifier>
<location>
<url>https://aclanthology.org/2024.wmt-1.70</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>781</start>
<end>787</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T NLIP_Lab-IITH Low-Resource MT System for WMT24 Indic MT Shared Task
%A Sahoo, Pramit
%A Brahma, Maharaj
%A Desarkar, Maunendra Sankar
%Y Haddow, Barry
%Y Kocmi, Tom
%Y Koehn, Philipp
%Y Monz, Christof
%S Proceedings of the Ninth Conference on Machine Translation
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F sahoo-etal-2024-nlip
%X In this paper, we describe our system for the WMT 24 shared task of Low-Resource Indic Language Translation. We consider eng↔as, kha, lus, mni as participating language pairs. In this shared task, we explore the fine-tuning of a pre-trained model motivated by the pre-trained objective of aligning embeddings closer by alignment augmentation (Lin et al., 2020) for 22 scheduled Indian languages. Our primary system is based on language-specific fine-tuning on a pre-trained model. We achieve chrF2 scores of 50.6, 42.3, 54.9, and 66.3 on the official public test set for eng→as, eng→kha, eng→lus, eng→mni, respectively. We also explore multilingual training with/without language grouping and layer-freezing.
%U https://aclanthology.org/2024.wmt-1.70
%P 781-787
Markdown (Informal)
[NLIP_Lab-IITH Low-Resource MT System for WMT24 Indic MT Shared Task](https://aclanthology.org/2024.wmt-1.70) (Sahoo et al., WMT 2024)
ACL