@inproceedings{giri-etal-2025-enriching,
title = "Enriching the Low-Resource Neural Machine Translation with Large Language Model",
author = "Giri, Sachin and
Ninomiya, Takashi and
Goto, Isao",
editor = "T.y.s.s, Santosh and
Shimizu, Shuichiro and
Gong, Yifan",
booktitle = "The 14th International Joint Conference on Natural Language Processing and The 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.ijcnlp-srw.16/",
pages = "184--192",
ISBN = "979-8-89176-304-3",
abstract = "Improving the performance of neural machine translation for low-resource languages is challenging due to the limited availability of parallel corpora. However, recently available Large Language Models (LLM) have demonstrated superior performance in various natural language processing tasks, including translation. In this work, we propose to incorporate an LLM into a Machine Translation (MT) model as a prior distribution to leverage its translation capabilities. The LLM acts as a teacher, instructing the student MT model about the target language. We conducted an experiment in four language pairs: English {\ensuremath{\Leftrightarrow}} German and English {\ensuremath{\Leftrightarrow}} Hindi. This resulted in improved BLEU and COMET scores in a low-resource setting."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="giri-etal-2025-enriching">
<titleInfo>
<title>Enriching the Low-Resource Neural Machine Translation with Large Language Model</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sachin</namePart>
<namePart type="family">Giri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Takashi</namePart>
<namePart type="family">Ninomiya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isao</namePart>
<namePart type="family">Goto</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>The 14th International Joint Conference on Natural Language Processing and The 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Santosh</namePart>
<namePart type="family">T.y.s.s</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shuichiro</namePart>
<namePart type="family">Shimizu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yifan</namePart>
<namePart type="family">Gong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mumbai, India</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-304-3</identifier>
</relatedItem>
<abstract>Improving the performance of neural machine translation for low-resource languages is challenging due to the limited availability of parallel corpora. However, recently available Large Language Models (LLMs) have demonstrated superior performance in various natural language processing tasks, including translation. In this work, we propose to incorporate an LLM into a Machine Translation (MT) model as a prior distribution to leverage its translation capabilities. The LLM acts as a teacher, instructing the student MT model about the target language. We conducted experiments on four language pairs: English ⇔ German and English ⇔ Hindi. This resulted in improved BLEU and COMET scores in a low-resource setting.</abstract>
<identifier type="citekey">giri-etal-2025-enriching</identifier>
<location>
<url>https://aclanthology.org/2025.ijcnlp-srw.16/</url>
</location>
<part>
<date>2025-12</date>
<extent unit="page">
<start>184</start>
<end>192</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Enriching the Low-Resource Neural Machine Translation with Large Language Model
%A Giri, Sachin
%A Ninomiya, Takashi
%A Goto, Isao
%Y T.y.s.s, Santosh
%Y Shimizu, Shuichiro
%Y Gong, Yifan
%S The 14th International Joint Conference on Natural Language Processing and The 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics
%D 2025
%8 December
%I Association for Computational Linguistics
%C Mumbai, India
%@ 979-8-89176-304-3
%F giri-etal-2025-enriching
%X Improving the performance of neural machine translation for low-resource languages is challenging due to the limited availability of parallel corpora. However, recently available Large Language Models (LLMs) have demonstrated superior performance in various natural language processing tasks, including translation. In this work, we propose to incorporate an LLM into a Machine Translation (MT) model as a prior distribution to leverage its translation capabilities. The LLM acts as a teacher, instructing the student MT model about the target language. We conducted experiments on four language pairs: English ⇔ German and English ⇔ Hindi. This resulted in improved BLEU and COMET scores in a low-resource setting.
%U https://aclanthology.org/2025.ijcnlp-srw.16/
%P 184-192
Markdown (Informal)
[Enriching the Low-Resource Neural Machine Translation with Large Language Model](https://aclanthology.org/2025.ijcnlp-srw.16/) (Giri et al., IJCNLP 2025)
ACL
Sachin Giri, Takashi Ninomiya, and Isao Goto. 2025. [Enriching the Low-Resource Neural Machine Translation with Large Language Model](https://aclanthology.org/2025.ijcnlp-srw.16/). In *The 14th International Joint Conference on Natural Language Processing and The 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics*, pages 184–192, Mumbai, India. Association for Computational Linguistics.
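
The abstract describes a teacher-student setup in which an LLM serves as a prior distribution for a student MT model; the paper itself gives the exact formulation. As a rough illustration only, here is a minimal PyTorch sketch of one common way to realize such a prior: a KL term pulling the student's token distribution toward the teacher's. The shared target vocabulary, the `lambda_kl` weight, and the toy tensors are assumptions of this sketch, not details taken from the paper.

```python
# Minimal sketch (not the authors' code): regularizing a student MT
# model toward an LLM teacher's next-token distribution.
import torch
import torch.nn.functional as F

vocab_size, batch, seq_len = 100, 2, 5
lambda_kl = 0.5  # assumed weight balancing reference loss vs. LLM prior

# Stand-ins for the student MT model's and teacher LLM's output logits
# over a shared target vocabulary (an assumption of this sketch).
student_logits = torch.randn(batch, seq_len, vocab_size, requires_grad=True)
teacher_logits = torch.randn(batch, seq_len, vocab_size)
references = torch.randint(0, vocab_size, (batch, seq_len))

# Standard cross-entropy against the reference translation.
ce = F.cross_entropy(student_logits.view(-1, vocab_size), references.view(-1))

# KL term treating the LLM's distribution as a prior; the teacher is
# detached so gradients flow only to the student.
kl = F.kl_div(
    F.log_softmax(student_logits, dim=-1),
    F.log_softmax(teacher_logits, dim=-1).detach(),
    log_target=True,
    reduction="batchmean",
)

loss = ce + lambda_kl * kl
loss.backward()
print(f"ce={ce.item():.3f} kl={kl.item():.3f} loss={loss.item():.3f}")
```

In practice the two models would be real networks decoding the same target sequence, and `lambda_kl` would be tuned on a validation set; this snippet only shows the shape of the combined objective.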