@inproceedings{li-etal-2024-linchance,
title = "{L}in{C}hance-{NTU} for Unconstrained {WMT}2024 Literary Translation",
author = "Li, Kechen and
Tao, Yaotian and
Huang, Hongyi and
Ji, Tianbo",
editor = "Haddow, Barry and
Kocmi, Tom and
Koehn, Philipp and
Monz, Christof",
booktitle = "Proceedings of the Ninth Conference on Machine Translation",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.wmt-1.99",
pages = "987--992",
abstract = "The rapid growth of deep learning has spurred significant advancements across industries, par- ticularly in machine translation through large language models (LLMs). However, translat- ing literary still presents challenges, including cross-cultural nuances, complex language struc- tures, metaphorical expressions, and cultural differences. To address these issues, this study utilizes the Llama and Phi models using both LoRA and full-parameter techniques, along-side a prompt-based translation system. Full-parameter tuning of the Llama-3-Chinese-8B-Instruct model was unsuccessful due to mem-ory constraints. In terms of the WMT task, the fully fine-tuned Phi 3 model was selected for submission due to its more natural and flu-ent translations. Nonetheless, results showed that LoRA and the prompt-based system sig- nificantly improved the Llama3 model{'}s perfor- mance, surpassing other models in BLEU and ROUGE evaluations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="li-etal-2024-linchance">
<titleInfo>
<title>LinChance-NTU for Unconstrained WMT2024 Literary Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kechen</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yaotian</namePart>
<namePart type="family">Tao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hongyi</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tianbo</namePart>
<namePart type="family">Ji</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Ninth Conference on Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Barry</namePart>
<namePart type="family">Haddow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tom</namePart>
<namePart type="family">Kocmi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Philipp</namePart>
<namePart type="family">Koehn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christof</namePart>
<namePart type="family">Monz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The rapid growth of deep learning has spurred significant advancements across industries, particularly in machine translation through large language models (LLMs). However, translating literary texts still presents challenges, including cross-cultural nuances, complex language structures, metaphorical expressions, and cultural differences. To address these issues, this study utilizes the Llama and Phi models using both LoRA and full-parameter techniques, alongside a prompt-based translation system. Full-parameter tuning of the Llama-3-Chinese-8B-Instruct model was unsuccessful due to memory constraints. In terms of the WMT task, the fully fine-tuned Phi 3 model was selected for submission due to its more natural and fluent translations. Nonetheless, results showed that LoRA and the prompt-based system significantly improved the Llama3 model’s performance, surpassing other models in BLEU and ROUGE evaluations.</abstract>
<identifier type="citekey">li-etal-2024-linchance</identifier>
<location>
<url>https://aclanthology.org/2024.wmt-1.99</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>987</start>
<end>992</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T LinChance-NTU for Unconstrained WMT2024 Literary Translation
%A Li, Kechen
%A Tao, Yaotian
%A Huang, Hongyi
%A Ji, Tianbo
%Y Haddow, Barry
%Y Kocmi, Tom
%Y Koehn, Philipp
%Y Monz, Christof
%S Proceedings of the Ninth Conference on Machine Translation
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F li-etal-2024-linchance
%X The rapid growth of deep learning has spurred significant advancements across industries, particularly in machine translation through large language models (LLMs). However, translating literary texts still presents challenges, including cross-cultural nuances, complex language structures, metaphorical expressions, and cultural differences. To address these issues, this study utilizes the Llama and Phi models using both LoRA and full-parameter techniques, alongside a prompt-based translation system. Full-parameter tuning of the Llama-3-Chinese-8B-Instruct model was unsuccessful due to memory constraints. In terms of the WMT task, the fully fine-tuned Phi 3 model was selected for submission due to its more natural and fluent translations. Nonetheless, results showed that LoRA and the prompt-based system significantly improved the Llama3 model’s performance, surpassing other models in BLEU and ROUGE evaluations.
%U https://aclanthology.org/2024.wmt-1.99
%P 987--992
Markdown (Informal)
[LinChance-NTU for Unconstrained WMT2024 Literary Translation](https://aclanthology.org/2024.wmt-1.99) (Li et al., WMT 2024)
ACL
Kechen Li, Yaotian Tao, Hongyi Huang, and Tianbo Ji. 2024. [LinChance-NTU for Unconstrained WMT2024 Literary Translation](https://aclanthology.org/2024.wmt-1.99). In Proceedings of the Ninth Conference on Machine Translation, pages 987--992, Miami, Florida, USA. Association for Computational Linguistics.