@inproceedings{jones-mitkov-2025-evaluating,
    title     = {Evaluating the Performance of Transformers in Translating Low-Resource Languages through {Akkadian}},
    author    = {Jones, Daniel A. and
      Mitkov, Ruslan},
    editor    = {Picazo-Izquierdo, Alicia and
      Estevanell-Valladares, Ernesto Luis and
      Mitkov, Ruslan and
      Guillena, Rafael Mu{\~n}oz and
      Cerd{\'a}, Ra{\'u}l Garc{\'i}a},
    booktitle = {Proceedings of the First Workshop on Comparative Performance Evaluation: From Rules to Language Models},
    month     = sep,
    year      = {2025},
    address   = {Varna, Bulgaria},
    publisher = {INCOMA Ltd., Shoumen, Bulgaria},
    url       = {https://aclanthology.org/2025.r2lm-1.5/},
    pages     = {39--47},
    abstract  = {In this paper, we evaluate the performance of various fine-tuned, transformer-based models in translating Akkadian into English. Using annotated Akkadian data, we seek to establish potential considerations when developing models for other low-resource languages, which do not yet have as robust data. The results of this study show the potency, but also cost inefficiency, of Large Language Models compared to smaller Neural Machine Translation models. Significant evidence was also found demonstrating the importance of fine-tuning machine translation models from related languages.}
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jones-mitkov-2025-evaluating">
<titleInfo>
<title>Evaluating the Performance of Transformers in Translating Low-Resource Languages through Akkadian</title>
</titleInfo>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="given">A</namePart>
<namePart type="family">Jones</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruslan</namePart>
<namePart type="family">Mitkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Comparative Performance Evaluation: From Rules to Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alicia</namePart>
<namePart type="family">Picazo-Izquierdo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ernesto</namePart>
<namePart type="given">Luis</namePart>
<namePart type="family">Estevanell-Valladares</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruslan</namePart>
<namePart type="family">Mitkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rafael</namePart>
<namePart type="given">Muñoz</namePart>
<namePart type="family">Guillena</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Raúl</namePart>
<namePart type="given">García</namePart>
<namePart type="family">Cerdá</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we evaluate the performance of various fine-tuned, transformer-based models in translating Akkadian into English. Using annotated Akkadian data, we seek to establish potential considerations when developing models for other low-resource languages, which do not yet have as robust data. The results of this study show the potency, but also cost inefficiency, of Large Language Models compared to smaller Neural Machine Translation models. Significant evidence was also found demonstrating the importance of fine-tuning machine translation models from related languages.</abstract>
<identifier type="citekey">jones-mitkov-2025-evaluating</identifier>
<location>
<url>https://aclanthology.org/2025.r2lm-1.5/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>39</start>
<end>47</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Evaluating the Performance of Transformers in Translating Low-Resource Languages through Akkadian
%A Jones, Daniel A.
%A Mitkov, Ruslan
%Y Picazo-Izquierdo, Alicia
%Y Estevanell-Valladares, Ernesto Luis
%Y Mitkov, Ruslan
%Y Guillena, Rafael Muñoz
%Y Cerdá, Raúl García
%S Proceedings of the First Workshop on Comparative Performance Evaluation: From Rules to Language Models
%D 2025
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F jones-mitkov-2025-evaluating
%X In this paper, we evaluate the performance of various fine-tuned, transformer-based models in translating Akkadian into English. Using annotated Akkadian data, we seek to establish potential considerations when developing models for other low-resource languages, which do not yet have as robust data. The results of this study show the potency, but also cost inefficiency, of Large Language Models compared to smaller Neural Machine Translation models. Significant evidence was also found demonstrating the importance of fine-tuning machine translation models from related languages.
%U https://aclanthology.org/2025.r2lm-1.5/
%P 39-47
Markdown (Informal)
[Evaluating the Performance of Transformers in Translating Low-Resource Languages through Akkadian](https://aclanthology.org/2025.r2lm-1.5/) (Jones & Mitkov, R2LM 2025)
ACL