@inproceedings{lepage-denoual-2005-purest,
title = "The {`}purest{'} {EBMT} System Ever Built: No Variables, No Templates, No Training, Examples, Just Examples, Only Examples",
author = "Lepage, Yves and
Denoual, Etienne",
booktitle = "Workshop on example-based machine translation",
month = sep # " 13-15",
year = "2005",
address = "Phuket, Thailand",
url = "https://aclanthology.org/2005.mtsummit-ebmt.11",
pages = "81--90",
abstract = "We designed, implemented and assessed an EBMT system that can be dubbed the {``}purest ever built{''}: it strictly does not make any use of variables, templates or training, does not have any explicit transfer component, and does not require any preprocessing of the aligned examples. It uses a specific operation, namely proportional analogy, that implicitly neutralises divergences between languages and captures lexical and syntactical variations along the paradigmatic and syntagmatic axes without explicitly decomposing sentences into fragments. In an experiment with a test set of 510 input sentences and an unprocessed corpus of almost 160,000 aligned sentences in Japanese and English, we obtained BLEU, NIST and mWER scores of 0.53, 8.53 and 0.39 respectively, well above a baseline simulating a translation memory.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="lepage-denoual-2005-purest">
    <titleInfo>
      <title>The ‘purest’ EBMT System Ever Built: No Variables, No Templates, No Training, Examples, Just Examples, Only Examples</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yves</namePart>
      <namePart type="family">Lepage</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Etienne</namePart>
      <namePart type="family">Denoual</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>September 13-15, 2005</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Workshop on example-based machine translation</title>
      </titleInfo>
      <originInfo>
        <place>
          <placeTerm type="text">Phuket, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We designed, implemented and assessed an EBMT system that can be dubbed the “purest ever built”: it strictly does not make any use of variables, templates or training, does not have any explicit transfer component, and does not require any preprocessing of the aligned examples. It uses a specific operation, namely proportional analogy, that implicitly neutralises divergences between languages and captures lexical and syntactical variations along the paradigmatic and syntagmatic axes without explicitly decomposing sentences into fragments. In an experiment with a test set of 510 input sentences and an unprocessed corpus of almost 160,000 aligned sentences in Japanese and English, we obtained BLEU, NIST and mWER scores of 0.53, 8.53 and 0.39 respectively, well above a baseline simulating a translation memory.</abstract>
    <identifier type="citekey">lepage-denoual-2005-purest</identifier>
    <location>
      <url>https://aclanthology.org/2005.mtsummit-ebmt.11</url>
    </location>
    <part>
      <date>September 13-15, 2005</date>
      <extent unit="page">
        <start>81</start>
        <end>90</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T The ‘purest’ EBMT System Ever Built: No Variables, No Templates, No Training, Examples, Just Examples, Only Examples
%A Lepage, Yves
%A Denoual, Etienne
%S Workshop on example-based machine translation
%D 2005
%8 September 13-15
%C Phuket, Thailand
%F lepage-denoual-2005-purest
%X We designed, implemented and assessed an EBMT system that can be dubbed the “purest ever built”: it strictly does not make any use of variables, templates or training, does not have any explicit transfer component, and does not require any preprocessing of the aligned examples. It uses a specific operation, namely proportional analogy, that implicitly neutralises divergences between languages and captures lexical and syntactical variations along the paradigmatic and syntagmatic axes without explicitly decomposing sentences into fragments. In an experiment with a test set of 510 input sentences and an unprocessed corpus of almost 160,000 aligned sentences in Japanese and English, we obtained BLEU, NIST and mWER scores of 0.53, 8.53 and 0.39 respectively, well above a baseline simulating a translation memory.
%U https://aclanthology.org/2005.mtsummit-ebmt.11
%P 81-90
Markdown (Informal)
[The ‘purest’ EBMT System Ever Built: No Variables, No Templates, No Training, Examples, Just Examples, Only Examples](https://aclanthology.org/2005.mtsummit-ebmt.11) (Lepage & Denoual, MTSummit 2005)
ACL
Yves Lepage and Etienne Denoual. 2005. [The ‘purest’ EBMT System Ever Built: No Variables, No Templates, No Training, Examples, Just Examples, Only Examples](https://aclanthology.org/2005.mtsummit-ebmt.11). In Workshop on example-based machine translation, pages 81–90, Phuket, Thailand.
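
The abstract hinges on a single operation, proportional analogy: given three sentences A, B and C, find the D that makes A : B :: C : D hold, without decomposing the sentences into fragments. The sketch below is only a toy illustration of that idea, not Lepage and Denoual's resolution algorithm (their system resolves analogies between full aligned sentences); the function names (`solve`, `lcp`, `lcs`) are invented here, and the solver handles only the simplest character-level case, where one term turns into another by a prefix or suffix substitution.

```python
def lcp(x, y):
    """Length of the longest common prefix of x and y."""
    n = 0
    while n < min(len(x), len(y)) and x[n] == y[n]:
        n += 1
    return n

def lcs(x, y):
    """Length of the longest common suffix of x and y."""
    n = 0
    while n < min(len(x), len(y)) and x[len(x) - 1 - n] == y[len(y) - 1 - n]:
        n += 1
    return n

def _apply_edge_change(a, b, c):
    """If a -> b replaces material at one edge of a shared prefix/suffix
    context, apply the same replacement to c; return None if it does not fit."""
    p = lcp(a, b)
    s = min(lcs(a, b), min(len(a), len(b)) - p)  # keep prefix and suffix disjoint
    mid_a, mid_b = a[p:len(a) - s], b[p:len(b) - s]
    # the replaced middle must reappear in c at the same offset from the end
    cut = len(c) - s - len(mid_a)
    if cut >= 0 and c[cut:len(c) - s] == mid_a:
        return c[:cut] + mid_b + c[len(c) - s:]
    return None

def solve(a, b, c):
    """Solve the proportional analogy a : b :: c : d for d.
    By exchange of the means, a : b :: c : d also holds as a : c :: b : d,
    so both readings are tried."""
    return _apply_edge_change(a, b, c) or _apply_edge_change(a, c, b)

if __name__ == "__main__":
    print(solve("walk", "walked", "talk"))           # -> talked
    print(solve("walk", "talk", "walked"))           # -> talked (via exchange of the means)
    print(solve("I walk.", "I walked.", "I talk."))  # -> I talked.
```

The exchange-of-the-means property used in `solve` is what lets one edge-substitution routine cover both "walk : walked :: talk : ?" (a suffix change read horizontally) and "walk : talk :: walked : ?" (a prefix change read vertically), which is a small-scale analogue of how proportional analogy captures variation along both the paradigmatic and syntagmatic axes.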