@inproceedings{manzur-2021-mt,
title = "{MT} Human Evaluation {--} Insights {\&} Approaches",
author = "Manzur, Paula",
editor = "Campbell, Janice and
Huyck, Ben and
Larocca, Stephen and
Marciano, Jay and
Savenkov, Konstantin and
Yanishevsky, Alex",
booktitle = "Proceedings of Machine Translation Summit XVIII: Users and Providers Track",
month = aug,
year = "2021",
address = "Virtual",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2021.mtsummit-up.12",
pages = "149--165",
abstract = "This session is designed to help companies and people in the business of translation evaluate MT output and to show how human translator feedback can be tweaked to make the process more objective and accurate. You will hear recommendations, insights, and takeaways on how to improve the procedure for human evaluation. When this is achieved, we can understand if the human eval study and machine metric result coheres. And we can think about what the future of translators looks like {--} the final {``}human touch{''} and automated MT review.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="manzur-2021-mt">
<titleInfo>
  <title>MT Human Evaluation – Insights &amp; Approaches</title>
</titleInfo>
<name type="personal">
<namePart type="given">Paula</namePart>
<namePart type="family">Manzur</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of Machine Translation Summit XVIII: Users and Providers Track</title>
</titleInfo>
<name type="personal">
<namePart type="given">Janice</namePart>
<namePart type="family">Campbell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ben</namePart>
<namePart type="family">Huyck</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stephen</namePart>
<namePart type="family">Larocca</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jay</namePart>
<namePart type="family">Marciano</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Konstantin</namePart>
<namePart type="family">Savenkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alex</namePart>
<namePart type="family">Yanishevsky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Machine Translation in the Americas</publisher>
<place>
<placeTerm type="text">Virtual</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
  <abstract>This session is designed to help companies and people in the business of translation evaluate MT output and to show how human translator feedback can be tweaked to make the process more objective and accurate. You will hear recommendations, insights, and takeaways on how to improve the procedure for human evaluation. When this is achieved, we can understand if the human eval study and machine metric result coheres. And we can think about what the future of translators looks like – the final “human touch” and automated MT review.</abstract>
<identifier type="citekey">manzur-2021-mt</identifier>
<location>
<url>https://aclanthology.org/2021.mtsummit-up.12</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>149</start>
<end>165</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T MT Human Evaluation – Insights & Approaches
%A Manzur, Paula
%Y Campbell, Janice
%Y Huyck, Ben
%Y Larocca, Stephen
%Y Marciano, Jay
%Y Savenkov, Konstantin
%Y Yanishevsky, Alex
%S Proceedings of Machine Translation Summit XVIII: Users and Providers Track
%D 2021
%8 August
%I Association for Machine Translation in the Americas
%C Virtual
%F manzur-2021-mt
%X This session is designed to help companies and people in the business of translation evaluate MT output and to show how human translator feedback can be tweaked to make the process more objective and accurate. You will hear recommendations, insights, and takeaways on how to improve the procedure for human evaluation. When this is achieved, we can understand if the human eval study and machine metric result coheres. And we can think about what the future of translators looks like – the final “human touch” and automated MT review.
%U https://aclanthology.org/2021.mtsummit-up.12
%P 149-165
Markdown (Informal)
[MT Human Evaluation – Insights & Approaches](https://aclanthology.org/2021.mtsummit-up.12) (Manzur, MTSummit 2021)
ACL
- Paula Manzur. 2021. MT Human Evaluation – Insights & Approaches. In Proceedings of Machine Translation Summit XVIII: Users and Providers Track, pages 149–165, Virtual. Association for Machine Translation in the Americas.