@inproceedings{fordyce-2007-overview,
title = "Overview of the {IWSLT} 2007 evaluation campaign",
author = "Fordyce, Cameron Shaw",
booktitle = "Proceedings of the Fourth International Workshop on Spoken Language Translation",
month = oct # " 15-16",
year = "2007",
address = "Trento, Italy",
url = "https://aclanthology.org/2007.iwslt-1.1",
abstract = "In this paper we give an overview of the 2007 evaluation campaign for the International Workshop on Spoken Language Translation (IWSLT)1. As with previous evaluation campaigns, the primary focus of the workshop was the translation of spoken language in the travel domain. This year there were four language pairs; the translation of Chinese, Italian, Arabic, and Japanese into English. The input data consisted of the output of ASR systems for read speech and clean text. The exceptions were the challenge task of the Italian English language pair which used spontaneous speech ASR outputs and transcriptions and the Chinese English task which used only clean text. A new characteristic of this year{'}s evaluation campaign was an increased focus on the sharing of resources. Participants were requested to submit the data and supplementary resources used in building their systems so that the other participants might be able to take advantage of the same resources. A second new characteristic this year was the focus on the human evaluation of systems. Each primary run was judged in the human evaluation for every task using a straightforward ranking of systems. This year's workshop saw an increased participation over last year's workshop. This year 24 groups submitted runs to one or more of the tasks, compared to the 19 groups that submitted runs last year [1]. Automatic and human evaluation were carried out to measure MT performance under each condition, ASR system outputs for read speech, spontaneous travel dialogues, and clean text.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="fordyce-2007-overview">
<titleInfo>
<title>Overview of the IWSLT 2007 evaluation campaign</title>
</titleInfo>
<name type="personal">
<namePart type="given">Cameron</namePart>
<namePart type="given">Shaw</namePart>
<namePart type="family">Fordyce</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>October 15-16, 2007</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth International Workshop on Spoken Language Translation</title>
</titleInfo>
<originInfo>
<place>
<placeTerm type="text">Trento, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper we give an overview of the 2007 evaluation campaign for the International Workshop on Spoken Language Translation (IWSLT). As with previous evaluation campaigns, the primary focus of the workshop was the translation of spoken language in the travel domain. This year there were four language pairs: the translation of Chinese, Italian, Arabic, and Japanese into English. The input data consisted of the output of ASR systems for read speech and clean text. The exceptions were the challenge task of the Italian-English language pair, which used spontaneous speech ASR outputs and transcriptions, and the Chinese-English task, which used only clean text. A new characteristic of this year’s evaluation campaign was an increased focus on the sharing of resources. Participants were requested to submit the data and supplementary resources used in building their systems so that other participants might take advantage of the same resources. A second new characteristic this year was the focus on the human evaluation of systems. Each primary run was judged in the human evaluation for every task using a straightforward ranking of systems. This year’s workshop saw increased participation: 24 groups submitted runs to one or more of the tasks, compared to the 19 groups that submitted runs last year [1]. Automatic and human evaluations were carried out to measure MT performance under each condition: ASR system outputs for read speech, spontaneous travel dialogues, and clean text.</abstract>
<identifier type="citekey">fordyce-2007-overview</identifier>
<location>
<url>https://aclanthology.org/2007.iwslt-1.1</url>
</location>
<part>
<date>October 15-16, 2007</date>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Overview of the IWSLT 2007 evaluation campaign
%A Fordyce, Cameron Shaw
%S Proceedings of the Fourth International Workshop on Spoken Language Translation
%D 2007
%8 October 15-16
%C Trento, Italy
%F fordyce-2007-overview
%X In this paper we give an overview of the 2007 evaluation campaign for the International Workshop on Spoken Language Translation (IWSLT). As with previous evaluation campaigns, the primary focus of the workshop was the translation of spoken language in the travel domain. This year there were four language pairs: the translation of Chinese, Italian, Arabic, and Japanese into English. The input data consisted of the output of ASR systems for read speech and clean text. The exceptions were the challenge task of the Italian-English language pair, which used spontaneous speech ASR outputs and transcriptions, and the Chinese-English task, which used only clean text. A new characteristic of this year’s evaluation campaign was an increased focus on the sharing of resources. Participants were requested to submit the data and supplementary resources used in building their systems so that other participants might take advantage of the same resources. A second new characteristic this year was the focus on the human evaluation of systems. Each primary run was judged in the human evaluation for every task using a straightforward ranking of systems. This year’s workshop saw increased participation: 24 groups submitted runs to one or more of the tasks, compared to the 19 groups that submitted runs last year [1]. Automatic and human evaluations were carried out to measure MT performance under each condition: ASR system outputs for read speech, spontaneous travel dialogues, and clean text.
%U https://aclanthology.org/2007.iwslt-1.1
Markdown (Informal)
[Overview of the IWSLT 2007 evaluation campaign](https://aclanthology.org/2007.iwslt-1.1) (Fordyce, IWSLT 2007)
ACL
Cameron Shaw Fordyce. 2007. Overview of the IWSLT 2007 evaluation campaign. In Proceedings of the Fourth International Workshop on Spoken Language Translation, Trento, Italy.