|
{ |
|
"paper_id": "W09-0401", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:44:02.032953Z" |
|
}, |
|
"title": "Findings of the 2009 Workshop on Statistical Machine Translation", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Schroeder", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper presents the results of the WMT09 shared tasks, which included a translation task, a system combination task, and an evaluation task. We conducted a large-scale manual evaluation of 87 machine translation systems and 22 system combination entries. We used the ranking of these systems to measure how strongly automatic metrics correlate with human judgments of translation quality, for more than 20 metrics. We present a new evaluation technique whereby system output is edited and judged for correctness. 2.1 Test data The test data for this year's task was created by hiring people to translate news articles that were drawn from a variety of sources during the period from the end of September to mid-October of 2008. A total of 136 articles were selected, in roughly equal amounts from a variety of Czech, English, French, German, Hungarian, Italian and Spanish news sites: 2", |
|
"pdf_parse": { |
|
"paper_id": "W09-0401", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper presents the results of the WMT09 shared tasks, which included a translation task, a system combination task, and an evaluation task. We conducted a large-scale manual evaluation of 87 machine translation systems and 22 system combination entries. We used the ranking of these systems to measure how strongly automatic metrics correlate with human judgments of translation quality, for more than 20 metrics. We present a new evaluation technique whereby system output is edited and judged for correctness. 2.1 Test data The test data for this year's task was created by hiring people to translate news articles that were drawn from a variety of sources during the period from the end of September to mid-October of 2008. A total of 136 articles were selected, in roughly equal amounts from a variety of Czech, English, French, German, Hungarian, Italian and Spanish news sites: 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "This paper presents the results of the shared tasks of the 2009 EACL Workshop on Statistical Machine Translation, which builds on three previous workshops (Koehn and Monz, 2006; Callison-Burch et al., 2008) . There were three shared tasks this year: a translation task between English and five other European languages, a task to combine the output of multiple machine translation systems, and a task to predict human judgments of translation quality using automatic evaluation metrics. The performance on each of these shared task was determined after a comprehensive human evaluation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 155, |
|
"end": 177, |
|
"text": "(Koehn and Monz, 2006;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 178, |
|
"end": 206, |
|
"text": "Callison-Burch et al., 2008)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There were a number of differences between this year's workshop and last year's workshop:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Larger training sets -In addition to annual increases in the Europarl corpus, we released a French-English parallel corpus verging on 1 billion words. We also provided large monolingual training sets for better language modeling of the news translation task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Reduced number of conditions -Previous workshops had many conditions: 10 language pairs, both in-domain and out-ofdomain translation, and three types of manual evaluation. This year we eliminated the in-domain Europarl test set and defined sentence-level ranking as the primary type of manual evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Editing to evaluate translation quality -Beyond ranking the output of translation systems, we evaluated translation quality by having people edit the output of systems. Later, we asked annotators to judge whether those edited translations were correct when shown the source and reference translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The primary objectives of this workshop are to evaluate the state of the art in machine translation, to disseminate common test sets and public training data with published performance numbers, and to refine evaluation methodologies for machine translation. All of the data, translations, and human judgments produced for our workshop are publicly available. 1 We hope they form a valuable resource for research into statistical machine translation, system combination, and automatic evaluation of translation quality.", |
|
"cite_spans": [ |
|
{ |
|
"start": 359, |
|
"end": 360, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The workshop examined translation between English and five other languages: German, Spanish, French, Czech, and Hungarian. We created a test set for each language pair by translating newspaper articles. We additionally provided training data and a baseline system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Overview of the shared translation and system combination tasks", |
|
"sec_num": "2" |
|
}, |
|
{

"text": "We eliminated the in-domain Europarl test set this year, since European Parliament proceedings were less of general interest than news stories. We focus on a single task since the use of multiple test sets in the past spread our resources too thin, especially in the manual evaluation.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Overview of the shared translation and system combination tasks",

"sec_num": "2"

},

{

"text": "The test data for this year's task was created by hiring people to translate news articles that were drawn from a variety of sources during the period from the end of September to mid-October of 2008. A total of 136 articles were selected, in roughly equal amounts from a variety of Czech, English, French, German, Hungarian, Italian and Spanish news sites: 2",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Test data",

"sec_num": "2.1"

},
|
{ |
|
"text": "As in past years we provided parallel corpora to train translation models, monolingual corpora to train language models, and development sets to tune parameters. Some statistics about the training materials are given in Figure 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 228, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "10 9 word parallel corpus", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "To create the large French-English parallel corpus, we conducted a targeted web crawl of bilingual web sites. These sites came from a variety of sources including the Canadian government, the European Union, the United Nations, and other international organizations. The crawl yielded on the order of 40 million files, consisting of more than 1TB of data. Pairs of translated documents were identified using a set of simple heuristics to transform French URLs into English URLs (for instance, by replacing fr with en). Documents that matched were assumed to be translations of each other. All HTML and PDF documents were converted into plain text, which yielded 2 million French files paired with their English equivalents. Text files were split so that they contained one sentence per line and had markers between paragraphs. They were sentence-aligned in batches of 10,000 document pairs, using a sentence aligner that incorporates IBM Model 1 probabilities in addition to sentence lengths (Moore, 2002) . The document-aligned corpus contained 220 million segments with 2.9 billion words on the French side and 215 million segments with 2.5 billion words on the English side. After sentence alignment, there were 177 million sentence pairs with 2.5 billion French words and 2.2 billion English words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 992, |
|
"end": 1005, |
|
"text": "(Moore, 2002)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training data", |
|
"sec_num": "2.2" |
|
}, |
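The following is a minimal sketch of the URL-pairing heuristic described above. The substitution patterns and helper names are illustrative assumptions; the paper only states that French URLs were transformed into English URLs (for instance, by replacing fr with en).

```python
# Sketch of the document-pairing heuristic: transform a French URL into
# candidate English URLs and keep pairs where the transformed URL was also
# crawled.  The substitution patterns below are assumptions for illustration.
import re

CANDIDATE_SUBS = [
    (r"/fr/", "/en/"),
    (r"([._-])fr([._-])", r"\1en\2"),
    (r"lang=fr", "lang=en"),
]

def english_candidates(french_url):
    """Generate possible English URLs for a crawled French URL."""
    candidates = []
    for pattern, repl in CANDIDATE_SUBS:
        swapped = re.sub(pattern, repl, french_url)
        if swapped != french_url:
            candidates.append(swapped)
    return candidates

def pair_documents(french_urls, crawled_urls):
    """Return (French, English) URL pairs assumed to be translations."""
    crawled = set(crawled_urls)
    pairs = []
    for fr_url in french_urls:
        for en_url in english_candidates(fr_url):
            if en_url in crawled:
                pairs.append((fr_url, en_url))
                break
    return pairs
```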
|
{ |
|
"text": "The sentence-aligned corpus was cleaned to remove sentence pairs which consisted only of numbers or paragraph markers, or where the French and English sentences were identical. The later step helped eliminate documents that were not actually translated, which was necessary because we did not perform language identification. After cleaning, the parallel corpus contained 105 million sentence pairs with 2 billion French words and 1.8 billion English words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Spanish \u2194 English French \u2194 English German \u2194 English Sentences 1, 411, 589 1, 428, 799 1, 418, 115 Words 40, 067, 498 41, 042, 070 44, 692, 992 40, 067, 498 39, 516, 645 37, 431, 872 Distinct words 154, 971 108, 116 129, 166 107, 733 320, 180 104, 269 In addition to cleaning the sentence-aligned parallel corpus we also de-duplicated the corpus, removing all sentence pairs that occured more than once in the parallel corpus. Many of the documents gathered in our web crawl were duplicates or near duplicates, and a lot of the text is repeated, as with web site navigation. We further eliminated sentence pairs that varied from previous sentences by only numbers, which helped eliminate template web pages such as expense reports. We used a Bloom Filter (Talbot and Osborne, 2007) to do de-duplication, so it may have discarded more sentence pairs than strictly necessary. After deduplication, the parallel corpus contained 28 million sentence pairs with 0.8 billion French words and 0.7 billion English words.", |
|
"cite_spans": [ |
|
|
{ |
|
"start": 754, |
|
"end": 780, |
|
"text": "(Talbot and Osborne, 2007)", |
|
"ref_id": "BIBREF46" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Europarl Training Corpus", |
|
"sec_num": null |
|
}, |
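A rough sketch of the de-duplication step described above, using an exact hash set in place of the Bloom filter reported in the paper; the digit-normalization rule is an assumption intended to mirror the filter for pairs that differ only by numbers.

```python
# Sketch: drop duplicate and number-only-variant sentence pairs.
# A Python set stands in for the Bloom filter (Talbot and Osborne, 2007)
# used in the actual pipeline; a Bloom filter saves memory at the cost of
# occasional false positives, which is why the paper notes it may discard
# more pairs than strictly necessary.
import hashlib
import re

def fingerprint(fr, en):
    # Digits are mapped to a placeholder so pairs that differ only by
    # numbers collapse onto the same key (an assumption for illustration).
    key = re.sub(r"\d+", "#", fr.lower()) + "\t" + re.sub(r"\d+", "#", en.lower())
    return hashlib.sha1(key.encode("utf-8")).digest()

def deduplicate(pairs):
    seen = set()
    for fr, en in pairs:
        fp = fingerprint(fr, en)
        if fp not in seen:
            seen.add(fp)
            yield fr, en
```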
|
{ |
|
"text": "We have crawled the news sources that were the basis of our test sets (and a few more additional sources) since August 2007. This allowed us to assemble large corpora in the target domain to be mainly used as training data for language modeling. We collected texts from the beginning of our data collection period to one month before the test set period, segmented these into sentences and randomized the order of the sentences to obviate copyright concerns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Monolingual news corpora", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To lower the barrier of entry for newcomers to the field, we provided Moses, an open source toolkit for phrase-based statistical translation . The performance of this baseline system is similar to the best submissions in last year's shared task. Twelve participating groups used the Moses toolkit for the development of their system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline system", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "We received submissions from 22 groups from 20 institutions, as listed in Table 1 , a similar turnout to last year's shared task. Of the 20 groups that participated with regular system submissions in last year's shared task, 12 groups returned this year. A major hurdle for many was a DARPA/GALE evaluation that occurred at the same time as this shared task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 74, |
|
"end": 81, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Submitted systems", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "We also evaluated 7 commercial rule-based MT systems, and Google's online statistical machine translation system. We note that Google did not submit an entry itself. Its entry was created by the WMT09 organizers using Google's online system. 3 In personal correspondence, Franz Och clarified that the online system is different from Google's research system in that it runs at faster speeds at the expense of somewhat lower translation quality. On the other hand, the training data used by Google is unconstrained, which means that it may have an advantage compared to the research systems evaluated in this workshop, since they were trained using only the provided materials.", |
|
"cite_spans": [ |
|
{ |
|
"start": 242, |
|
"end": 243, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Submitted systems", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "In total, we received 87 primary system submissions along with 42 secondary submissions. These were made available to participants in the system combination shared task. Based on feedback that we received on last year's system combination task, we provided two additional resources to participants:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System combination", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "\u2022 Development set: We reserved 25 articles to use as a dev set for system combination (details of the set are given in Table 1 ). These were translated by all participating sites, and distributed to system combination participants along with reference translations.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 127, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System combination", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "\u2022 n-best translations: We requested n-best lists from sites whose systems could produce them. We received 25 100-best lists accompanying the primary system submissions, and 5 accompanying the secondary system submissions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System combination", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "In addition to soliciting system combination entries for each of the language pairs, we treated system combination as a way of doing multi-source translation, following Schroeder et al. (2009) . For the multi-source system combination task, we provided all 46 primary system submissions from any language into English, along with an additional 32 secondary systems. Table 2 lists the six participants in the system combination task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 169, |
|
"end": 192, |
|
"text": "Schroeder et al. (2009)", |
|
"ref_id": "BIBREF42" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 366, |
|
"end": 373, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System combination", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "As with past workshops, we placed greater emphasis on the human evaluation than on the automatic evaluation metric scores. It is our contention German-English 3,736 1,271 4,361 English-German 3,700 823 3,854 Spanish-English 2,412 844 2,599 English-Spanish 1,878 278 837 French-English 3,920 1,145 4,491 English-French 1,968 Table 3 : The number of items that were judged for each task during the manual evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 344, |
|
"text": "German-English 3,736 1,271 4,361 English-German 3,700 823 3,854 Spanish-English 2,412 844 2,599 English-Spanish 1,878 278 837 French-English 3,920 1,145 4,491 English-French 1,968", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 352, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "that automatic measures are an imperfect substitute for human assessment of translation quality. Therefore, we define the manual evaluation to be primary, and use the human judgments to validate automatic metrics. Manual evaluation is time consuming, and it requires a large effort to conduct it on the scale of our workshop. We distributed the workload across a number of people, including shared-task participants, interested volunteers, and a small number of paid annotators. More than 160 people participated in the manual evaluation, with 100 people putting in more than an hour's worth of effort, and 30 putting in more than four hours. A collective total of 479 hours of labor was invested.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We asked people to evaluate the systems' output in two different ways:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Ranking translated sentences relative to each other. This was our official determinant of translation quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Editing the output of systems without displaying the source or a reference translation, and then later judging whether edited translations were correct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The total number of judgments collected for the different modes of annotation is given in Table 3 . In all cases, the output of the various translation outputs were judged on equal footing; the output of system combinations was judged alongside that of the individual system, and the constrained and unconstrained systems were judged together.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 97, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Ranking translations relative to each other is a reasonably intuitive task. We therefore kept the instructions simple:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking translations of sentences", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Rank translations from Best to Worst relative to the other choices (ties are allowed).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking translations of sentences", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "In our the manual evaluation, annotators were shown at most five translations at a time. For most language pairs there were more than 5 systems submissions. We did not attempt to get a complete ordering over the systems, and instead relied on random selection and a reasonably large sample size to make the comparisons fair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking translations of sentences", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Relative ranking is our official evaluation metric. Individual systems and system combinations are ranked based on how frequently they were judged to be better than or equal to any other system. The results of this are reported in Section 4. Appendix A provides detailed tables that contain pairwise comparisons between systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ranking translations of sentences", |
|
"sec_num": "3.1" |
|
}, |
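A minimal sketch of the official ranking statistic described above: each system is scored by how frequently it was judged to be better than or equal to any other system. The data layout (a list of per-annotator rankings mapping system name to rank) is an assumption for illustration.

```python
# Sketch: score each system by the fraction of pairwise comparisons in
# which it was ranked better than or equal to the competing system.
from collections import defaultdict
from itertools import combinations

def better_or_equal_scores(judgments):
    """`judgments` is a list of dicts mapping system name -> rank (1 = best)
    for the up-to-five outputs shown to one annotator for one sentence."""
    wins = defaultdict(int)    # comparisons where the system ranked <= the other
    totals = defaultdict(int)  # all comparisons the system took part in
    for ranking in judgments:
        for sys_a, sys_b in combinations(ranking, 2):
            totals[sys_a] += 1
            totals[sys_b] += 1
            if ranking[sys_a] <= ranking[sys_b]:
                wins[sys_a] += 1
            if ranking[sys_b] <= ranking[sys_a]:
                wins[sys_b] += 1
    return {name: wins[name] / totals[name] for name in totals}

# Example: one annotator ranked three systems on one sentence (tie for 2nd).
print(better_or_equal_scores([{"sysA": 1, "sysB": 2, "sysC": 2}]))
```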
|
{ |
|
"text": "We experimented with a new type of evaluation this year where we asked judges to edit the output of MT systems. We did not show judges the reference translation, which makes our edit-based evaluation different than the Human-targeted Translation Error Rate (HTER) measure used in the DARPA GALE program (NIST, 2008) . Rather than asking people to make the minimum number of changes to the MT output in order capture the same meaning as the reference, we asked them to edit the translation to be as fluent as possible without seeing the reference. Our hope was that this would reflect people's understanding of the output.", |
|
"cite_spans": [ |
|
{ |
|
"start": 303, |
|
"end": 315, |
|
"text": "(NIST, 2008)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Editing machine translation output", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The instructions that we gave our judges were the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Editing machine translation output", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Correct the translation displayed, making it as fluent as possible. If no corrections are needed, select \"No corrections needed.\" If you cannot understand the sentence well enough to correct it, select \"Unable to correct.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Editing machine translation output", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Each translated sentence was shown in isolation without any additional context. A screenshot is shown in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 113, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Editing machine translation output", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Since we wanted to prevent judges from seeing the reference before editing the translations, we split the test set between the sentences used in the ranking task and the editing task (because they were being conducted concurrently). Moreover, annotators edited only a single system's output for one source sentence to ensure that their understanding of it would not be influenced by another system's output.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Editing machine translation output", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Halfway through the manual evaluation period, we stopped collecting edited translations, and instead asked annotators to do the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Judging the acceptability of edited output", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Indicate whether the edited translations represent fully fluent and meaningequivalent alternatives to the reference sentence. The reference is shown with context, the actual sentence is bold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Judging the acceptability of edited output", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In addition to edited translations, unedited items that were either marked as acceptable or as incomprehensible were also shown. Judges gave a simple yes/no indication to each item. A screenshot is shown in Figure 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 207, |
|
"end": 215, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Judging the acceptability of edited output", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "In order to measure intra-annotator agreement 10% of the items were repeated and evaluated twice by each judge. In order to measure interannotator agreement 40% of the items were randomly drawn from a common pool that was shared across all annotators so that we would have items that were judged by multiple annotators. Table 4 : Inter-and intra-annotator agreement for the two types of manual evaluation", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 327, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Inter-and Intra-annotator agreement", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We measured pairwise agreement among annotators using the kappa coefficient (K) which is defined as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-and Intra-annotator agreement", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "K = P (A) \u2212 P (E) 1 \u2212 P (E)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-and Intra-annotator agreement", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "where P (A) is the proportion of times that the annotators agree, and P (E) is the proportion of time that they would agree by chance. For inter-annotator agreement we calculated P (A) for the yes/no judgments by examining all items that were annotated by two or more annotators, and calculating the proportion of time they assigned identical scores to the same items. For the ranking tasks we calculated P (A) by examining all pairs of systems which had been judged by two or more judges, and calculated the proportion of time that they agreed that A > B, A = B, or A < B. Intra-annotator agreement was computed similarly, but we gathered items that were annotated on multiple occasions by a single annotator. Table 4 gives K values for inter-annotator and intra-annotator agreement. These give an indication of how often different judges agree, and how often single judges are consistent for repeated judgments, respectively. The interpretation of Kappa varies, but according to Landis and Koch (1977) , 0 \u2212 .2 is slight, .2 \u2212 .4 is fair, .4 \u2212 .6 is moderate, .6 \u2212 .8 is substantial and the rest almost perfect.", |
|
"cite_spans": [ |
|
{ |
|
"start": 981, |
|
"end": 1003, |
|
"text": "Landis and Koch (1977)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 711, |
|
"end": 718, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Inter-and Intra-annotator agreement", |
|
"sec_num": "3.4" |
|
}, |
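A minimal sketch of the kappa computation defined above, K = (P(A) - P(E)) / (1 - P(E)). Treating P(E) as 1/3 for the three possible ranking outcomes (A > B, A = B, A < B) is an assumption; the paper does not spell out how chance agreement was estimated.

```python
# Sketch: pairwise agreement kappa, K = (P(A) - P(E)) / (1 - P(E)).
# `double_judged` holds label pairs from two annotators (or from the same
# annotator on two occasions) for the same comparison; each label is one of
# "<", "=", ">".  p_e = 1/3 assumes the three outcomes are equally likely
# by chance, which is an assumption made for this illustration.

def kappa(double_judged, p_e=1.0 / 3.0):
    agreements = sum(1 for a, b in double_judged if a == b)
    p_a = agreements / len(double_judged)
    return (p_a - p_e) / (1.0 - p_e)

# Example: two annotators agreed on 3 of 4 shared comparisons -> K = 0.625.
print(kappa([("<", "<"), ("=", "="), (">", "<"), (">", ">")]))
```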
|
{ |
|
"text": "Based on these interpretations the agreement for yes/no judgments is moderate for inter-annotator agreement and substantial for intra-annotator agreement, but the inter-annotator agreement for sentence level ranking is only fair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-and Intra-annotator agreement", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "We analyzed two possible strategies for improving inter-annotator agreement on the ranking task: First, we tried discarding initial judgments to give", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inter-and Intra-annotator agreement", |
|
"sec_num": "3.4" |
|
}, |
|
{

"text": "[Figures 2 and 3: screenshots of the annotation interfaces. Figure 2 shows the 'Edit MT Output' interface, in which a single system translation is corrected without access to the source or reference; Figure 3 shows the 'Edit Acceptance' interface, in which the reference is displayed with context and judges give yes/no verdicts on edited system outputs for a French-English example sentence.]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Edit MT Output",

"sec_num": null

},
|
{ |
|
"text": "Figures 4 and 5 show how the K values improve for intra-and inter-annotator agreement under these two strategies, and what percentage of the judgments are retained as more annotators are removed, or as the initial learning period is made longer. It seems that the strategy of removing the worst annotators is the best in terms of improving inter-annotator K, while retaining most of the judgments. If we remove the 33 judges with the worst agreement, we increase the inter-annotator K from fair to moderate, and still retain 60% of the data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Yes No", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the results presented in the rest of the paper, we retain all judgments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Yes No", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We used the results of the manual evaluation to analyze the translation quality of the different systems that were submitted to the workshop. In our analysis, we aimed to address the following questions:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Which systems produced the best translation quality for each language pair?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Did the system combinations produce better translations than individual systems?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Which of the systems that used only the provided training materials produced the best translation quality? Table 6 shows best individual systems. We define the best systems as those which had no other system that was statistically significantly better than them under the Sign Test at p \u2264 0.1. 4 Multiple systems are listed for many language pairs because it was not possible to draw a statistically significant difference between the systems. Commercial translation software (including Google, Systran, Morphologic, PCTrans, Eurotran XP, and anonymized RBMT providers) did well in each of the language pairs. Research systems that utilized only the provided data did as well as commercial vendors in half of the language pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 116, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The table also lists the best systems among those which used only the provided materials. To determine this decision we excluded unconstrained systems which employed significant external resources. Specifically, we ruled out all of the commercial systems, since Google has access to significantly greater data sources for its statistical system, and since the commercial RBMT systems utilize knowledge sources not available to other workshop participants. The remaining systems were research systems that employ statistical models. We were able to draw distinctions between half of these for each of the language pairs. There are some borderline cases, for instance LIMSI only used additional monolingual training resources, and LIUM/Systran used additional translation dictionaries as well as additional monolingual resources. Table 5 summarizes the performance of the system combination entries by listing the best ranked combinations, and by indicating whether they have a statistically significant difference with the best individual systems. In general, system combinations performed as well as the best individual systems, but not statistically significantly better than them. Moreover, it was hard to draw a distinction between the different system combination strategies themselves. There are a number of possibilities as to why we failed to find significant differences:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 828, |
|
"end": 835, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 The number of judgments that we collected were not sufficient to find a difference. Although we collected several thousand judgments for each language pair, most pairs of systems were judged together fewer than 100 times.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 It is possible that the best performing individual systems were sufficiently better than the other systems and that it is difficult to improve on them by combining them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 Individual systems could have been weighted incorrectly during the development stage, which could happen if the automatic evaluation metrics scores on the dev set did not strongly correlate with human judgments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "\u2022 The lack of distinction between different combinations could be due to the fact that Multisource-English RWTH-COMBO 3 n/a Table 5 : A comparison between the best system combinations and the best individual systems. It was generally difficult to draw a statistically significant differences between the two groups, and between the combinations themselves.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 124, |
|
"end": 131, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "there is significant overlap in the strategies that they employ.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Improved system combination warrants further investigation. We would suggest collecting additional judgments, and doing oracle experiments where the contributions of individual systems are weighted according to human judgments of their quality.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation task results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Our hope is that judging the acceptability of edited output as discussed in Section 3 gives some indication of how often a system's output was understandable. Figure 6 gives the percentage of times that each system's edited output was judged to be acceptable (the percentage also factors in instances when judges were unable to improve the output because it was incomprehensible). The edited output of the best performing systems under this evaluation model were deemed acceptable around 50% of the time for French-English, English-French, English-Spanish, German-English, and English-German. For Spanish-English the edited output of the best system was acceptable around 40% of the time, for English-Czech it was 30% and for Czech-English and Hungarian-English it was around 20%. This style of manual evaluation is experimental and should not be taken to be authoritative. Some caveats about this measure:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 167, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Editing translations without context is difficult, so the acceptability rate is probably an underestimate of how understandable a system actually is.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 There are several sources of variance that are difficult to control for: some people are better at editing, and some sentences are more difficult to edit. Therefore, variance in the understandability of systems is difficult to pin down.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 The acceptability measure does not strongly correlate with the more established method of ranking translations relative to each other for all the language pairs. 5", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Please also note that the number of corrected translations per system are very low for some language pairs, as low as 23 corrected sentences per system for the language pair English-French. Systems are listed in the order of how often their translations were ranked higher than or equal to any other system. Ties are broken by direct comparison.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "C? indicates constrained condition, meaning only using the supplied training data and possibly standard monolingual linguistic tools (but no additional corpora). \u2022 indicates a win in the category, meaning that no other system is statistically significantly better at p-level\u22640.1 in pairwise comparison. indicates a constrained win, no other constrained system is statistically better.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For all pairwise comparisons between systems, please check the appendix. Table 6 : Official results for the WMT09 translation task, based on the human evaluation (ranking translations relative to each other)", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 80, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Given these low numbers, the numbers presented in Figure 6 should not be read as comparisons between systems, but rather viewed as indicating the state of machine translation for different language pairs.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 58, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Understandability", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In addition to allowing us to analyze the translation quality of different systems, the data gathered during the manual evaluation is useful for validating the automatic evaluation metrics. Last year, NIST began running a similar \"Metrics for MAchine TRanslation\" challenge (Metrics-MATR), and presented their findings at a workshop at AMTA (Przybocki et al., 2008) . In this year's shared task we evaluated a number of different automatic metrics:", |
|
"cite_spans": [ |
|
{ |
|
"start": 341, |
|
"end": 365, |
|
"text": "(Przybocki et al., 2008)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Bleu (Papineni et al., 2002) -Bleu remains the de facto standard in machine translation evaluation. It calculates n-gram precision and a brevity penalty, and can make use of multiple reference translations as a way of capturing some of the allowable variation in translation. We use a single reference translation in our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 30, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF35" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Meteor (Agarwal and Lavie, 2008 )-Meteor measures precision and recall for unigrams and applies a fragmentation penalty. It uses flexible word matching based on stemming and WordNet-synonymy. meteor-ranking is optimized for correlation with ranking judgments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 33, |
|
"text": "(Agarwal and Lavie, 2008", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 Translation Error Rate (Snover et al., 2006) -TER calculates the number of edits required to change a hypothesis translation into a reference translation. The possible edits in TER include insertion, deletion, and substitution of single words, and an edit which moves sequences of contiguous words. Two variants of TER are also included: TERp (Snover et al., 2009) , a new version which introduces a number of different features, and (Bleu \u2212 TER)/2, a combination of Bleu and Translation Edit Rate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 46, |
|
"text": "(Snover et al., 2006)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 345, |
|
"end": 366, |
|
"text": "(Snover et al., 2009)", |
|
"ref_id": "BIBREF45" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 MaxSim (Chan and Ng, 2008)-MaxSim calculates a similarity score by comparing items in the translation against the reference. Unlike most metrics which do strict matching, MaxSim computes a similarity score for non-identical items. To find a maximum weight matching that matches each system item to at most one reference item, the items are then modeled as nodes in a bipartite graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 wcd6p4er (Leusch and Ney, 2008 )-a measure based on cder with word-based substitution costs. Leusch and Ney (2008) also submitted two contrastive metrics: bleusp4114, a modified version of BLEU-S (Lin and Och, 2004) , with tuned n-gram weights, and bleusp, with constant weights. wcd6p4er is an error measure and bleusp is a quality score.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 32, |
|
"text": "(Leusch and Ney, 2008", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 95, |
|
"end": 116, |
|
"text": "Leusch and Ney (2008)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 198, |
|
"end": 217, |
|
"text": "(Lin and Och, 2004)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 RTE (Pado et al., 2009 )-The RTE metric follows a semantic approach which applies recent work in rich textual entailment to the problem of MT evaluation. Its predictions are based on a regression model over a feature set adapted from an entailment systems. The features primarily model alignment quality and (mis-)matches of syntactic and semantic structures.", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 24, |
|
"text": "(Pado et al., 2009", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 ULC (Gim\u00e9nez and M\u00e0rquez, 2008) -ULC is an arithmetic mean over other automatic metrics. The set of metrics used include Rouge, Meteor, measures of overlap between constituent parses, dependency parses, semantic roles, and discourse representations. The ULC metric had the strongest correlation with human judgments in WMT08 (Callison-Burch et al., 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 6, |
|
"end": 33, |
|
"text": "(Gim\u00e9nez and M\u00e0rquez, 2008)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 327, |
|
"end": 356, |
|
"text": "(Callison-Burch et al., 2008)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 wpF and wpBleu ) -These metrics are based on words and part of speech sequences. wpF is an n-gram based Fmeasure which takes into account both word n-grams and part of speech n-grams. wp-BLEU is a combnination of the normal Blue score and a part of speech-based Bleu score.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "\u2022 SemPOS (Kos and Bojar, 2009) -the Sem-POS metric computes overlapping words, as defined in (Gim\u00e9nez and M\u00e0rquez, 2007) , with respect to their semantic part of speech. Moreover, it does not use the surface representation of words but their underlying forms obtained from the TectoMT framework. Figure 6 : The percent of time that each system's edited output was judged to be an acceptable translation. These numbers also include judgments of the system's output when it was marked either incomprehensible or acceptable and left unedited. Note that the reference translation was edited alongside the system outputs. Error bars show one positive and one negative standard deviation for the systems in that language pair.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 30, |
|
"text": "(Kos and Bojar, 2009)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 93, |
|
"end": 120, |
|
"text": "(Gim\u00e9nez and M\u00e0rquez, 2007)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 296, |
|
"end": 304, |
|
"text": "Figure 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Shared evaluation task overview", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We measured the correlation of the automatic metrics with the human judgments of translation quality at the system-level using Spearman's rank correlation coefficient \u03c1. We converted the raw scores assigned to each system into ranks. We assigned a human ranking to the systems based on the percent of time that their translations were judged to be better than or equal to the translations of any other system in the manual evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring system-level correlation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "When there are no ties \u03c1 can be calculated using the simplified equation:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring system-level correlation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "\u03c1 = 1 \u2212 6 d 2 i n(n 2 \u2212 1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring system-level correlation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "where d i is the difference between the rank for system i and n is the number of systems. The possible values of \u03c1 range between 1 (where all systems are ranked in the same order) and \u22121 (where the systems are ranked in the reverse order). Thus an automatic evaluation metric with a higher absolute value for \u03c1 is making predictions that are more similar to the human judgments than an automatic evaluation metric with a lower absolute \u03c1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring system-level correlation", |
|
"sec_num": "5.1" |
|
}, |
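A minimal sketch of the system-level correlation computation described above, using the simplified (no-ties) form of Spearman's rho; the rank lists are hypothetical.

```python
# Sketch: Spearman's rank correlation, rho = 1 - 6 * sum(d_i^2) / (n (n^2 - 1)),
# valid when there are no ties in either ranking.

def spearman_rho(human_ranks, metric_ranks):
    assert len(human_ranks) == len(metric_ranks)
    n = len(human_ranks)
    d_squared = sum((h - m) ** 2 for h, m in zip(human_ranks, metric_ranks))
    return 1.0 - (6.0 * d_squared) / (n * (n ** 2 - 1))

# Example: a metric that ranks five systems exactly like the humans gets
# rho = 1, and one that reverses the order gets rho = -1.
print(spearman_rho([1, 2, 3, 4, 5], [1, 2, 3, 4, 5]))
print(spearman_rho([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]))
```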
|
{ |
|
"text": "Because the sentence-level judgments collected in the manual evaluation are relative judgments rather than absolute judgments, it is not possible for us to measure correlation at the sentencelevel in the same way that previous work has done (Kulesza and Shieber, 2004; Albrecht and Hwa, 2007a; Albrecht and Hwa, 2007b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 241, |
|
"end": 268, |
|
"text": "(Kulesza and Shieber, 2004;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 269, |
|
"end": 293, |
|
"text": "Albrecht and Hwa, 2007a;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 318, |
|
"text": "Albrecht and Hwa, 2007b)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Measuring sentence-level consistency", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Rather than calculating a correlation coefficient at the sentence-level we instead ascertained how consistent the automatic metrics were with the human judgments. The way that we calculated consistency was the following: for every pairwise comparison of two systems on a single sentence by a person, we counted the automatic metric as being consistent if the relative scores were the same (i.e. the metric assigned a higher score to the higher ranked system). We divided this by the total number of pairwise comparisons to get a percentage. Because the systems generally assign real numbers as scores, we excluded pairs that the human annotators ranked as ties. Table 7 shows the correlation of automatic metrics when they rank systems that are translating into English. Note that TERp, TER and wcd6p4er are error metrics, so a negative correlation is better for them. The strength of correlation varied for the different language pairs. The automatic metrics were able to rank the French-English systems reasonably well with correlation coefficients in the range of .8 and .9. In comparison, metrics performed worse for Hungarian-English, where half of the systems had negative correlation. The ULC metric once again had strongest correlation with human judgments of translation quality. This was followed closely by MaxSim and RTE, with Meteor and TERp doing respectably well in 4th and 5th place. Notably, Bleu and its variants were the worst performing metrics in this translation direction. Table 8 shows correlation for metrics which operated on languages other than English. Most of the best performing metrics that operate on English do not work for foreign languages, because they perform some linguistic analysis or rely on a resource like WordNet. For translation into foreign languages TERp was the best system overall. The wpBleu and wpF metrics also did extremely well, performing the best in the language pairs that they were applied to. wpBleu and wpF were not applied to Czech because the authors of the metric did not have a Czech tagger. English-German proved to be the most problematic language pair to automatically evaluate, with all of the metrics having a negative correlation except wpBleu and TER. Table 9 gives detailed results for how well vari-ations on a number of automatic metrics do for the task of ranking five English-Czech systems. 6 These systems were submitted by Kos and Bojar (2009) , and they investigate the effects of using Prague Dependency Treebank annotations during automatic evaluation. They linearizing the Czech trees and evaluated either the lemmatized forms of the Czech (lemma) read off the trees or the Tectogrammatical form which retained only lemmatized content words (tecto). The table also demonstrates SemPOS, Meteor, and GTM perform better on Czech than many other metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 2368, |
|
"end": 2369, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 2402, |
|
"end": 2422, |
|
"text": "Kos and Bojar (2009)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 662, |
|
"end": 669, |
|
"text": "Table 7", |
|
"ref_id": "TABREF7" |
|
}, |
|
{ |
|
"start": 1496, |
|
"end": 1503, |
|
"text": "Table 8", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 2224, |
|
"end": 2231, |
|
"text": "Table 9", |
|
"ref_id": "TABREF9" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Measuring sentence-level consistency", |
|
"sec_num": "5.2" |
|
}, |
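A minimal sketch of the sentence-level consistency computation described above; the tuple layout of the human judgments is an assumption for illustration, and error metrics such as TER would need their scores negated before being passed in.

```python
# Sketch: fraction of non-tied human pairwise judgments on single sentences
# for which the metric prefers the same system as the human (random = 50%).

def consistency(comparisons):
    """`comparisons`: (metric_score_a, metric_score_b, human_verdict) tuples,
    where human_verdict is "a", "b", or "tie"; ties are excluded."""
    consistent = total = 0
    for score_a, score_b, verdict in comparisons:
        if verdict == "tie":
            continue
        total += 1
        if (score_a > score_b) == (verdict == "a"):
            consistent += 1
    return consistent / total if total else 0.0

# Example: the metric agrees with the human on one of two non-tied pairs.
print(consistency([(0.4, 0.3, "a"), (0.2, 0.5, "a"), (0.1, 0.1, "tie")]))
```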
|
{ |
|
"text": "Tables 10 and 11 show the percent of times that the metrics' scores were consistent with human rankings of every pair of translated sentences. 7 Since we eliminated sentence pairs that were judged to be equal, the random baseline for this task is 50%. Many metrics failed to reach the baseline (including most metrics in the out-of-English direction). This indicates that sentence-level evaluation of machine translation quality is very difficult. RTE and ULC again do the best overall for the into-English direction. They are followed closely by wpF and wcd6p4er, which considerably improve their performance over their system-level correlations. We tried a variant on measuring sentence-level consistency. Instead of using the scores assigned to each individual sentence, we used the systemlevel score and applied it to every sentence that was produced by that system. These can be thought of as a metric's prior expectation about how a system should preform, based on their performance on the whole data set. Tables 12 and 13 show that using the system-level scores in place of the sentence-level scores results in considerably higher consistency with human judgments. This suggests an interesting line of research for improving sentence-level predictions by using the performance on a larger data set as a prior.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 144, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1012, |
|
"end": 1028, |
|
"text": "Tables 12 and 13", |
|
"ref_id": "TABREF13" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sentence-level consistency", |
|
"sec_num": "6.2" |
|
}, |
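The system-level variant described above can be expressed with the same machinery by giving every sentence its system's corpus-level score. A hedged illustration, reusing the hypothetical `consistency` helper from the earlier sketch (both sketches together run as one file) and an equally hypothetical `system_scores` dictionary:

```python
def systemlevel_consistency(system_scores, pairwise_judgments):
    """Consistency when every sentence inherits its system's corpus-level score.

    system_scores: dict mapping system -> corpus-level metric score
                   (higher is better).
    """
    # Expand the corpus-level scores so that each sentence of a system gets
    # the same constant score, then reuse the sentence-level measure.
    expanded = {(system, sent_id): system_scores[system]
                for sent_id, better, worse in pairwise_judgments
                for system in (better, worse)}
    return consistency(expanded, pairwise_judgments)
```

Because the expanded scores are constant within a system, two systems with identical corpus-level scores are never counted as consistent, mirroring the strict inequality used in the sentence-level comparison.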
|
{ |
|
"text": "As in previous editions of this workshop we carried out an extensive manual and automatic evaluation of machine translation performance for translating from European languages into English, Table 13 : Consistency of the automatic metrics when their system-level ranks are treated as sentence-level scores. Oracle shows the consistency of using the system-level human ranks that are given in Table 6 . and vice versa. The number of participants remained stable compared to last year's WMT workshop, with 22 groups from 20 institutions participating in WMT09. This year's evaluation also included 7 commercial rule-based MT systems and Google's online statistical machine translation system.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 190, |
|
"end": 198, |
|
"text": "Table 13", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 391, |
|
"end": 398, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Compared to previous years, we have simplified the evaluation conditions by removing the indomain vs. out-of-domain distinction focusing on news translations only. The main reason for this was eliminating the advantage statistical systems have with respect to test data that are from the same domain as the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Analogously to previous years, the main focus of comparing the quality of different approaches is on manual evaluation. Here, also, we reduced the number of dimensions with respect to which the different systems are compared, with sentencelevel ranking as the primary type of manual evaluation. In addition to the direct quality judgments we also evaluated translation quality by having people edit the output of systems and have assessors judge the correctness of the edited output. The degree to which users were able to edit the translations (without having access to the source sentence or reference translation) served as a measure of the overall comprehensibility of the translation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Although the inter-annotator agreement in the sentence-ranking evaluation is only fair (as measured by the Kappa score), agreement can be improved by removing the first (up to 50) judgments of each assessor, focusing on the judgments that were made once the assessors are more familiar with the task. Inter-annotator agreement with respect to correctness judgments of the edited translations were higher (moderate), which is probably due to the simplified evaluation criterion (binary judgments versus rankings). Inter-annotator agreement for both conditions can be increased further by removing the judges with the worst agreement. Intra-annotator agreement on the other hand was considerably higher ranging between moderate and substantial.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "7" |
|
}, |
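The fair/moderate/substantial labels above follow the Landis and Koch (1977) interpretation of Kappa, where K = (P(A) - P(E)) / (1 - P(E)). The following is a minimal sketch under the simplifying assumption of a fixed chance-agreement probability P(E), e.g. 1/3 for three-way better/worse/equal ranking labels or 1/2 for binary correctness judgments; it is an illustration, not the exact script behind the reported numbers.

```python
def kappa(label_pairs, p_e):
    """Simple Kappa: (P(A) - P(E)) / (1 - P(E)).

    label_pairs: iterable of (label_1, label_2) giving the labels two
                 annotators assigned to the same item.
    p_e: assumed probability of agreeing by chance (e.g. 1/3 for a
         better/worse/equal ranking label, 1/2 for a binary judgment).
    """
    pairs = list(label_pairs)
    p_a = sum(a == b for a, b in pairs) / len(pairs)  # observed agreement
    return (p_a - p_e) / (1 - p_e)


# Example with invented labels: two annotators agree on 3 of 4 items.
print(kappa([(">", ">"), ("<", "<"), ("=", "="), (">", "=")], p_e=1/3))  # 0.625
```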
|
{ |
|
"text": "In addition to the manual evaluation criteria we applied a large number of automated metrics to see how they correlate with the human judgments. There is considerably variation between the different metrics and the language pairs under consideration. As in WMT08, the ULC metric had the highest overall correlation with human judgments when translating into English, with MaxSim and RTE following closely behind. TERp and wpBleu were best when translating into other languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Automatically predicting human judgments at the sentence-level proved to be quite challenging with many of the systems performing around chance. We performed an analysis that showed that if metrics' system-level scores are used in place of their scores for individual sentences, that they do quite a lot better. This suggests that prior probabilities ought to be integrated into sentencelevel scoring.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "All data sets generated by this workshop, including the human A Pairwise system comparisons by human judges Tables 14-24 show pairwise comparisons between systems for each language pair. The numbers in each of the tables' cells indicate the percentage of times that the system in that column was judged to be better than the system in that row. Bolding indicates the winner of the two systems. The difference between 100 and the sum of the complimentary cells is the percent of time that the two systems were judged to be equal. Because there were so many systems and data conditions the significance of each pairwise comparison needs to be quantified. We applied the Sign Test to measure which comparisons indicate genuine differences (rather than differences that are attributable to chance). In the following tables indicates statistical significance at p \u2264 0.10, \u2020 indicates statistical significance at p \u2264 0.05, and \u2021 indicates statistical significance at p \u2264 0.01, according to the Sign Test.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 120, |
|
"text": "Tables 14-24", |
|
"ref_id": "TABREF16" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "7" |
|
}, |
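The Sign Test referred to above can be sketched with a simple exact binomial computation: under the null hypothesis that two systems are equally good, each non-tied comparison is a fair coin flip. The helper below is an illustrative implementation with hypothetical win counts, not the script used to produce the published significance markers (in particular, the official handling of ties may differ).

```python
from math import comb


def sign_test_p(wins_a, wins_b):
    """Two-sided Sign Test p-value for paired win/loss counts (ties excluded).

    Under the null hypothesis, the number of wins for either system follows
    Binomial(n, 0.5), where n = wins_a + wins_b.
    """
    n = wins_a + wins_b
    k = max(wins_a, wins_b)
    # Probability of a split at least this lopsided, doubled for two sides.
    tail = sum(comb(n, i) for i in range(k, n + 1)) / 2 ** n
    return min(1.0, 2 * tail)


# Example: one system preferred in 40 of 55 non-tied comparisons.
print(sign_test_p(40, 15))  # well below 0.01, i.e. significant at the 0.01 level
```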
|
{ |
|
"text": "Tables 26 and 25 give the automatic scores for each of the systems. . 40 .31 .41 .23 .56 .43 .46 .36 .37 .41 .30 .40 .33 .41 .40 .40 .50 .47 .46 .49 .36 >= OTHERS .58 .5 .66 .34 .76 .62 .65 .60 .56 .54 .47 .59 .52 .61 .61 .55 .73 .66 .71 .67 .57 ", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 245, |
|
"text": "40 .31 .41 .23 .56 .43 .46 .36 .37 .41 .30 .40 .33 .41 .40 .40 .50 .47 .46 .49 .36 >= OTHERS .58 .5 .66 .34 .76 .62 .65 .60 .56 .54 .47 .59 .52 .61 .61 .55 .73 .66 .71 .67 .57", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Automatic scores", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://statmt.org/WMT09/results.html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For more details see the XML test files. The docid tag gives the source and the date for each document in the test set, and the origlang tag indicates the original source language.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://translate.google.com", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In one case this definition meant that the system that was ranked the highest overall was not considered to be one of the best systems. For German-English translation RBMT5 was ranked highest overall, but was statistically significantly worse than RBMT2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The Spearman rank correlation coefficients for how the two types of manual evaluation rank systems are .67 for deen, .67 for fr-en, .06 for es-en, .50 for cz-en, .36 for hu-en, .65 for en-de, .02 for en-fr, -.6 for en-es, and .94 for en-cz.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "PCTRANS was excluded from the English-Czech systems because its SGML file was malformed.7 Not all metrics entered into the sentence-level task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://www.statmt.org/wmt09/results. html", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work was supported in parts by the EuroMatrix project funded by the European Commission (6th Framework Programme), the GALE program of the US Defense Advanced Research Projects Agency, Contract No. HR0011-06-C-0022, and the US National Science Foundation under grant IIS-0713448.We are grateful to Holger Schwenk and Preslav Nakov for pointing out the potential bias in our method for ranking systems when self-judgments are excluded. We analyzed the results and found that this did not hold. We would like to thank Maja Popovic for sharing thoughts about how to improve the manual evaluation. Thanks to Cam Fordyce for helping out with the manual evaluation again this year.An extremely big thanks to Sebastian Pado for helping us work through the logic of segment-level scoring of automatic evaluation metric.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Evaluation metrics for highcorrelation with human rankings of machine translation output", |
|
"authors": [ |
|
{ |
|
"first": "Abhaya", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "; M-Bleu", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M-Ter", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Third Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "115--118", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abhaya Agarwal and Alon Lavie. 2008. Meteor, M- BLEU and M-TER: Evaluation metrics for high- correlation with human rankings of machine trans- lation output. In Proceedings of the Third Workshop on Statistical Machine Translation, pages 115-118, Columbus, Ohio, June. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "A reexamination of machine learning approaches for sentence-level MT evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Albrecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics (ACL-2007)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua Albrecht and Rebecca Hwa. 2007a. A re- examination of machine learning approaches for sentence-level MT evaluation. In Proceedings of the 45th Annual Meeting of the Association for Compu- tational Linguistics (ACL-2007), Prague, Czech Re- public.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Regression for sentence-level MT evaluation with pseudo references", |
|
"authors": [ |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Albrecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics (ACL-2007)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua Albrecht and Rebecca Hwa. 2007b. Regres- sion for sentence-level MT evaluation with pseudo references. In Proceedings of the 45th Annual Meet- ing of the Association for Computational Linguistics (ACL-2007), Prague, Czech Republic.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "LIMSI's statistical translation systems for WMT'09", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Allauzen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josep", |
|
"middle": [], |
|
"last": "Crego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aur\u00e9lien", |
|
"middle": [], |
|
"last": "Max", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fran", |
|
"middle": [], |
|
"last": "Yvon", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Allauzen, Josep Crego, Aur\u00e9lien Max, and Fran cois Yvon. 2009. LIMSI's statistical transla- tion systems for WMT'09. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "English-Czech MT in 2008", |
|
"authors": [ |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mare\u010dek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "V\u00e1clav", |
|
"middle": [], |
|
"last": "Nov\u00e1k", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ond\u0159ej Bojar, David Mare\u010dek, V\u00e1clav Nov\u00e1k, Mar- tin Popel, Jan Pt\u00e1\u010dek, Jan Rou\u0161, and Zden\u011b\u01e9 Zabokrtsk\u00fd. 2009. English-Czech MT in 2008. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Meta-) evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cameron", |
|
"middle": [], |
|
"last": "Fordyce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Schroeder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the Second Workshop on Statistical Machine Translation (WMT07)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch, Cameron Fordyce, Philipp Koehn, Christof Monz, and Josh Schroeder. 2007. (Meta-) evaluation of machine translation. In Pro- ceedings of the Second Workshop on Statistical Ma- chine Translation (WMT07), Prague, Czech Repub- lic.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Further meta-evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cameron", |
|
"middle": [], |
|
"last": "Fordyce", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christof", |
|
"middle": [], |
|
"last": "Monz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Schroeder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Third Workshop on Statistical Machine Translation (WMT08)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch, Cameron Fordyce, Philipp Koehn, Christof Monz, and Josh Schroeder. 2008. Further meta-evaluation of machine translation. In Proceedings of the Third Workshop on Statistical Machine Translation (WMT08), Colmbus, Ohio.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Toward using morphology in French-English phrase-based SMT", |
|
"authors": [ |
|
{ |
|
"first": "Marine", |
|
"middle": [], |
|
"last": "Carpuat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marine Carpuat. 2009. Toward using morphology in French-English phrase-based SMT. In Proceed- ings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "An automatic metric for machine translation evaluation based on maximum similary", |
|
"authors": [ |
|
{ |
|
"first": "Yee", |
|
"middle": [], |
|
"last": "Seng Chan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "the Metrics-MATR Workshop of AMTA-2008", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yee Seng Chan and Hwee Tou Ng. 2008. An automatic metric for machine translation evaluation based on maximum similary. In In the Metrics-MATR Work- shop of AMTA-2008, Honolulu, Hawaii.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Silke Theison, Christian Federmann, and Hans Uszkoreit", |
|
"authors": [ |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Jellinghaus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Eisele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Hunsicker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu Chen, Michael Jellinghaus, Andreas Eisele, Yi Zhang, Sabine Hunsicker, Silke Theison, Chris- tian Federmann, and Hans Uszkoreit. 2009. Com- bining multi-engine translations with moses. In Pro- ceedings of the Fourth Workshop on Statistical Ma- chine Translation, Athens, Greece, March. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "MATREX: The DCU MT system for WMT", |
|
"authors": [ |
|
{ |
|
"first": "Jinhua", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergio", |
|
"middle": [], |
|
"last": "Penkale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Way", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhua Du, Yifan He, Sergio Penkale, and Andy Way. 2009. MATREX: The DCU MT system for WMT 2009. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Statistical post editing and dictionary extraction: Systran/Edinburgh submissions for ACL-WMT2009", |
|
"authors": [ |
|
{ |
|
"first": "Lo\u00efc", |
|
"middle": [], |
|
"last": "Dugast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lo\u00efc Dugast, Jean Senellart, and Philipp Koehn. 2009. Statistical post editing and dictionary ex- traction: Systran/Edinburgh submissions for ACL- WMT2009. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "The University of Maryland statistical machine translation system for the fourth workshop on machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hendra", |
|
"middle": [], |
|
"last": "Setiawan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuval", |
|
"middle": [], |
|
"last": "Marton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Dyer, Hendra Setiawan, Yuval Marton, and Philip Resnik. 2009. The University of Mary- land statistical machine translation system for the fourth workshop on machine translation. In Pro- ceedings of the Fourth Workshop on Statistical Ma- chine Translation, Athens, Greece, March. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Local search with very large-scale neighborhoods for optimal permutations in machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roy", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Tromble", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the Human Language Technology Conference of the North American chapter of the Association for Computational Linguistics (HLT/NAACL-2006)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason Eisner and Roy W. Tromble. 2006. Local search with very large-scale neighborhoods for op- timal permutations in machine translation. In Pro- ceedings of the Human Language Technology Con- ference of the North American chapter of the Associ- ation for Computational Linguistics (HLT/NAACL- 2006), New York, New York.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Translation combination using factored word substitution", |
|
"authors": [ |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Federmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silke", |
|
"middle": [], |
|
"last": "Theison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Eisele", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hans", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Jellinghaus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Hunsicker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christian Federmann, Silke Theison, Andreas Eisele, Hans Uszkoreit, Yu Chen, Michael Jellinghaus, and Sabine Hunsicker. 2009. Translation combina- tion using factored word substitution. In Proceed- ings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Experiments in morphosyntactic processing for translating to and from German", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Fraser. 2009. Experiments in morphosyn- tactic processing for translating to and from German. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Linguistic features for automatic evaluation of heterogenous MT systems", |
|
"authors": [ |
|
{ |
|
"first": "Jes\u00fas", |
|
"middle": [], |
|
"last": "Gim\u00e9nez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL Workshop on Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2007. Linguis- tic features for automatic evaluation of heterogenous MT systems. In Proceedings of ACL Workshop on Machine Translation.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A smorgasbord of features for automatic MT evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Jes\u00fas", |
|
"middle": [], |
|
"last": "Gim\u00e9nez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the Third Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "195--198", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2008. A smor- gasbord of features for automatic MT evaluation. In Proceedings of the Third Workshop on Statistical Machine Translation, pages 195-198.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "An improved statistical transfer system for French-English machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Hanneman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vamshi", |
|
"middle": [], |
|
"last": "Ambati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Greg Hanneman, Vamshi Ambati, Jonathan H. Clark, Alok Parlikar, and Alon Lavie. 2009. An improved statistical transfer system for French- English machine translation. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Machine translation system combination with flexible word ordering", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Heafield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Hanneman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Heafield, Greg Hanneman, and Alon Lavie. 2009. Machine translation system combination with flexible word ordering. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "CMU system combination for WMT'09", |
|
"authors": [ |
|
{ |
|
"first": "Almut", |
|
"middle": [], |
|
"last": "Silja Hildebrand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Almut Silja Hildebrand and Stephan Vogel. 2009. CMU system combination for WMT'09. In Pro- ceedings of the Fourth Workshop on Statistical Ma- chine Translation, Athens, Greece, March. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Open source toolkit for statistical machine translation: Factored translation models and confusion network decoding", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ondrej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Constantin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Herbst", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Zens", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "CLSP Summer Workshop Final Report WS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Nicola Bertoldi, Ondrej Bojar, Chris Callison-Burch, Alexandra Constantin, Brooke Cowan, Chris Dyer, Marcello Federico, Evan Herbst, Hieu Hoang, Christine Moran, Wade Shen, and Richard Zens. 2007. Open source toolkit for statistical machine translation: Factored translation models and confusion network decoding. CLSP Summer Workshop Final Report WS-2006, Johns Hopkins University.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Evaluation of Machine Translation Metrics for Czech as the Target Language", |
|
"authors": [ |
|
{ |
|
"first": "Kamil", |
|
"middle": [], |
|
"last": "Kos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ond\u0159ej", |
|
"middle": [], |
|
"last": "Bojar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Prague Bulletin of Mathematical Linguistics", |
|
"volume": "92", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kamil Kos and Ond\u0159ej Bojar. 2009. Evaluation of Ma- chine Translation Metrics for Czech as the Target Language. Prague Bulletin of Mathematical Lin- guistics, 92. in print.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A learning approach to improving sentence-level MT evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Kulesza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Shieber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 10th International Conference on Theoretical and Methodological Issues in Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Kulesza and Stuart M. Shieber. 2004. A learn- ing approach to improving sentence-level MT evalu- ation. In Proceedings of the 10th International Con- ference on Theoretical and Methodological Issues in Machine Translation, Baltimore, MD, October 4-6.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "The measurement of observer agreement for categorical data", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Landis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gary", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Koch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1977, |
|
"venue": "Biometrics", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "159--174", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Richard Landis and Gary G. Koch. 1977. The mea- surement of observer agreement for categorical data. Biometrics, 33:159-174.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "BLEUSP, PINVWER, CDER: Three improved MT evaluation measures", |
|
"authors": [ |
|
{ |
|
"first": "Gregor", |
|
"middle": [], |
|
"last": "Leusch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "In In the Metrics-MATR Workshop of AMTA", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gregor Leusch and Hermann Ney. 2008. BLEUSP, PINVWER, CDER: Three improved MT evaluation measures. In In the Metrics-MATR Workshop of AMTA-2008, Honolulu, Hawaii.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "The RWTH system combination system for WMT", |
|
"authors": [ |
|
{ |
|
"first": "Gregor", |
|
"middle": [], |
|
"last": "Leusch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeny", |
|
"middle": [], |
|
"last": "Matusov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gregor Leusch, Evgeny Matusov, and Hermann Ney. 2009. The RWTH system combination system for WMT 2009. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Joshua: An open source toolkit for parsingbased machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Zhifei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juri", |
|
"middle": [], |
|
"last": "Ganitkevitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanjeev", |
|
"middle": [], |
|
"last": "Khudanpur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lane", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wren", |
|
"middle": [], |
|
"last": "Thornton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Weese", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Zaidan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhifei Li, Chris Callison-Burch, Chris Dyer, Juri Ganitkevitch, Sanjeev Khudanpur, Lane Schwartz, Wren Thornton, Jonathan Weese, and Omar Zaidan. 2009. Joshua: An open source toolkit for parsing- based machine translation. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Automatic evaluation of machine translation quality using longest common subsequence and skip-bigram statistics", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franz Josef", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-2004)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004. Auto- matic evaluation of machine translation quality us- ing longest common subsequence and skip-bigram statistics. In Proceedings of the 42nd Annual Meet- ing of the Association for Computational Linguistics (ACL-2004), Barcelona, Spain.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Fast and accurate sentence alignment of bilingual corpora", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Robert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Moore", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 5th Biennial Conference of the Association for Machine Translation in the Americas (AMTA-2002)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Robert C. Moore. 2002. Fast and accurate sentence alignment of bilingual corpora. In Proceedings of the 5th Biennial Conference of the Association for Machine Translation in the Americas (AMTA-2002), Tiburon, California.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "NUS at WMT09: Domain adaptation experiments for English-Spanish machine translation of news commentary text", |
|
"authors": [ |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwee Tou", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Preslav Nakov and Hwee Tou Ng. 2009. NUS at WMT09: Domain adaptation experiments for English-Spanish machine translation of news com- mentary text. In Proceedings of the Fourth Work- shop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "The Universit\u00e4t Karlsruhe translation system for the EACL-WMT", |
|
"authors": [ |
|
{ |
|
"first": "Jan", |
|
"middle": [], |
|
"last": "Niehues", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teresa", |
|
"middle": [], |
|
"last": "Herrmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muntsin", |
|
"middle": [], |
|
"last": "Kolss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Waibel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Niehues, Teresa Herrmann, Muntsin Kolss, and Alex Waibel. 2009. The Universit\u00e4t Karlsruhe translation system for the EACL-WMT 2009. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Evaluation plan for gale go/no-go phase 3 / phase 3.5 translation evaluations", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nist", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "NIST. 2008. Evaluation plan for gale go/no-go phase 3 / phase 3.5 translation evaluations. June 18, 2008.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Morphologic's submission for the WMT 2009 shared task", |
|
"authors": [ |
|
{ |
|
"first": "Attila", |
|
"middle": [], |
|
"last": "Nov\u00e1k", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Attila Nov\u00e1k. 2009. Morphologic's submission for the WMT 2009 shared task. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Machine translation evaluation with textual entailment features", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Pado, Michel Galley, Dan Jurafsky, and Christopher D. Manning. 2009. Machine transla- tion evaluation with textual entailment features. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Bleu: A method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: A method for auto- matic evaluation of machine translation. In Pro- ceedings of the 40th Annual Meeting of the Asso- ciation for Computational Linguistics (ACL-2002), Philadelphia, Pennsylvania.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "NICT@WMT09: Model adaptation and transliteration for Spanish-English SMT", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Finch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eiichiro", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Paul, Andrew Finch, and Eiichiro Sumita. 2009. NICT@WMT09: Model adaptation and transliteration for Spanish-English SMT. In Pro- ceedings of the Fourth Workshop on Statistical Ma- chine Translation, Athens, Greece, March. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Syntaxoriented evaluation measures for machine translation output", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovic and Hermann Ney. 2009. Syntax- oriented evaluation measures for machine transla- tion output. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "The RWTH machine translation system for WMT", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Vilar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evgeny", |
|
"middle": [], |
|
"last": "Matusov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovic, David Vilar, Daniel Stein, Evgeny Ma- tusov, and Hermann Ney. 2009. The RWTH ma- chine translation system for WMT 2009. In Pro- ceedings of the Fourth Workshop on Statistical Ma- chine Translation, Athens, Greece, March. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Official results of the NIST", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Przybocki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kay", |
|
"middle": [], |
|
"last": "Peterson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastien", |
|
"middle": [], |
|
"last": "Bronsart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Metrics for MAchine TRanslation\" challenge (MetricsMATR08)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Przybocki, Kay Peterson, and Se- bastien Bronsart. 2008. Official results of the NIST 2008 \"Metrics for MAchine TRanslation\" challenge (MetricsMATR08).", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "The TALP-UPC phrase-based translation system for EACL-WMT", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Jos\u00e9", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Fonollosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Khalilov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jos\u00e9", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Costajuss\u00e1", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Mari\u00f1o", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Q", |
|
"middle": [], |
|
"last": "Henr\u00e1quez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adolfo", |
|
"middle": [], |
|
"last": "Hern\u00e1ndez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafael", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Banchs", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jos\u00e9 A. R. Fonollosa, Maxim Khalilov, Marta R. Costa- juss\u00e1, Jos\u00e9 B. Mari\u00f1o, Carlos A. Henr\u00e1quez Q., Adolfo Hern\u00e1ndez H., and Rafael E. Banchs. 2009. The TALP-UPC phrase-based translation system for EACL-WMT 2009. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Incremental hypothesis alignment with flexible matching for building confusion networks: BBN system description for WMT09 system combination task", |
|
"authors": [ |
|
{ |
|
"first": "Antti-Veikko", |
|
"middle": [], |
|
"last": "Rosti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spyros", |
|
"middle": [], |
|
"last": "Matsoukas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antti-Veikko Rosti, Bing Zhang, Spyros Matsoukas, and Richard Schwartz. 2009. Incremental hy- pothesis alignment with flexible matching for build- ing confusion networks: BBN system description for WMT09 system combination task. In Proceed- ings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Word lattices for multi-source translation", |
|
"authors": [ |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Schroeder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "12th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Josh Schroeder, Trevor Cohn, and Philipp Koehn. 2009. Word lattices for multi-source translation. In 12th Conference of the European Chapter of the Association for Computational Linguistics (EACL- 2009), Athens, Greece.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "SMT and SPE machine translation systems for WMT'09", |
|
"authors": [ |
|
{ |
|
"first": "Holger", |
|
"middle": [], |
|
"last": "Schwenk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdul", |
|
"middle": [], |
|
"last": "Sadaf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Loic", |
|
"middle": [], |
|
"last": "Rauf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Barrault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Senellart", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Holger Schwenk, Sadaf Abdul Rauf, Loic Barrault, and Jean Senellart. 2009. SMT and SPE machine trans- lation systems for WMT'09. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "A study of translation edit rate with targeted human annotation", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linnea", |
|
"middle": [], |
|
"last": "Micciulla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Makhoul", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 7th Biennial Conference of the Association for Machine Translation in the Americas (AMTA-2006)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A study of translation edit rate with targeted human annotation. In Proceedings of the 7th Biennial Conference of the Association for Machine Translation in the Ameri- cas (AMTA-2006), Cambridge, Massachusetts.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Fluency, adequacy, or HTER? exploring different human judgments with a tunable MT metric", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Snover", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitin", |
|
"middle": [], |
|
"last": "Madnani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bonnie", |
|
"middle": [], |
|
"last": "Dorr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Schwartz", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Snover, Nitin Madnani, Bonnie Dorr, and Richard Schwartz. 2009. Fluency, adequacy, or HTER? exploring different human judgments with a tunable MT metric. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Smoothed Bloom filter language models: Tera-scale lms on the cheap", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Talbot and Miles Osborne. 2007. Smoothed Bloom filter language models: Tera-scale lms on the cheap. In Proceedings of the 2007 Joint Con- ference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL), Prague, Czech Repub- lic.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Deep linguistic multilingual translation and bilingual dictionaries", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wehrli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luka", |
|
"middle": [], |
|
"last": "Nerima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yves", |
|
"middle": [], |
|
"last": "Scherrer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Wehrli, Luka Nerima, and Yves Scherrer. 2009. Deep linguistic multilingual translation and bilingual dictionaries. In Proceedings of the Fourth Workshop on Statistical Machine Transla- tion, Athens, Greece, March. Association for Com- putational Linguistics. .33 .42 .37 .38 .41 .35 .49 .45 .11 \u2021 .39 .25 .36 .18 \u2021 .26 .36 .22 \u2021 .32 .18 \u2021 .38 .4 .4 .38 .22", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "This screenshot shows an annotator judging the acceptability of edited translations.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"text": "The effect of discarding every annotators' initial judgments, up to the first 50 items", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"text": "The effect of removing annotators with the lowest agreement, disregarding up to 40 annotators", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"text": "0.12 0.11 -0.32 0.18 0.19 0.37 0.42 0.43 0.30 4.95 4.74 0.12 0.12 0.75 0.87 0.21 0.58 0.27 0.19 UMD 0.66 0.13 0.12 -0.28 0.18 0.2 0.36 0.44 0.45 0.30 5.41 5.12 0.21 0.13 0.68 0.85 0.22 0.55 0.27 0.18", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Meanwhile, the Belgian, Dutch and Luxembourg governments partially nationalized the European financial conglomerate Fortis.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>WMT09 Manual Evaluation</td></tr><tr><td>http://www.statmt.org/wmt09/judge/do_task.php</td></tr><tr><td>:</td></tr><tr><td>They are often linked to other sleep disorders, such as nightmares, night terrors, the nocturnal enuresis (bedwetting) or sleepwalking, but this is</td></tr><tr><td>not always the case.</td></tr><tr><td>Reset Edit</td></tr><tr><td>Edited.</td></tr><tr><td>No corrections needed.</td></tr><tr><td>Unable to correct.</td></tr><tr><td>Annotator: ccb Task: WMT09 Multisource-English News Editing</td></tr><tr><td>Instructions:</td></tr><tr><td>Correct the translation displayed, making it as fluent as possble. If no corrections are needed, select \"No corrections needed.\" If you cannot understand</td></tr><tr><td>the sentence well enough to correct it, select \"Unable to correct.\"</td></tr><tr><td>Figure 2: This screenshot shows an annotator editing the output of a machine translation system.</td></tr><tr><td>WMT09 Manual Evaluation</td></tr><tr><td>http://www.statmt.org/wmt09/judge/do_task.php</td></tr><tr><td>Judge Edited MT Output</td></tr><tr><td>You have judged 84 sentences for WMT09 French-English News Edit Acceptance, 459 sentences total taking 64.9 seconds per sentence.</td></tr><tr><td>Source: Au m\u00eame moment, les gouvernements belges, hollandais et luxembourgeois ont en parti nationalis\u00e9 le conglom\u00e9rat europ\u00e9en financier, Fortis.</td></tr><tr><td>Les analystes de Barclays Capital ont d\u00e9clar\u00e9 que les n\u00e9gociations fr\u00e9n\u00e9tiques de ce week end, conclues avec l'accord de sauvetage\" semblent ne pas avoir</td></tr><tr><td>r\u00e9ussi \u00e0 faire revivre le march\u00e9\".</td></tr><tr><td>\"la prospective d'\u00e9quit\u00e9 globale, de taux d'int\u00e9r\u00eat et d'\u00e9change des march\u00e9s, est devenue incertaine\" ont \u00e9crit les analystes de Deutsche Bank dans une</td></tr><tr><td>lettre \u00e0 leurs investisseurs.\"</td></tr><tr><td>\"nous pensons que les mati\u00e8res premi\u00e8res ne pourront \u00e9chapper \u00e0 cette contagion.</td></tr><tr><td>Reference:</td></tr></table>" |
|
}, |
|
"TABREF7": { |
|
"html": null, |
|
"text": "The system-level correlation of the automatic evaluation metrics with the human judgments for translation into English.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>en-de (13 systems)</td><td>en-fr (16 systems)</td><td>en-es (11 systems)</td><td>en-cz (5 systems)</td><td>Average</td></tr><tr><td>terp</td><td colspan=\"3\">.03 -.89 -.58</td><td colspan=\"2\">-.4 -.46</td></tr><tr><td>ter</td><td colspan=\"2\">-.03 -.78</td><td>-.5</td><td colspan=\"2\">-.1 -.35</td></tr><tr><td>bleusp4114</td><td>-.3</td><td>.88</td><td>.51</td><td>.1</td><td>.3</td></tr><tr><td>bleusp</td><td>-.3</td><td>.87</td><td>.51</td><td>.1</td><td>.29</td></tr><tr><td>bleu</td><td>-.43</td><td>.87</td><td>.36</td><td>.3</td><td>.27</td></tr><tr><td colspan=\"2\">bleu (cased) -.45</td><td>.87</td><td>.35</td><td>.3</td><td>.27</td></tr><tr><td>bleu-ter/2</td><td>-.37</td><td>.87</td><td>.44</td><td>.1</td><td>.26</td></tr><tr><td>wcd6p4er</td><td colspan=\"3\">.54 -.89 -.45</td><td colspan=\"2\">-.1 -.22</td></tr><tr><td>nist (cased)</td><td>-.47</td><td>.84</td><td>.35</td><td>.1</td><td>.2</td></tr><tr><td>nist</td><td>-.52</td><td>.87</td><td>.23</td><td>.1</td><td>.17</td></tr><tr><td>wpF</td><td>-.06</td><td>.9</td><td colspan=\"2\">.58 n/a</td><td>n/a</td></tr><tr><td>wpbleu</td><td>.07</td><td>.92</td><td colspan=\"2\">.63 n/a</td><td>n/a</td></tr></table>" |
|
}, |
|
"TABREF8": { |
|
"html": null, |
|
"text": "The system-level correlation of the automatic evaluation metrics with the human judgments for translation out of English.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>SemPOS</td><td>.4 BLEU tecto</td><td>.3</td></tr><tr><td>Meteor</td><td>.4 BLEU</td><td>.3</td></tr><tr><td colspan=\"2\">GTM(e=0.5) tecto GTM(e=0.5) lemma .4 NIST .4 NIST lemma GTM(e=0.5) .4 BLEU lemma WER tecto .3 WER lemma TER tecto .3 WER</td><td>.1 .1 .1 -.1 -.1</td></tr><tr><td>PER tecto F-measure tecto</td><td>.3 TER lemma .3 TER</td><td>-.1 -.1</td></tr><tr><td>F-measure lemma F-measure</td><td>.3 PER lemma .3 PER</td><td>-.1 -.1</td></tr><tr><td/><td>NIST tecto</td><td>-.3</td></tr></table>" |
|
}, |
|
"TABREF9": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: The system-level correlation for auto-</td></tr><tr><td>matic metrics ranking five English-Czech systems</td></tr><tr><td>6 Evaluation task results</td></tr><tr><td>6.1 System-level correlation</td></tr></table>" |
|
}, |
|
"TABREF10": { |
|
"html": null, |
|
"text": ".51 .50 .51 .51 .54 rte (absolute) .54 .56 .51 .50 .55 .51 .53 wpF .54 .55 .50 .47 .48 .51 .52 wcd6p4er .54 .54 .49 .48 .48 .50 .52 maxsim .53 .55 .49 .47 .50 .49 .52 bleusp .54 .55 .49 .47 .46 .50 .51 bleusp4114 .53 .55 .48 .47 .46 .50 .51 rte (pairwise) .49 .48 .52 .53 .55 .52 .", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>fr-en (6268 pairs)</td><td>de-en (6382 pairs)</td><td>es-en (4106 pairs)</td><td>cz-en (2251 pairs)</td><td>hu-en (2193 pairs)</td><td>xx-en (1952 pairs)</td><td>Overall (23152 pairs)</td></tr><tr><td>ulc</td><td colspan=\"7\">.55 .56 51</td></tr><tr><td>terp</td><td colspan=\"7\">.52 .53 .48 .46 .45 .48 .50</td></tr><tr><td>meteor-0.6</td><td colspan=\"7\">.50 .53 .46 .48 .47 .47 .49</td></tr><tr><td>meteor-rank</td><td colspan=\"7\">.50 .52 .46 .48 .47 .47 .49</td></tr><tr><td>meteor-0.7</td><td colspan=\"7\">.49 .52 .46 .48 .47 .47 .49</td></tr><tr><td>ter</td><td colspan=\"7\">.48 .47 .43 .41 .40 .42 .45</td></tr><tr><td>wpbleu</td><td colspan=\"7\">.46 .45 .46 .39 .35 .45 .44</td></tr></table>" |
|
}, |
|
"TABREF11": { |
|
"html": null, |
|
"text": ".47 .52 .49 .50 bleusp4114 .57 .46 .54 .49 .50 bleusp .57 .46 .53 .48 .49 ter .50 .41 .45 .37 .41 terp .51 .39 .48 .27 .36 wpF .57 .46 .54 n/a .51 wpbleu .53 .37 .46 n/a .43", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"6\">: Sentence-level consistency of the auto-</td></tr><tr><td colspan=\"6\">matic metrics with human judgments for transla-</td></tr><tr><td colspan=\"6\">tions into English. Italicized numbers fall below</td></tr><tr><td colspan=\"3\">the random-choice baseline.</td><td/><td/></tr><tr><td/><td>en-fr (2967 pairs)</td><td>en-de (6563 pairs)</td><td>en-es (3249 pairs)</td><td>en-cz (11242 pairs)</td><td>Overall (24021 pairs)</td></tr><tr><td>wcd6p4er</td><td>.57</td><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF12": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Sentence-level consistency of the auto-</td></tr><tr><td>matic metrics with human judgments for transla-</td></tr><tr><td>tions out of English. Italicized numbers fall below</td></tr><tr><td>the random-choice baseline.</td></tr></table>" |
|
}, |
|
"TABREF13": { |
|
"html": null, |
|
"text": "Consistency of the automatic metrics when their system-level ranks are treated as sentence-level scores. Oracle shows the consistency of using the system-level human ranks that are given inTable 6.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>en-fr (2967 pairs)</td><td>en-de (6563 pairs)</td><td>en-es (3249 pairs)</td><td>en-cz (11242 pairs)</td><td>Overall (24021 pairs)</td></tr><tr><td>Oracle</td><td colspan=\"5\">.62 .59 .63 .60 .60</td></tr><tr><td>terp</td><td colspan=\"5\">.62 .50 .59 .53 .54</td></tr><tr><td>ter</td><td colspan=\"5\">.61 .51 .58 .50 .53</td></tr><tr><td>bleusp</td><td colspan=\"5\">.62 .48 .59 .50 .52</td></tr><tr><td colspan=\"6\">bleusp4114 .63 .48 .59 .50 .52</td></tr><tr><td>wcd6p4er</td><td colspan=\"5\">.62 .46 .58 .50 .52</td></tr><tr><td>wpbleu</td><td colspan=\"5\">.63 .51 .60 n/a .56</td></tr><tr><td>wpF</td><td colspan=\"5\">.63 .50 .59 n/a .55</td></tr></table>" |
|
}, |
|
"TABREF14": { |
|
"html": null, |
|
"text": "MariaHolmqvist, Sara Stymne, Jody Foo, and Lars Ahrenberg. 2009. Improving alignment for SMT by reordering and augmenting the training corpus. In Proceedings of the Fourth Workshop on Statistical Machine Translation, Athens, Greece, March. Association for Computational Linguistics.", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>Philipp Koehn and Barry Haddow. 2009. Edin-</td><td/></tr><tr><td>burgh's submission to all tracks of the WMT2009</td><td/></tr><tr><td>shared task with reordering and speed improvements</td><td/></tr><tr><td>to Moses. In Proceedings of the Fourth Workshop</td><td/></tr><tr><td>on Statistical Machine Translation, Athens, Greece,</td><td/></tr><tr><td>March. Association for Computational Linguistics.</td><td/></tr><tr><td>Philipp Koehn and Christof Monz. 2006. Manual and</td><td/></tr><tr><td>automatic evaluation of machine translation between</td><td/></tr><tr><td>European languages. In Proceedings of NAACL</td><td/></tr><tr><td>2006 Workshop on Statistical Machine Translation, New York, New York.</td><td>judgments, system translations and automatic scores, are publicly available for</td></tr><tr><td/><td>other researchers to analyze. 8</td></tr></table>" |
|
}, |
|
"TABREF15": { |
|
"html": null, |
|
"text": ".08 \u2021 .63 \u2020 .54 .69 \u2020 .73 \u2021 .83 \u2021 .78 \u2021 .49 .77 \u2021 .75 \u2021 .74 \u2021 .57 \u2020 .74 \u2021 .69 \u2021 .75 \u2021 .84 \u2021 .60 .84 \u2021 .71 \u2021 GOOGLE .15 \u2021 .03 \u2021 .23 \u2020 .50 .43 .24 \u2020 .39 .42 .39 .43 .33 .27 .29 .38 .48 .57 .44 .32 .35 .36 JHU-TROMBLE .75 \u2021 .90 \u2021 .77 \u2021 .81 \u2021 .84 \u2021 .91 \u2021 .94 \u2021 .88 \u2021 .79 \u2021 .83 \u2021 .83 \u2021 .93 \u2021 .89 \u2021 .92 \u2021 .90 \u2021 .94 \u2021 .90 \u2021 .95 \u2021 .91 \u2021 .83 \u2021 LIU .29 \u2020 .65 \u2020 .12 \u2021 .49 .63 .63 .57 .63 .41 .49 .46 .50 .49 .50 .41 .66 \u2020 .53 .59 \u2021 .62 \u2020 .53 RBMT1 .32 .43 .11 \u2021 .46 .42 .46 .50 .61 \u2020 .34 .46 .58 .51 .42 .42 .56 .47 .53 .49 .58 .54 RBMT2 .25 \u2020 .46 .09 \u2021 .37 .45 .33 .45 .23 \u2020 .3 .28 .47 .42 .31 .34 .39 .49 .61 .4 .32 .29 RBMT3 .17 \u2021 .59 \u2020 .02 \u2021 .26 .35 .46 .27 .45 .27 .36 .46 .42 .43 .26 .49 .4 .48 .58 .29 .31 RBMT4 .12 \u2021 .47 .07 \u2021 .37 .4 .45 .52 .60 .39 .39 .45 .39 .31 .29 \u2020 .44 .54 .45 .37 .43 .30 RBMT5 .13 \u2021 .34 .07 \u2021 .30 .24 \u2020 .57 \u2020 .41 .29 .31 .50 .34 .3 .28 \u2020 .43 .30 .49 .57 .3 .49 .21 RWTH .21 .55 .10 \u2021 .41 .49 .55 .46 .46 .60 .44 .57 .48 .51 .41 .56 .64 \u2021 .54 .56 .74 \u2021 .59 STUTTGART .17 \u2021 .43 .13 \u2021 .39 .43 .55 .39 .36 .33 .34 .38 .42 .52 .42 .49 .49 .28 .35 .56 .46 SYSTRAN .11 \u2021 .63 .06 \u2021 .42 .37 .47 .50 .32 .58 .34 .55 .36 .44 .35 .43 .61 \u2020 .46 .41 .33 .44 UEDIN .10 \u2021 .50 .03 \u2021 .35 .49 .46 .39 .52 .55 .29 .39 .52 .35 .33 .42 .58 .43 .56 .59 \u2020 .55 UKA .29 \u2020 .58 .04 \u2021 .32 .47 .63 .55 .54 .64 \u2020 .24 .28 .39 .50 .29 .50 .48 .36 .57 .45 .45 UMD .16 \u2021 .53 .08 \u2021 .38 .49 .43 .63 .68 \u2020 .49 .38 .39 .41 .50 .49 .46 .54 .44 .38 .46 .50 USAAR .19 \u2021 .44 \u2021 .41 .34 .49 .4 .44 .33 .36 .33 .45 .39 .32 .41 .46 .41 .31 .42 .11 BBN-COMBO .14 \u2021 .31 .06 \u2021 .26 \u2020 .44 .44 .48 .36 .38 .23 \u2021 .35 .26 \u2020 .29 .34 .36 .37 .32 .23 \u2020 .38 .32 CMU-COMBO .10 \u2021 .36 .07 \u2021 .37 .37 .36 .48 .40 .30 .28 .53 .41 .4 .10 \u2021 .39 .43 .40 .48 .57 .27 .41 .47 .28 .26 .38 .49 .65 \u2020 .46 .41 .47 RWTH-COMBO .06 \u2021 .38 \u2021 .19 \u2020 .36 .54 .43 .43 .30 .10 \u2021 .33 .56 .22 \u2020 .27 .23 .42 .32 .31 .41 .29 USAAR-COMBO .20 \u2021 .55 .17 \u2021 .3 .39 .57 .45 .59 .32 .27 .33 .47 .32 .33 .27 .16 .55 .44 .4 .50 > OTHERS.22 .51 .06 .38 .44 .52 .49 .49 .50 .33 .44 .48 .44 .42 .41 .47 .56 .48 .46 .51 .43 >= OTHERS .33 .65 .13 .50 .54 .64 .64 .62 .66 .50 .61 .60 .59 .58 .56 .65 .68 .63 .62 .70 .62", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>GENEVA</td><td>GOOGLE</td><td>JHU-TROMBLE</td><td>LIU</td><td>RBMT1</td><td>RBMT2</td><td>RBMT3</td><td>RBMT4</td><td>RBMT5</td><td>RWTH</td><td>STUTTGART</td><td>SYSTRAN</td><td>UEDIN</td><td>UKA</td><td>UMD</td><td>USAAR</td><td>BBN-COMBO</td><td>CMU-COMBO</td><td>CMU-COMBO-HYPOSEL</td><td>RWTH-COMBO</td><td>USAAR-COMBO</td></tr><tr><td>GENEVA</td><td/><td colspan=\"15\">.76 \u2021 .43 .28 .34</td><td>.50</td><td/><td colspan=\"3\">.33 .53 .44</td></tr><tr><td>CMU-COMBO-H</td><td>.3</td><td colspan=\"2\">.46 \u2021</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF16": { |
|
"html": null, |
|
"text": "Sentence-level ranking for the WMT09 German-English News Task", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>GOOGLE</td><td>LIU</td><td>RBMT1</td><td>RBMT2</td><td>RBMT3</td><td>RBMT4</td><td>RBMT5</td><td>RWTH</td><td>STUTTGART</td><td>UEDIN</td><td>UKA</td><td>USAAR</td><td>USAAR-COMBO</td></tr><tr><td>GOOGLE</td><td/><td>.34 \u2020</td><td>.56</td><td>.51</td><td>.55 \u2020</td><td>.44</td><td>.56 \u2020</td><td>.37</td><td>.41</td><td>.42</td><td>.45</td><td>.45</td><td>.43</td></tr><tr><td>LIU</td><td>.58 \u2020</td><td/><td>.62 \u2021</td><td>.55 \u2020</td><td>.55</td><td>.61 \u2021</td><td>.59 \u2020</td><td>.37</td><td>.38</td><td>.47</td><td>.43</td><td>.58 \u2020</td><td>.44</td></tr><tr><td>RBMT1</td><td>.39</td><td>.33 \u2021</td><td/><td>.56 \u2020</td><td>.44</td><td>.50</td><td>.57 \u2020</td><td>.41</td><td>.32 \u2021</td><td>.37</td><td>.35 \u2020</td><td>.45</td><td>.42</td></tr><tr><td>RBMT2</td><td>.35</td><td>.34 \u2020</td><td>.34 \u2020</td><td/><td>.43</td><td>.37</td><td>.40</td><td>.25 \u2021</td><td>.25 \u2021</td><td>.31 \u2021</td><td>.36 \u2020</td><td>.37</td><td>.32 \u2020</td></tr><tr><td>RBMT3</td><td>.31 \u2020</td><td>.35</td><td>.41</td><td>.35</td><td/><td>.37</td><td>.41</td><td>.24 \u2021</td><td>.25 \u2021</td><td>.33 \u2021</td><td>.43</td><td>.49</td><td>.36</td></tr><tr><td>RBMT4</td><td>.48</td><td>.33 \u2021</td><td>.33</td><td>.56</td><td>.55</td><td/><td>.47</td><td>.37</td><td>.35 \u2020</td><td>.34 \u2021</td><td>.45</td><td>.44</td><td>.38</td></tr><tr><td>RBMT5</td><td>.36 \u2020</td><td>.35 \u2020</td><td>.33 \u2020</td><td>.50</td><td>.53</td><td>.33</td><td/><td>.36 \u2020</td><td>.32 \u2021</td><td>.35 \u2020</td><td>.31 \u2021</td><td>.25 \u2021</td><td>.32 \u2021</td></tr><tr><td>RWTH</td><td>.51</td><td>.46</td><td>.50</td><td>.60 \u2021</td><td>.65 \u2021</td><td>.51</td><td>.60 \u2020</td><td/><td>.38</td><td>.47</td><td>.48</td><td>.52</td><td>.54</td></tr><tr><td>STUTTGART</td><td>.50</td><td>.47</td><td>.62 \u2021</td><td>.65 \u2021</td><td>.64 \u2021</td><td>.57 \u2020</td><td>.62 \u2021</td><td>.46</td><td/><td>.52 \u2020</td><td>.54 \u2020</td><td>.66 \u2021</td><td>.53</td></tr><tr><td>UEDIN</td><td>.50</td><td>.37</td><td>.53</td><td>.64 \u2021</td><td>.62 \u2021</td><td>.60 \u2021</td><td>.55 \u2020</td><td>.45</td><td>.28 \u2020</td><td/><td>.41</td><td>.53</td><td>.35</td></tr><tr><td>UKA</td><td>.47</td><td>.42</td><td>.57 \u2020</td><td>.58 \u2020</td><td>.46</td><td>.44</td><td>.62 \u2021</td><td>.35</td><td>.32 \u2020</td><td>.36</td><td/><td>.46</td><td>.41</td></tr><tr><td>USAAR</td><td>.46</td><td>.36 \u2020</td><td>.46</td><td>.55</td><td>.42</td><td>.42</td><td>.48 \u2021</td><td>.42</td><td>.28 \u2021</td><td>.39</td><td>.44</td><td/><td>.41</td></tr><tr><td>USAAR-COMBO</td><td>.37</td><td>.45</td><td>.54</td><td>.55 \u2020</td><td>.55</td><td>.53</td><td>.61 \u2021</td><td>.39</td><td>.40</td><td>.39</td><td>.46</td><td>.52</td><td/></tr><tr><td>> OTHERS</td><td>.44</td><td>.38</td><td>.48</td><td>.55</td><td>.53</td><td>.47</td><td>.54</td><td>.37</td><td>.33</td><td>.39</td><td>.42</td><td>.48</td><td>.41</td></tr><tr><td>>= OTHERS</td><td>.54</td><td>.49</td><td>.57</td><td>.66</td><td>.64</td><td>.58</td><td>.64</td><td>.48</td><td>.43</td><td>.51</td><td>.54</td><td>.58</td><td>.52</td></tr></table>" |
|
}, |
|
"TABREF17": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td colspan=\"12\">: Sentence-level ranking for the WMT09 English-German News Task</td><td/></tr><tr><td/><td>GOOGLE</td><td>NICT</td><td>RBMT1</td><td>RBMT3</td><td>RBMT4</td><td>RBMT5</td><td>RWTH</td><td>TALP-UPC</td><td>UEDIN</td><td>USAAR</td><td>BBN-COMBO</td><td>CMU-COMBO</td><td>USAAR-COMBO</td></tr><tr><td>GOOGLE</td><td/><td>.21 \u2021</td><td>.40</td><td>.40</td><td>.41</td><td>.38</td><td>.23 \u2021</td><td>.35</td><td>.31 \u2020</td><td>.25 \u2021</td><td>.36</td><td>.14</td><td>.21</td></tr><tr><td>NICT</td><td>.74 \u2021</td><td/><td>.52</td><td>.53</td><td>.63 \u2021</td><td>.64 \u2021</td><td>.55 \u2020</td><td>.61 \u2021</td><td>.65 \u2021</td><td>.59 \u2020</td><td>.62 \u2021</td><td>.78 \u2021</td><td>.66 \u2021</td></tr><tr><td>RBMT1</td><td>.56</td><td>.40</td><td/><td>.34</td><td>.44</td><td>.46</td><td>.35</td><td>.48</td><td>.42</td><td>.42</td><td>.57 \u2020</td><td>.52</td><td>.54</td></tr><tr><td>RBMT3</td><td>.40</td><td>.39</td><td>.40</td><td/><td>.34</td><td>.36</td><td>.42</td><td>.4</td><td>.55</td><td>.50</td><td>.57</td><td>.48</td><td>.62 \u2020</td></tr><tr><td>RBMT4</td><td>.55</td><td>.32 \u2021</td><td>.41</td><td>.46</td><td/><td>.47</td><td>.39</td><td>.49</td><td>.49</td><td>.48</td><td>.54</td><td>.57</td><td>.54</td></tr><tr><td>RBMT5</td><td>.54</td><td>.30 \u2021</td><td>.35</td><td>.44</td><td>.38</td><td/><td>.45</td><td>.50</td><td>.49</td><td>.23</td><td>.51</td><td>.51</td><td>.66 \u2021</td></tr><tr><td>RWTH</td><td>.64 \u2021</td><td>.29 \u2020</td><td>.50</td><td>.53</td><td>.53</td><td>.49</td><td/><td>.42</td><td>.46</td><td>.43</td><td>.44</td><td>.51</td><td>.58 \u2021</td></tr><tr><td>TALP-UPC</td><td>.48</td><td>.24 \u2021</td><td>.44</td><td>.47</td><td>.41</td><td>.36</td><td>.39</td><td/><td>.36</td><td>.32</td><td>.47</td><td>.45</td><td>.50</td></tr><tr><td>UEDIN</td><td>.61 \u2020</td><td>.16 \u2021</td><td>.48</td><td>.42</td><td>.41</td><td>.46</td><td>.44</td><td>.43</td><td/><td>.44</td><td>.49</td><td>.51</td><td>.41</td></tr><tr><td>USAAR</td><td>.69 \u2021</td><td>.28 \u2020</td><td>.47</td><td>.44</td><td>.38</td><td>.35</td><td>.43</td><td>.60</td><td>.48</td><td/><td>.64 \u2020</td><td>.58 \u2021</td><td>.56</td></tr><tr><td>BBN-COMBO</td><td>.35</td><td>.20 \u2021</td><td>.32 \u2020</td><td>.36</td><td>.39</td><td>.37</td><td>.36</td><td>.39</td><td>.32</td><td>.31 \u2020</td><td/><td>.50</td><td>.40</td></tr><tr><td>CMU-COMBO</td><td>.19</td><td>.15 \u2021</td><td>.33</td><td>.39</td><td>.32</td><td>.37</td><td>.36</td><td>.31</td><td>.37</td><td>.21 \u2021</td><td>.35</td><td/><td>.31</td></tr><tr><td>USAAR-COMBO</td><td>.23</td><td>.20 \u2021</td><td>.42</td><td>.31 \u2020</td><td>.39</td><td>.25 \u2021</td><td>.27 \u2021</td><td>.35</td><td>.35</td><td>.32</td><td>.36</td><td>.29</td><td/></tr><tr><td>> OTHERS</td><td>.50</td><td>.26</td><td>.42</td><td>.42</td><td>.42</td><td>.42</td><td>.39</td><td>.44</td><td>.43</td><td>.37</td><td>.49</td><td>.49</td><td>.50</td></tr><tr><td>>= OTHERS</td><td>.70</td><td>.37</td><td>.55</td><td>.55</td><td>.53</td><td>.55</td><td>.51</td><td>.59</td><td>.56</td><td>.51</td><td>.64</td><td>.70</td><td>.69</td></tr></table>" |
|
}, |
|
"TABREF18": { |
|
"html": null, |
|
"text": "Sentence-level ranking for the WMT09 Spanish-English News Task", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>GOOGLE</td><td>NUS</td><td>RBMT1</td><td>RBMT3</td><td>RBMT4</td><td>RBMT5</td><td>RWTH</td><td>TALP-UPC</td><td>UEDIN</td><td>USAAR</td><td>USAAR-COMBO</td></tr><tr><td>GOOGLE</td><td/><td>.39</td><td>.21 \u2021</td><td>.49</td><td>.36</td><td>.48</td><td>.34</td><td>.39</td><td>.33</td><td>.36</td><td>.21</td></tr><tr><td>NUS</td><td>.50</td><td/><td>.11 \u2021</td><td>.62 \u2020</td><td>.51</td><td>.51</td><td>.35</td><td>.25</td><td>.47</td><td>.36</td><td>.43</td></tr><tr><td>RBMT1</td><td>.76 \u2021</td><td>.80 \u2021</td><td/><td>.79 \u2021</td><td>.79 \u2021</td><td>.83 \u2021</td><td>.64 \u2021</td><td>.76 \u2021</td><td>.80 \u2021</td><td>.67 \u2021</td><td>.64 \u2021</td></tr><tr><td>RBMT3</td><td>.42</td><td>.31 \u2020</td><td>.16 \u2021</td><td/><td>.30</td><td>.43</td><td>.34</td><td>.29 \u2021</td><td>.56</td><td>.24 \u2021</td><td>.32</td></tr><tr><td>RBMT4</td><td>.47</td><td>.32</td><td>.11 \u2021</td><td>.52</td><td/><td>.49</td><td>.38</td><td>.36</td><td>.51</td><td>.39</td><td>.38</td></tr><tr><td>RBMT5</td><td>.42</td><td>.40</td><td>.11 \u2021</td><td>.49</td><td>.35</td><td/><td>.31 \u2020</td><td>.39</td><td>.47</td><td>.18 \u2020</td><td>.47</td></tr><tr><td>RWTH</td><td>.59</td><td>.52</td><td>.26 \u2021</td><td>.54</td><td>.51</td><td>.61 \u2020</td><td/><td>.46</td><td>.56 \u2020</td><td>.39</td><td>.55 \u2020</td></tr><tr><td>TALP-UPC</td><td>.49</td><td>.41</td><td>.17 \u2021</td><td>.63 \u2021</td><td>.52</td><td>.51</td><td>.29</td><td/><td>.45</td><td>.39</td><td>.41</td></tr><tr><td>UEDIN</td><td>.50</td><td>.32</td><td>.17 \u2021</td><td>.36</td><td>.37</td><td>.46</td><td>.30 \u2020</td><td>.29</td><td/><td>.32 \u2020</td><td>.36</td></tr><tr><td>USAAR</td><td>.58</td><td>.56</td><td>.23 \u2021</td><td>.67 \u2021</td><td>.53</td><td>.47 \u2020</td><td>.51</td><td>.49</td><td>.61 \u2020</td><td/><td>.58</td></tr><tr><td>USAAR-COMBO</td><td>.31</td><td>.45</td><td>.21 \u2021</td><td>.54</td><td>.49</td><td>.50</td><td>.30 \u2020</td><td>.43</td><td>.43</td><td>.33</td><td/></tr><tr><td>> OTHERS</td><td>.50</td><td>.45</td><td>.17</td><td>.56</td><td>.47</td><td>.53</td><td>.38</td><td>.42</td><td>.52</td><td>.37</td><td>.43</td></tr><tr><td>>= OTHERS</td><td>.65</td><td>.59</td><td>.25</td><td>.66</td><td>.61</td><td>.64</td><td>.51</td><td>.58</td><td>.66</td><td>.48</td><td>.61</td></tr></table>" |
|
}, |
|
"TABREF19": { |
|
"html": null, |
|
"text": ".44 .17 \u2021 .63 \u2020 .47 .46 .58 \u2020 .34 .32 .25 \u2020 .42 .48 .46 .28 .38 .58 \u2021 .47 .39 .41 .35 COLUMBIA .56 .56 .37 .71 \u2021 .48 .56 \u2021 .35 .45 .28 .38 .42 .41 .33 .58 .50 .64 \u2020 .52 .64 \u2020 .71 \u2021 .58 \u2020 DCU .27 .29 .15 \u2021 .67 \u2021 .45 .33 .34 .29 .31 .29 .27 .24 .37 .21 \u2020 .39 .61 \u2021 .4 .36 .37 .1 GENEVA .76 \u2021 .54 .73 \u2021 .71 \u2021 .65 \u2021 .73 \u2021 .62 .66 \u2021 .76 \u2021 .46 .79 \u2021 .57 .74 \u2021 .72 \u2021 .67 \u2020 .69 \u2021 .52 .71 \u2021 .67 \u2021 .64 \u2020 GOOGLE .23 \u2020 .17 \u2021 .12 \u2021 .13 \u2021 .21 \u2021 .35 .09 \u2021 .20 \u2021 .27 \u2020 .31 \u2020 .44 .16 \u2021 .21 \u2021 .33 .27 .28 .30 .34 .37 .16 \u2021 JHU .40 .26 .38 .22 \u2021 .60 \u2021 .31 .44 .27 .37 .29 \u2020 .41 .33 .37 .48 .48 .53 .47 .31 .47 .29 LIMSI .4 .16 \u2021 .38 .19 \u2021 .56 .49 .29 .37 .27 .20 \u2021 .38 .23 .33 .29 .38 .61 \u2020 .47 .31 .36 .26 LIUM-SYSTRAN .23 \u2020 .30 .42 .33 .61 \u2021 .27 .45 .48 .31 .41 .44 .32 .35 .41 .39 .54 \u2020 .61 \u2020 .24 .67 \u2020 .36 RBMT1 .53 .23 .42 .19 \u2021 .57 \u2021 .46 .51 .45 .47 .33 .46 .33 .41 .30 .61 .77 \u2021 .51 .41 .50 .41 RBMT3 .57 .63 .55 .15 \u2021 .69 \u2020 .44 .57 .52 .41 .22 \u2021 .38 .51 .43 .43 .31 .57 .46 .47 .38 .55 RBMT4 .58 \u2020 .35 .51 .36 .67 \u2020 .60 \u2020 .63 \u2021 .35 .41 .59 \u2021 .40 .55 .50 .71 \u2021 .52 \u2020 .63 \u2020 .65 \u2020 .65 \u2020 .66 \u2020 .38 RBMT5 .42 .49 .54 .09 \u2021 .38 .49 .49 .37 .27 .29 .34 .38 .39 .51 .18 .42 .58 .48 .50 .60 \u2021 RWTH .38 .39 .45 .32 .63 \u2021 .46 .51 .34 .56 .39 .32 .52 .48 .46 .46 .66 \u2021 .62 \u2020 .61 \u2021 .66 \u2021 .54 UEDIN .41 .21 .31 .19 \u2021 .68 \u2021 .46 .42 .35 .41 .38 .31 .46 .33 .34 .41 .41 .35 .44 .63 \u2021 .37 UKA .40 .31 .54 \u2020 .19 \u2021 .51 .37 .44 .33 .52 .51 .17 \u2021 .27 .32 .49 .34 .39 .53 .36 .44 .29 USAAR .44 .43 .52 .26 \u2020 .62 .48 .46 .30 .30 .58 .17 \u2020 .24 .44 .47 .41 .65 \u2021 .52 .70 \u2021 .55 .41 BBN-COMBO .21 \u2021 .21 \u2020 .12 \u2021 .23 \u2021 .26 .32 .28 \u2020 .23 \u2020 .12 \u2021 .26 .22 \u2020 .49 .09 \u2021 .34 .23 .19 \u2021 .44 .49 \u2020 .28 .21 \u2021 CMU-COMBO .41 .36 .4 .28 .30 .35 .47 .21 \u2020 .29 .42 .23 \u2020 .31 .17 \u2020 .49 .25 .42 .31 .37 .29 .25 CMU-COMBO-H .24 .21 \u2020 .38 .23 \u2021 .37 .39 .31 .24 .31 .41 .28 \u2020 .31 .14 \u2021 .33 .34 .24 \u2021 .18 \u2020 .3 .29 .27 DCU-COMBO .41 .13 \u2021 .42 .20 \u2021 .37 .29 .50 .19 \u2020 .44 .49 .23 \u2020 .46 .20 \u2021 .21 \u2021 .37 .39 .31 .26 .46 .19 \u2021 USAAR-COMBO .41 .25 \u2020 .18 .28 \u2020 .66 \u2021 .53 .52 .48 .41 .38 .53 .17 \u2021 .21 .42 .42 .47 .58 \u2021 .58 .47 .63 \u2021 > OTHERS", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Sentence-level ranking for the WMT09 English-Spanish News Task</td></tr></table>" |
|
}, |
|
"TABREF20": { |
|
"html": null, |
|
"text": "Sentence-level ranking for the WMT09 French-English News Task .66 \u2021 .46 \u2021 .56 \u2021 .57 .74 \u2021 .84 \u2021", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>DCU</td><td>GENEVA</td><td>GOOGLE</td><td>LIMSI</td><td>LIUM-SYSTRAN</td><td>RBMT1</td><td>RBMT3</td><td>RBMT4</td><td>RBMT5</td><td>RWTH</td><td>SYSTRAN</td><td>UEDIN</td><td>UKA</td><td>USAAR</td><td>DCU-COMBO</td><td>USAAR-COMBO</td></tr><tr><td>DCU</td><td/><td colspan=\"2\">.12 \u2021 .39</td><td>.47</td><td>.44</td><td>.33</td><td>.44</td><td>.27</td><td>.45</td><td>.24</td><td>.49</td><td>.24</td><td>.46</td><td colspan=\"2\">.26 \u2020 .39</td><td>.33</td></tr><tr><td colspan=\"11\">GENEVA .56 \u2020 GOOGLE .62 \u2021 .73 \u2021 .69 \u2021 .80 \u2021 .50 .71 \u2021 .50 .52 .46 .15 \u2021 .28 .42 .26 .44 .26 \u2020 .34 .29</td><td>.44</td><td>.24</td><td>.32</td><td>.29</td><td>.36</td><td>.32</td></tr><tr><td>LIMSI</td><td>.25</td><td colspan=\"2\">.16 \u2021 .45</td><td/><td>.48</td><td>.23</td><td>.43</td><td>.30</td><td>.45</td><td>.27</td><td>.42</td><td>.34</td><td>.4</td><td>.36</td><td colspan=\"2\">.53 \u2020 .38</td></tr><tr><td>LIUM-SYSTRAN</td><td>.24</td><td>\u2021</td><td>.45</td><td>.32</td><td/><td colspan=\"2\">.17 \u2020 .29</td><td colspan=\"3\">.17 \u2020 .21 \u2020 .38</td><td>.29</td><td colspan=\"2\">.17 \u2021 .35</td><td colspan=\"2\">.17 \u2020 .41</td><td>.41</td></tr><tr><td>RBMT1</td><td>.39</td><td>.25</td><td>.51</td><td>.51</td><td>.53 \u2020</td><td/><td>.46</td><td>.40</td><td>.29</td><td>.52</td><td>.36</td><td>.60</td><td colspan=\"2\">.63 \u2021 .41</td><td>.44</td><td>.60 \u2020</td></tr><tr><td>RBMT3</td><td>.36</td><td colspan=\"2\">.11 \u2021 .37</td><td>.37</td><td>.52</td><td>.24</td><td/><td>.25</td><td>.27</td><td>.31</td><td>.44</td><td>.43</td><td>.32</td><td>.27</td><td>.53</td><td>.44</td></tr><tr><td>RBMT4</td><td>.36</td><td>.19</td><td colspan=\"2\">.58 \u2020 .37</td><td colspan=\"2\">.57 \u2020 .23</td><td>.61</td><td/><td>.42</td><td>.32</td><td>.50</td><td>.22</td><td>.39</td><td>.44</td><td>.53</td><td>.56</td></tr><tr><td>RBMT5</td><td>.41</td><td>.17</td><td>.53</td><td>.39</td><td colspan=\"2\">.61 \u2020 .38</td><td>.58</td><td>.30</td><td/><td>.41</td><td>.52</td><td>.41</td><td>.48</td><td>.13</td><td>.54</td><td>.60</td></tr><tr><td>RWTH</td><td>.59</td><td colspan=\"2\">.21 \u2020 .63</td><td>.50</td><td>.47</td><td>.29</td><td>.44</td><td>.37</td><td>.31</td><td/><td>.37</td><td>.35</td><td>.51</td><td colspan=\"3\">.16 \u2020 .50 \u2021 .57 \u2020</td></tr><tr><td>SYSTRAN</td><td>.35</td><td colspan=\"2\">.20 \u2021 .33</td><td>.39</td><td>.38</td><td>.40</td><td>.22</td><td>.29</td><td>.26</td><td>.44</td><td/><td>.47</td><td>.33</td><td>.32</td><td>.60</td><td>.45</td></tr><tr><td>UEDIN</td><td>.38</td><td colspan=\"2\">.11 \u2021 .41</td><td>.28</td><td colspan=\"2\">.77 \u2021 .33</td><td>.51</td><td>.44</td><td>.49</td><td>.32</td><td>.37</td><td/><td>.30</td><td>.31</td><td>.56</td><td>.56 \u2021</td></tr><tr><td>UKA</td><td>.36</td><td colspan=\"2\">.09 \u2021 .46</td><td>.4</td><td>.45</td><td colspan=\"2\">.23 \u2021 .50</td><td>.39</td><td>.29</td><td>.29</td><td>.47</td><td>.26</td><td/><td colspan=\"2\">.19 \u2021 .41</td><td>.56 \u2020</td></tr><tr><td>USAAR</td><td colspan=\"2\">.66 \u2020 .27</td><td>.52</td><td>.49</td><td colspan=\"2\">.70 \u2020 .31</td><td>.61</td><td>.29</td><td>.32</td><td colspan=\"2\">.64 \u2020 .62</td><td>.51</td><td>.61 \u2021</td><td/><td colspan=\"2\">.76 \u2021 .65 \u2021</td></tr><tr><td>DCU-COMBO</td><td>.32</td><td colspan=\"2\">.11 \u2021 .30</td><td colspan=\"2\">.18 \u2020 .45</td><td>.22</td><td>.29</td><td>.33</td><td>.29</td><td 
colspan=\"2\">.13 \u2021 .27</td><td>.26</td><td>.41</td><td>.12 \u2021</td><td/><td>.21</td></tr><tr><td>USAAR-COMBO</td><td>.40</td><td>\u2021</td><td>.39</td><td>.17</td><td>.26</td><td colspan=\"2\">.17 \u2020 .28</td><td>.20</td><td>.28</td><td colspan=\"2\">.20 \u2020 .39</td><td colspan=\"4\">.04 \u2021 .06 \u2020 .08 \u2021 .39</td><td/></tr><tr><td>> OTHERS</td><td>.41</td><td>.15</td><td>.47</td><td>.39</td><td>.52</td><td>.29</td><td>.45</td><td>.32</td><td>.35</td><td>.35</td><td>.45</td><td>.34</td><td>.42</td><td>.28</td><td>.51</td><td>.49</td></tr><tr><td>>= OTHERS</td><td>.65</td><td>.38</td><td>.68</td><td>.64</td><td>.73</td><td>.54</td><td>.65</td><td>.59</td><td>.57</td><td>.58</td><td>.65</td><td>.60</td><td>.66</td><td>.48</td><td>.74</td><td>.77</td></tr></table>" |
|
}, |
|
"TABREF21": { |
|
"html": null, |
|
"text": "Sentence-level ranking for the WMT09 English-French News Task", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>CU-BOJAR</td><td>GOOGLE</td><td>UEDIN</td><td>BBN-COMBO</td><td>CMU-COMBO</td></tr><tr><td>CU-BOJAR</td><td/><td>.54 \u2021</td><td>.44</td><td>.45 \u2021</td><td>.52 \u2021</td></tr><tr><td>GOOGLE</td><td>.28 \u2021</td><td/><td>.32 \u2021</td><td>.18 \u2021</td><td>.23</td></tr><tr><td>UEDIN</td><td>.38</td><td>.51 \u2021</td><td/><td>.38</td><td>.45 \u2021</td></tr><tr><td>BBN-COMBO</td><td>.31 \u2021</td><td>.39 \u2021</td><td>.32</td><td/><td>.38 \u2021</td></tr><tr><td>CMU-COMBO</td><td>.28 \u2021</td><td>.29</td><td>.27 \u2021</td><td>.24 \u2021</td><td/></tr><tr><td>> OTHERS</td><td>.31</td><td>.43</td><td>.34</td><td>.31</td><td>.40</td></tr><tr><td>>= OTHERS</td><td>.51</td><td>.75</td><td>.57</td><td>.65</td><td>.73</td></tr></table>" |
|
}, |
|
"TABREF22": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Sentence-level ranking for the WMT09 Czech-English News Task</td></tr></table>" |
|
}, |
|
"TABREF23": { |
|
"html": null, |
|
"text": "", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"7\">: Sentence-level ranking for the WMT09 English-Czech News Task</td></tr><tr><td/><td>MORPHO</td><td>UEDIN</td><td>UMD</td><td>BBN-COMBO</td><td>CMU-COMBO</td><td>CMU-COMBO-HYPOSEL</td></tr><tr><td>MORPHO</td><td/><td>.21 \u2021</td><td>.28 \u2021</td><td>.24 \u2021</td><td>.27 \u2021</td><td>.28 \u2021</td></tr><tr><td>UEDIN</td><td>.70 \u2021</td><td/><td>.59 \u2021</td><td>.45 \u2021</td><td>.55 \u2021</td><td>.50 \u2021</td></tr><tr><td>UMD</td><td>.61 \u2021</td><td>.26 \u2021</td><td/><td>.21 \u2021</td><td>.29</td><td>.38</td></tr><tr><td>BBN-COMBO</td><td>.67 \u2021</td><td>.23 \u2021</td><td>.48 \u2021</td><td/><td>.41</td><td>.52 \u2021</td></tr><tr><td>CMU-COMBO</td><td>.59 \u2021</td><td>.25 \u2021</td><td>.35</td><td>.29</td><td/><td>.42</td></tr><tr><td>CMU-COMBO-HYPOSEL</td><td>.55 \u2021</td><td>.15 \u2021</td><td>.34</td><td>.27 \u2021</td><td>.34</td><td/></tr><tr><td>> OTHERS</td><td>.62</td><td>.22</td><td>.41</td><td>.29</td><td>.37</td><td>.42</td></tr><tr><td>>= OTHERS</td><td>.75</td><td>.45</td><td>.66</td><td>.54</td><td>.62</td><td>.68</td></tr></table>" |
|
}, |
|
"TABREF24": { |
|
"html": null, |
|
"text": "Sentence-level ranking for the WMT09 Hungarian-English News Task GOOGLE .61 .54 .47 .52 .51 .47 .61 .42 .38 .52 .55 .54 .11 \u2021 .51 .48 .34 .49 .32 .53 .52 .50 .59 .53", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td>DE</td><td>HU</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>CZ</td><td>GOOGLE ES</td><td>GOOGLE F R</td><td>RBMT2 DE</td><td>RBMT3 DE</td><td>RBMT3 ES</td><td>RBMT3 F R</td><td>RBMT5 ES</td><td>RBMT5 F R</td><td>BBN-COMBO CZ</td><td>BBN-COMBO DE</td><td>BBN-COMBO ES</td><td>BBN-COMBO F R</td><td>BBN-COMBO HU</td><td>BBN-COMBO XX</td><td>CMU-COMBO-HYPOSEL</td><td>CMU-COMBO-HYPOSEL</td><td>CMU-COMBO CZ</td><td>CMU-COMBO HU</td><td>CMU-COMBO XX</td><td>DCU-COMBO F R</td><td>RWTH-COMBO DE</td><td>RWTH-COMBO XX</td><td>USAAR-COMBO</td></tr><tr><td>GOOGLE ES</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>" |
|
}, |
|
"TABREF25": { |
|
"html": null, |
|
"text": "Sentence-level ranking for the WMT09 Multisource-English News Task", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>RANK</td><td>BLEU</td><td>BLEU-CASED</td><td>BLEU-TER</td><td>BLEUSP</td><td>BLEUSP4114</td><td>MAXSIM</td><td>METEOR-0.6</td><td>METEOR-0.7</td><td>METEOR-RANKING</td><td>NIST</td><td>NIST-CASED</td><td>RTE-ABSOLUTE</td><td>RTE-PAIRWISE</td><td>TER</td><td>TERP</td><td>ULC</td><td>WCD6P4ER</td><td>WPF</td><td>WPBLEU</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"5\">German-English News Task</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>BBN-COMBO</td><td colspan=\"4\">0.68 0.24 0.22 -0.17</td><td colspan=\"16\">0.29 0.31 0.51 0.55 0.6 0.41 7.08 6.78 0.13 0.1 0.54 0.63 0.31 0.45 0.36 0.31</td></tr><tr><td>CMU-COMBO</td><td colspan=\"4\">0.63 0.22 0.21 -0.19</td><td colspan=\"16\">0.28 0.29 0.49 0.54 0.58 0.4 6.95 6.71 0.12 0.09 0.56 0.66 0.29 0.47 0.35 0.29</td></tr><tr><td>CMU-COMBO-HYPOSEL</td><td colspan=\"4\">0.62 0.23 0.21 -0.19</td><td colspan=\"16\">0.28 0.3 0.49 0.54 0.57 0.4 6.79 6.5 0.11 0.09 0.57 0.66 0.29 0.47 0.35 0.3</td></tr><tr><td>GENEVA</td><td colspan=\"4\">0.33 0.1 0.09 -0.33</td><td colspan=\"16\">0.17 0.18 0.38 0.43 0.44 0.30 4.88 4.65 0.03 0.04 0.71 0.86 0.22 0.58 0.25 0.17</td></tr><tr><td>GOOGLE</td><td colspan=\"4\">0.65 0.21 0.20 -0.2</td><td colspan=\"16\">0.27 0.28 0.48 0.54 0.57 0.39 6.85 6.65 0.11 0.11 0.56 0.65 0.29 0.48 0.35 0.28</td></tr><tr><td>JHU-TROMBLE</td><td colspan=\"4\">0.13 0.07 0.06 -0.38</td><td colspan=\"12\">0.09 0.1 0.34 0.43 0.41 0.29 4.90 4.25 0.02 0.02 0.81 1</td><td colspan=\"4\">0.19 0.61 0.22 0.12</td></tr><tr><td>LIU</td><td colspan=\"4\">0.50 0.19 0.18 -0.22</td><td colspan=\"16\">0.25 0.27 0.46 0.51 0.54 0.38 6.35 6.02 0.06 0.05 0.61 0.72 0.27 0.49 0.33 0.26</td></tr><tr><td>RBMT1</td><td colspan=\"4\">0.54 0.14 0.13 -0.29</td><td colspan=\"16\">0.20 0.21 0.43 0.50 0.53 0.37 5.30 5.07 0.04 0.04 0.67 0.76 0.26 0.55 0.29 0.22</td></tr><tr><td>RBMT2</td><td colspan=\"4\">0.64 0.17 0.16 -0.26</td><td colspan=\"16\">0.23 0.24 0.48 0.52 0.55 0.38 6.06 5.75 0.1 0.12 0.63 0.70 0.29 0.51 0.31 0.24</td></tr><tr><td>RBMT3</td><td colspan=\"4\">0.64 0.17 0.16 -0.25</td><td colspan=\"16\">0.23 0.25 0.48 0.52 0.55 0.38 5.98 5.71 0.09 0.09 0.61 0.68 0.29 0.51 0.32 0.25</td></tr><tr><td>RBMT4</td><td colspan=\"4\">0.62 0.16 0.14 -0.27</td><td colspan=\"16\">0.21 0.23 0.45 0.5 0.52 0.36 5.65 5.36 0.06 0.07 0.65 0.72 0.27 0.52 0.30 0.23</td></tr><tr><td>RBMT5</td><td colspan=\"4\">0.66 0.16 0.15 -0.26</td><td colspan=\"16\">0.22 0.24 0.47 0.51 0.54 0.37 5.76 5.52 0.07 0.06 0.63 0.70 0.28 0.52 0.31 0.24</td></tr><tr><td>RWTH</td><td colspan=\"4\">0.50 0.19 0.18 -0.21</td><td colspan=\"16\">0.25 0.26 0.45 0.50 0.53 0.36 6.44 6.24 0.06 0.03 0.60 0.74 0.27 0.49 0.33 0.26</td></tr><tr><td>RWTH-COMBO</td><td colspan=\"4\">0.7 0.23 0.22 -0.18</td><td colspan=\"16\">0.29 0.30 0.50 0.55 0.59 0.41 7.06 6.81 0.11 0.07 0.54 0.63 0.30 0.46 0.36 0.31</td></tr><tr><td>STUTTGART</td><td colspan=\"4\">0.61 0.2 0.18 -0.22</td><td colspan=\"16\">0.26 0.27 0.48 0.52 0.56 0.38 6.39 6.11 0.1 0.06 0.60 0.69 0.29 0.49 0.33 0.27</td></tr><tr><td>SYSTRAN</td><td colspan=\"4\">0.6 0.19 0.17 -0.22</td><td colspan=\"16\">0.24 0.26 0.47 0.52 0.55 0.38 6.40 6.08 0.08 0.07 0.60 0.71 0.28 0.5 0.33 0.26</td></tr><tr><td>UEDIN</td><td colspan=\"4\">0.59 0.20 0.19 -0.22</td><td colspan=\"16\">0.26 0.27 0.47 0.52 0.55 0.38 6.47 6.24 0.07 0.04 0.61 0.70 0.27 0.49 0.34 0.27</td></tr><tr><td>UKA</td><td colspan=\"4\">0.58 0.21 0.2 -0.20</td><td colspan=\"16\">0.27 0.28 0.47 0.52 0.56 0.38 6.66 6.43 0.08 0.04 0.58 0.69 0.28 0.48 0.34 
0.28</td></tr><tr><td>UMD</td><td colspan=\"4\">0.56 0.21 0.19 -0.19</td><td colspan=\"16\">0.26 0.28 0.47 0.52 0.56 0.38 6.74 6.42 0.08 0.04 0.56 0.69 0.28 0.48 0.34 0.27</td></tr><tr><td>USAAR</td><td colspan=\"4\">0.65 0.17 0.15 -0.26</td><td colspan=\"16\">0.23 0.24 0.47 0.51 0.54 0.38 5.89 5.64 0.06 0.05 0.64 0.71 0.28 0.52 0.31 0.24</td></tr><tr><td>USAAR-COMBO</td><td colspan=\"4\">0.62 0.17 0.16 -0.25</td><td colspan=\"16\">0.23 0.24 0.47 0.51 0.55 0.38 5.99 6.85 0.07 0.06 0.64 0.70 0.28 0.51 0.32 0.25</td></tr><tr><td/><td/><td/><td/><td/><td/><td colspan=\"5\">Spanish-English News Task</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>BBN-COMBO</td><td colspan=\"4\">0.64 0.29 0.27 -0.13</td><td colspan=\"16\">0.34 0.35 0.53 0.57 0.62 0.43 7.64 7.35 0.16 0.13 0.51 0.61 0.33 0.42 0.4 0.35</td></tr><tr><td>CMU-COMBO</td><td colspan=\"4\">0.7 0.28 0.27 -0.13</td><td colspan=\"16\">0.33 0.35 0.53 0.58 0.62 0.43 7.65 7.46 0.21 0.2 0.51 0.60 0.34 0.42 0.40 0.36</td></tr><tr><td>GOOGLE</td><td colspan=\"4\">0.70 0.29 0.28 -0.13</td><td colspan=\"16\">0.34 0.35 0.53 0.58 0.62 0.43 7.68 7.50 0.23 0.22 0.5 0.59 0.34 0.42 0.41 0.36</td></tr><tr><td>NICT</td><td colspan=\"4\">0.37 0.22 0.22 -0.19</td><td colspan=\"16\">0.27 0.29 0.48 0.54 0.57 0.39 6.91 6.74 0.1 0.1 0.60 0.71 0.3 0.46 0.36 0.3</td></tr><tr><td>RBMT1</td><td colspan=\"4\">0.55 0.19 0.18 -0.24</td><td colspan=\"16\">0.25 0.26 0.49 0.54 0.57 0.40 6.07 5.93 0.11 0.12 0.62 0.69 0.3 0.49 0.34 0.28</td></tr><tr><td>RBMT3</td><td colspan=\"4\">0.55 0.20 0.2 -0.22</td><td colspan=\"16\">0.26 0.27 0.50 0.54 0.58 0.41 6.24 6.08 0.13 0.14 0.60 0.65 0.31 0.48 0.36 0.29</td></tr><tr><td>RBMT4</td><td colspan=\"4\">0.53 0.2 0.19 -0.22</td><td colspan=\"16\">0.25 0.27 0.48 0.53 0.57 0.4 6.20 6.03 0.10 0.11 0.60 0.67 0.3 0.48 0.35 0.28</td></tr><tr><td>RBMT5</td><td colspan=\"4\">0.55 0.20 0.2 -0.22</td><td colspan=\"16\">0.26 0.27 0.5 0.54 0.58 0.40 6.26 6.10 0.12 0.11 0.6 0.65 0.31 0.48 0.36 0.29</td></tr><tr><td>RWTH</td><td colspan=\"4\">0.51 0.24 0.23 -0.16</td><td colspan=\"16\">0.3 0.31 0.49 0.54 0.58 0.4 7.12 6.95 0.11 0.08 0.56 0.68 0.31 0.45 0.37 0.32</td></tr><tr><td>TALP-UPC</td><td colspan=\"4\">0.59 0.26 0.25 -0.15</td><td colspan=\"16\">0.31 0.33 0.51 0.56 0.6 0.41 7.28 7.02 0.13 0.11 0.54 0.64 0.32 0.44 0.38 0.33</td></tr><tr><td>UEDIN</td><td colspan=\"4\">0.56 0.26 0.25 -0.15</td><td colspan=\"16\">0.32 0.33 0.51 0.56 0.60 0.42 7.25 7.04 0.16 0.1 0.55 0.64 0.32 0.43 0.39 0.34</td></tr><tr><td>USAAR</td><td colspan=\"4\">0.51 0.2 0.19 -0.22</td><td colspan=\"16\">0.25 0.27 0.48 0.54 0.57 0.4 6.31 6.14 0.11 0.09 0.62 0.67 0.3 0.48 0.34 0.28</td></tr><tr><td>USAAR-COMBO</td><td colspan=\"4\">0.69 0.29 0.27 -0.13</td><td colspan=\"16\">0.34 0.35 0.53 0.58 0.62 0.43 7.58 7.25 0.20 0.13 0.51 0.6 0.34 0.42 0.4 0.35</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td colspan=\"4\">French-English News Task</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>BBN-COMBO</td><td colspan=\"4\">0.73 0.31 0.3 -0.11</td><td colspan=\"16\">0.36 0.38 0.54 0.59 0.64 0.45 7.88 7.58 0.14 0.12 0.2 0.20 0.36 0.40 0.41 0.37</td></tr><tr><td>CMU-COMBO</td><td colspan=\"4\">0.66 0.3 0.29 -0.12</td><td colspan=\"16\">0.35 0.36 0.53 0.58 0.63 0.44 7.72 7.57 0.15 0.12 0.24 0.26 0.35 0.41 0.41 0.37</td></tr><tr><td>CMU-COMBO-HYPOSEL</td><td colspan=\"4\">0.71 0.28 0.26 -0.14</td><td colspan=\"16\">0.33 0.35 0.53 0.57 0.61 0.43 7.40 7.15 0.1 0.08 0.31 0.33 0.34 0.42 0.4 0.35</td></tr><tr><td>CMU-STATXFER</td><td colspan=\"4\">0.58 
0.24 0.23 -0.18</td><td colspan=\"16\">0.29 0.31 0.49 0.54 0.58 0.40 6.89 6.75 0.08 0.07 0.38 0.42 0.31 0.46 0.37 0.32</td></tr><tr><td>COLUMBIA</td><td colspan=\"4\">0.50 0.23 0.22 -0.18</td><td colspan=\"16\">0.29 0.30 0.49 0.54 0.58 0.40 6.85 6.68 0.07 0.07 0.36 0.39 0.31 0.46 0.36 0.31</td></tr><tr><td>DCU</td><td colspan=\"4\">0.66 0.27 0.25 -0.15</td><td colspan=\"16\">0.32 0.34 0.52 0.56 0.61 0.42 7.29 6.94 0.09 0.07 0.32 0.34 0.33 0.43 0.38 0.34</td></tr><tr><td>DCU-COMBO</td><td colspan=\"4\">0.67 0.31 0.31 -0.11</td><td colspan=\"16\">0.36 0.37 0.54 0.59 0.64 0.44 7.84 7.69 0.14 0.12 0.21 0.22 0.35 0.41 0.42 0.38</td></tr><tr><td>GENEVA</td><td colspan=\"4\">0.34 0.14 0.14 -0.29</td><td colspan=\"16\">0.21 0.22 0.43 0.49 0.52 0.36 5.32 5.15 0.05 0.05 0.54 0.52 0.26 0.53 0.29 0.22</td></tr><tr><td>GOOGLE</td><td colspan=\"4\">0.76 0.31 0.30 -0.10</td><td colspan=\"7\">0.36 0.37 0.54 0.58 0.63 0.44 8</td><td colspan=\"9\">7.84 0.17 0.13 0.17 0.2 0.36 0.41 0.42 0.38</td></tr><tr><td>JHU</td><td colspan=\"4\">0.62 0.27 0.23 -0.15</td><td colspan=\"16\">0.32 0.33 0.51 0.56 0.6 0.41 7.23 6.68 0.08 0.05 0.33 0.36 0.32 0.43 0.37 0.32</td></tr><tr><td>LIMSI</td><td colspan=\"4\">0.65 0.26 0.25 -0.16</td><td colspan=\"16\">0.30 0.32 0.51 0.56 0.60 0.42 7.02 6.87 0.09 0.07 0.35 0.36 0.33 0.44 0.38 0.33</td></tr><tr><td>LIUM-SYSTRAN</td><td colspan=\"4\">0.60 0.27 0.26 -0.15</td><td colspan=\"16\">0.32 0.33 0.51 0.56 0.60 0.42 7.26 7.10 0.10 0.06 0.33 0.36 0.33 0.43 0.39 0.35</td></tr><tr><td>RBMT1</td><td colspan=\"4\">0.56 0.18 0.18 -0.25</td><td colspan=\"16\">0.24 0.25 0.48 0.53 0.57 0.4 5.89 5.73 0.07 0.06 0.51 0.45 0.3 0.50 0.34 0.26</td></tr><tr><td>RBMT3</td><td colspan=\"4\">0.54 0.2 0.19 -0.22</td><td colspan=\"16\">0.25 0.27 0.48 0.53 0.56 0.39 6.12 5.96 0.07 0.06 0.45 0.45 0.30 0.49 0.35 0.28</td></tr></table>" |
|
}, |
|
"TABREF26": { |
|
"html": null, |
|
"text": "Automatic evaluation metric scores for translations into English", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td>RANK</td><td>BLEU</td><td>BLEU-CASED</td><td>BLEU-TER</td><td>BLEUSP</td><td>BLEUSP4114</td><td>NIST</td><td>NIST-CASED</td><td>TER</td><td>TERP</td><td>WCD6P4ER</td><td>WPF</td><td>WPBLEU</td></tr><tr><td/><td/><td/><td/><td colspan=\"4\">English-German News Task</td><td/><td/><td/><td/><td/><td/></tr><tr><td>GOOGLE</td><td>0.54</td><td>0.15</td><td>0.14</td><td>-0.29</td><td>0.20</td><td>0.22</td><td>5.36</td><td>5.25</td><td>0.62</td><td>0.74</td><td>0.54</td><td>0.3</td><td>0.23</td></tr><tr><td>LIU</td><td>0.49</td><td>0.14</td><td>0.13</td><td>-0.29</td><td>0.2</td><td>0.21</td><td>5.35</td><td>5.18</td><td>0.65</td><td>0.78</td><td>0.54</td><td>0.3</td><td>0.23</td></tr><tr><td>RBMT1</td><td>0.57</td><td>0.11</td><td>0.11</td><td>-0.32</td><td>0.17</td><td>0.19</td><td>4.69</td><td>4.59</td><td>0.67</td><td>0.81</td><td>0.57</td><td>0.28</td><td>0.21</td></tr><tr><td>RBMT2</td><td>0.66</td><td>0.13</td><td>0.13</td><td>-0.30</td><td>0.19</td><td>0.21</td><td>5.08</td><td>4.99</td><td>0.62</td><td>0.75</td><td>0.55</td><td>0.30</td><td>0.23</td></tr><tr><td>RBMT3</td><td>0.64</td><td>0.12</td><td>0.12</td><td>-0.29</td><td>0.2</td><td>0.21</td><td>4.8</td><td>4.71</td><td>0.62</td><td>0.76</td><td>0.54</td><td>0.31</td><td>0.25</td></tr><tr><td>RBMT4</td><td>0.58</td><td>0.11</td><td>0.10</td><td>-0.33</td><td>0.17</td><td>0.18</td><td>4.66</td><td>4.57</td><td>0.7</td><td>0.84</td><td>0.57</td><td>0.27</td><td>0.2</td></tr><tr><td>RBMT5</td><td>0.64</td><td>0.13</td><td>0.12</td><td>-0.3</td><td>0.19</td><td>0.20</td><td>5.03</td><td>4.94</td><td>0.64</td><td>0.79</td><td>0.55</td><td>0.3</td><td>0.23</td></tr><tr><td>RWTH</td><td>0.48</td><td>0.14</td><td>0.13</td><td>-0.28</td><td>0.2</td><td>0.21</td><td>5.51</td><td>5.41</td><td>0.62</td><td>0.78</td><td>0.53</td><td>0.3</td><td>0.23</td></tr><tr><td>STUTTGART</td><td>0.43</td><td>0.12</td><td>0.12</td><td>-0.31</td><td>0.18</td><td>0.20</td><td>5.06</td><td>4.82</td><td>0.67</td><td>0.79</td><td>0.55</td><td>0.29</td><td>0.21</td></tr><tr><td>UEDIN</td><td>0.51</td><td>0.15</td><td>0.15</td><td>-0.27</td><td>0.21</td><td>0.23</td><td>5.53</td><td>5.42</td><td>0.63</td><td>0.77</td><td>0.53</td><td>0.31</td><td>0.24</td></tr><tr><td>UKA</td><td>0.54</td><td>0.15</td><td>0.15</td><td>-0.27</td><td>0.21</td><td>0.22</td><td>5.6</td><td>5.48</td><td>0.62</td><td>0.75</td><td>0.52</td><td>0.31</td><td>0.24</td></tr><tr><td>USAAR</td><td>0.58</td><td>0.12</td><td>0.11</td><td>-0.33</td><td>0.18</td><td>0.19</td><td>4.83</td><td>4.71</td><td>0.69</td><td>0.8</td><td>0.57</td><td>0.28</td><td>0.21</td></tr><tr><td>USAAR-COMBO</td><td>0.52</td><td>0.16</td><td>0.15</td><td>-0.27</td><td>0.21</td><td>0.23</td><td>5.6</td><td>5.39</td><td>0.62</td><td>0.75</td><td>0.52</td><td>0.31</td><td>0.24</td></tr><tr><td/><td/><td/><td/><td colspan=\"4\">English-Spanish News 
Task</td><td/><td/><td/><td/><td/><td/></tr><tr><td>GOOGLE</td><td>0.65</td><td>0.28</td><td>0.27</td><td>-0.15</td><td>0.33</td><td>0.34</td><td>7.27</td><td>7.07</td><td>0.36</td><td>0.42</td><td>0.42</td><td>0.37</td><td>0.31</td></tr><tr><td>NUS</td><td>0.59</td><td>0.25</td><td>0.23</td><td>-0.17</td><td>0.30</td><td>0.31</td><td>6.96</td><td>6.67</td><td>0.48</td><td>0.59</td><td>0.44</td><td>0.34</td><td>0.28</td></tr><tr><td>RBMT1</td><td>0.25</td><td>0.15</td><td>0.14</td><td>-0.27</td><td>0.20</td><td>0.22</td><td>5.32</td><td>5.17</td><td>0.55</td><td>0.66</td><td>0.51</td><td>0.24</td><td>0.16</td></tr><tr><td>RBMT3</td><td>0.66</td><td>0.18</td><td>0.17</td><td>-0.18</td><td>0.28</td><td>0.3</td><td>5.79</td><td>5.63</td><td>0.49</td><td>0.59</td><td>0.45</td><td>0.33</td><td>0.27</td></tr><tr><td>RBMT4</td><td>0.61</td><td>0.21</td><td>0.2</td><td>-0.20</td><td>0.26</td><td>0.28</td><td>6.47</td><td>6.28</td><td>0.52</td><td>0.64</td><td>0.47</td><td>0.31</td><td>0.25</td></tr><tr><td>RBMT5</td><td>0.64</td><td>0.22</td><td>0.21</td><td>-0.2</td><td>0.27</td><td>0.29</td><td>6.53</td><td>6.34</td><td>0.52</td><td>0.64</td><td>0.46</td><td>0.32</td><td>0.26</td></tr><tr><td>RWTH</td><td>0.51</td><td>0.22</td><td>0.21</td><td>-0.18</td><td>0.27</td><td>0.29</td><td>6.83</td><td>6.63</td><td>0.50</td><td>0.65</td><td>0.46</td><td>0.32</td><td>0.26</td></tr><tr><td>TALP-UPC</td><td>0.58</td><td>0.25</td><td>0.23</td><td>-0.17</td><td>0.3</td><td>0.31</td><td>6.96</td><td>6.69</td><td>0.47</td><td>0.58</td><td>0.44</td><td>0.34</td><td>0.28</td></tr><tr><td>UEDIN</td><td>0.66</td><td>0.25</td><td>0.24</td><td>-0.17</td><td>0.30</td><td>0.31</td><td>6.94</td><td>6.73</td><td>0.48</td><td>0.59</td><td>0.44</td><td>0.34</td><td>0.29</td></tr><tr><td>USAAR</td><td>0.48</td><td>0.20</td><td>0.19</td><td>-0.21</td><td>0.26</td><td>0.27</td><td>6.36</td><td>6.16</td><td>0.54</td><td>0.66</td><td>0.47</td><td>0.30</td><td>0.24</td></tr><tr><td>USAAR-COMBO</td><td>0.61</td><td>0.28</td><td>0.26</td><td>-0.14</td><td>0.33</td><td>0.34</td><td>7.36</td><td>6.97</td><td>0.39</td><td>0.48</td><td>0.42</td><td>0.36</td><td>0.31</td></tr><tr><td/><td/><td/><td/><td colspan=\"4\">English-French News 
Task</td><td/><td/><td/><td/><td/><td/></tr><tr><td>DCU</td><td>0.65</td><td>0.24</td><td>0.22</td><td>-0.19</td><td>0.29</td><td>0.30</td><td>6.69</td><td>6.39</td><td>0.63</td><td>0.72</td><td>0.47</td><td>0.38</td><td>0.34</td></tr><tr><td>DCU-COMBO</td><td>0.74</td><td>0.28</td><td>0.27</td><td>-0.15</td><td>0.33</td><td>0.34</td><td>7.29</td><td>7.12</td><td>0.58</td><td>0.67</td><td>0.44</td><td>0.42</td><td>0.38</td></tr><tr><td>GENEVA</td><td>0.38</td><td>0.15</td><td>0.14</td><td>-0.27</td><td>0.20</td><td>0.22</td><td>5.59</td><td>5.39</td><td>0.68</td><td>0.82</td><td>0.53</td><td>0.32</td><td>0.25</td></tr><tr><td>GOOGLE</td><td>0.68</td><td>0.25</td><td>0.24</td><td>-0.17</td><td>0.30</td><td>0.31</td><td>6.90</td><td>6.71</td><td>0.62</td><td>0.7</td><td>0.46</td><td>0.40</td><td>0.36</td></tr><tr><td>LIMSI</td><td>0.64</td><td>0.25</td><td>0.24</td><td>-0.17</td><td>0.3</td><td>0.31</td><td>6.94</td><td>6.77</td><td>0.60</td><td>0.71</td><td>0.46</td><td>0.4</td><td>0.35</td></tr><tr><td>LIUM-SYSTRAN</td><td>0.73</td><td>0.26</td><td>0.24</td><td>-0.17</td><td>0.31</td><td>0.32</td><td>7.02</td><td>6.83</td><td>0.61</td><td>0.71</td><td>0.45</td><td>0.40</td><td>0.36</td></tr><tr><td>RBMT1</td><td>0.54</td><td>0.18</td><td>0.17</td><td>-0.23</td><td>0.24</td><td>0.26</td><td>6.12</td><td>5.96</td><td>0.65</td><td>0.76</td><td>0.5</td><td>0.35</td><td>0.29</td></tr><tr><td>RBMT3</td><td>0.65</td><td>0.22</td><td>0.20</td><td>-0.20</td><td>0.27</td><td>0.28</td><td>6.48</td><td>6.29</td><td>0.63</td><td>0.72</td><td>0.48</td><td>0.38</td><td>0.33</td></tr><tr><td>RBMT4</td><td>0.59</td><td>0.18</td><td>0.17</td><td>-0.24</td><td>0.24</td><td>0.25</td><td>6.02</td><td>5.86</td><td>0.66</td><td>0.77</td><td>0.50</td><td>0.35</td><td>0.3</td></tr><tr><td>RBMT5</td><td>0.57</td><td>0.20</td><td>0.19</td><td>-0.21</td><td>0.26</td><td>0.27</td><td>6.31</td><td>6.15</td><td>0.63</td><td>0.74</td><td>0.49</td><td>0.36</td><td>0.31</td></tr><tr><td>RWTH</td><td>0.58</td><td>0.22</td><td>0.21</td><td>-0.19</td><td>0.27</td><td>0.28</td><td>6.67</td><td>6.51</td><td>0.62</td><td>0.75</td><td>0.48</td><td>0.38</td><td>0.32</td></tr><tr><td>SYSTRAN</td><td>0.65</td><td>0.23</td><td>0.22</td><td>-0.19</td><td>0.28</td><td>0.29</td><td>6.7</td><td>6.47</td><td>0.63</td><td>0.74</td><td>0.47</td><td>0.39</td><td>0.34</td></tr><tr><td>UEDIN</td><td>0.60</td><td>0.24</td><td>0.23</td><td>-0.18</td><td>0.29</td><td>0.30</td><td>6.75</td><td>6.57</td><td>0.62</td><td>0.71</td><td>0.47</td><td>0.39</td><td>0.35</td></tr><tr><td>UKA</td><td>0.66</td><td>0.24</td><td>0.23</td><td>-0.18</td><td>0.29</td><td>0.30</td><td>6.82</td><td>6.65</td><td>0.61</td><td>0.71</td><td>0.46</td><td>0.39</td><td>0.35</td></tr><tr><td>USAAR</td><td>0.48</td><td>0.19</td><td>0.18</td><td>-0.23</td><td>0.24</td><td>0.26</td><td>6.16</td><td>5.98</td><td>0.66</td><td>0.76</td><td>0.5</td><td>0.34</td><td>0.29</td></tr><tr><td>USAAR-COMBO</td><td>0.77</td><td>0.27</td><td>0.25</td><td>-0.15</td><td>0.32</td><td>0.33</td><td>7.24</td><td>6.93</td><td>0.59</td><td>0.69</td><td>0.44</td><td>0.41</td><td>0.37</td></tr><tr><td/><td/><td/><td/><td colspan=\"4\">English-Czech News 
Task</td><td/><td/><td/><td/><td/><td/></tr><tr><td>CU-BOJAR</td><td>0.61</td><td>0.14</td><td>0.13</td><td>-0.28</td><td>0.21</td><td>0.23</td><td>5.18</td><td>4.96</td><td>0.63</td><td>0.82</td><td>0.01</td><td>n/a</td><td>n/a</td></tr><tr><td>CU-TECTOMT</td><td>0.48</td><td>0.07</td><td>0.07</td><td>-0.35</td><td>0.14</td><td>0.16</td><td>4.17</td><td>4.03</td><td>0.71</td><td>0.96</td><td>0.01</td><td>n/a</td><td>n/a</td></tr><tr><td>EUROTRANXP</td><td>0.67</td><td>0.1</td><td>0.09</td><td>-0.33</td><td>0.16</td><td>0.18</td><td>4.38</td><td>4.26</td><td>0.7</td><td>0.93</td><td>0.01</td><td>n/a</td><td>n/a</td></tr><tr><td>GOOGLE</td><td>0.66</td><td>0.14</td><td>0.13</td><td>-0.30</td><td>0.20</td><td>0.22</td><td>4.96</td><td>4.84</td><td>0.66</td><td>0.82</td><td>0.01</td><td>n/a</td><td>n/a</td></tr><tr><td>PCTRANS</td><td>0.67</td><td>0.09</td><td>0.09</td><td>-0.34</td><td>0.17</td><td>0.18</td><td>4.34</td><td>4.19</td><td>0.71</td><td>0.90</td><td>0.01</td><td>n/a</td><td>n/a</td></tr><tr><td>UEDIN</td><td>0.53</td><td>0.14</td><td>0.13</td><td>-0.29</td><td>0.21</td><td>0.22</td><td>5.04</td><td>4.9</td><td>0.64</td><td>0.84</td><td>0.01</td><td>n/a</td><td>n/a</td></tr><tr><td/><td/><td/><td/><td colspan=\"4\">English-Hungarian News Task</td><td/><td/><td/><td/><td/><td/></tr><tr><td>MORPHO</td><td>0.79</td><td>0.08</td><td>0.08</td><td>-0.37</td><td>0.15</td><td>0.16</td><td>4.04</td><td>3.92</td><td>0.83</td><td>1</td><td>0.6</td><td>n/a</td><td>n/a</td></tr><tr><td>UEDIN</td><td>0.32</td><td>0.1</td><td>0.09</td><td>-0.33</td><td>0.17</td><td>0.18</td><td>4.48</td><td>4.32</td><td>0.78</td><td>1</td><td>0.56</td><td>n/a</td><td>n/a</td></tr></table>" |
|
}, |
|
"TABREF27": { |
|
"html": null, |
|
"text": "Automatic evaluation metric scores for translations out of English", |
|
"num": null, |
|
"type_str": "table", |
|
"content": "<table/>" |
|
} |
|
} |
|
} |
|
} |