|
{ |
|
"paper_id": "W10-0102", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T05:06:13.640057Z" |
|
}, |
|
"title": "Active Semi-Supervised Learning for Improving Word Alignment", |
|
"authors": [ |
|
{ |
|
"first": "Vamshi", |
|
"middle": [], |
|
"last": "Ambati", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": { |
|
"addrLine": "5000 Forbes Avenue", |
|
"postCode": "15213", |
|
"settlement": "Pittsburgh", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": { |
|
"addrLine": "5000 Forbes Avenue", |
|
"postCode": "15213", |
|
"settlement": "Pittsburgh", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Carnegie Mellon University", |
|
"location": { |
|
"addrLine": "5000 Forbes Avenue", |
|
"postCode": "15213", |
|
"settlement": "Pittsburgh", |
|
"region": "PA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Word alignment models form an important part of building statistical machine translation systems. Semi-supervised word alignment aims to improve the accuracy of automatic word alignment by incorporating full or partial alignments acquired from humans. Such dedicated elicitation effort is often expensive and depends on availability of bilingual speakers for the language-pair. In this paper we study active learning query strategies to carefully identify highly uncertain or most informative alignment links that are proposed under an unsupervised word alignment model. Manual correction of such informative links can then be applied to create a labeled dataset used by a semi-supervised word alignment model. Our experiments show that using active learning leads to maximal reduction of alignment error rates with reduced human effort.", |
|
"pdf_parse": { |
|
"paper_id": "W10-0102", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Word alignment models form an important part of building statistical machine translation systems. Semi-supervised word alignment aims to improve the accuracy of automatic word alignment by incorporating full or partial alignments acquired from humans. Such dedicated elicitation effort is often expensive and depends on availability of bilingual speakers for the language-pair. In this paper we study active learning query strategies to carefully identify highly uncertain or most informative alignment links that are proposed under an unsupervised word alignment model. Manual correction of such informative links can then be applied to create a labeled dataset used by a semi-supervised word alignment model. Our experiments show that using active learning leads to maximal reduction of alignment error rates with reduced human effort.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "The success of statistical approaches to Machine Translation (MT) can be attributed to the IBM models (Brown et al., 1993) that characterize wordlevel alignments in parallel corpora. Parameters of these alignment models are learnt in an unsupervised manner using the EM algorithm over sentence-level aligned parallel corpora. While the ease of automatically aligning sentences at the word-level with tools like GIZA++ (Och and Ney, 2003) has enabled fast development of statistical machine translation (SMT) systems for various language pairs, the quality of alignment is typically quite low for language pairs that diverge from the independence assumptions made by the generative models. Also, an immense amount of parallel data enables better estimation of the model parameters, but a large number of language pairs still lack parallel data.", |
|
"cite_spans": [ |
|
{ |
|
"start": 102, |
|
"end": 122, |
|
"text": "(Brown et al., 1993)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 418, |
|
"end": 437, |
|
"text": "(Och and Ney, 2003)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Two directions of research have been pursued for improving generative word alignment. The first is to relax or update the independence assumptions based on more information, usually syntactic, from the language pairs (Cherry and Lin, 2006) . The second is to use extra annotation, typically word-level human alignment for some sentence pairs, in conjunction with the parallel data to learn alignment in a semi-supervised manner. Our research is in the direction of the latter, and aims to reduce the effort involved in hand-generation of word alignments by using active learning strategies for careful selection of word pairs to seek alignment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 217, |
|
"end": 239, |
|
"text": "(Cherry and Lin, 2006)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Active learning for MT has not yet been explored to its full potential. Much of the literature has explored one task -selecting sentences to translate and add to the training corpus (Haffari et al., 2009) . In this paper we explore active learning for word alignment, where the input to the active learner is a sentence pair (s J 1 , t I 1 ), present in two different languages S = {s * } and T = {t * }, and the annotation elicited from human is a set of links {(j, i) : j = 0 \u2022 \u2022 \u2022 J; i = 0 \u2022 \u2022 \u2022 I}. Unlike previous approaches, our work does not require elicitation of full alignment for the sentence pair, which could be effortintensive. We use standard active learning query strategies to selectively elicit partial alignment information. This partial alignment information is then fed into a semi-supervised word aligner which per-forms an improved word alignment over the entire parallel corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 204, |
|
"text": "(Haffari et al., 2009)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Rest of the paper is organized as follows. We present related work in Section 2. Section 3 gives an overview of unsupervised word alignment models and its semi-supervised improvisation. Section 4 details our active learning framework with discussion of the link selection strategies in Section 5. Experiments in Section 6 have shown that our selection strategies reduce alignment error rates significantly over baseline. We conclude with discussion on future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Semi-supervised learning is a broader area of Machine Learning, focusing on improving the learning process by usage of unlabeled data in conjunction with labeled data (Chapelle et al., 2006) . Many semi-supervised learning algorithms use co-training framework, which assumes that the dataset has multiple views, and training different classifiers on a non-overlapping subset of these features provides additional labeled data (Zhu, 2005) . Active query selection for training a semi-supervised learning algorithm is an interesting method that has been applied to clustering problems. Tomanek and Hahn (2009) applied active semi supervised learning to the sequence-labeling problem. Tur et al. (2005) describe active and semi-supervised learning methods for reducing labeling effort for spoken language understanding. They train supervised classification algorithms for the task of call classification and apply it to a large unlabeled dataset to select the least confident instances for human labeling.", |
|
"cite_spans": [ |
|
{ |
|
"start": 167, |
|
"end": 190, |
|
"text": "(Chapelle et al., 2006)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 437, |
|
"text": "(Zhu, 2005)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 607, |
|
"text": "Tomanek and Hahn (2009)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 682, |
|
"end": 699, |
|
"text": "Tur et al. (2005)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Researchers have begun to explore semisupervised word alignment models that use both labeled and unlabeled data. Fraser and Marcu (2006) pose the problem of alignment as a search problem in log-linear space with features coming from the IBM alignment models. The log-linear model is trained on the available labeled data to improve performance. They propose a semisupervised training algorithm which alternates between discriminative error training on the labeled data to learn the weighting parameters and maximum-likelihood EM training on unlabeled data to estimate the parameters. Callison-Burch et al. (2004) also improve alignment by interpolating human alignments with automatic alignments. They observe that while working with such datasets, alignments of higher quality should be given a much higher weight than the lower-quality alignments. Wu et al. (2006) learn separate models from labeled and unlabeled data using the standard EM algorithm. The two models are then interpolated as a learner in the semi-supervised AdaBoost algorithm to improve word alignment.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 136, |
|
"text": "Fraser and Marcu (2006)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 612, |
|
"text": "Callison-Burch et al. (2004)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 850, |
|
"end": 866, |
|
"text": "Wu et al. (2006)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Active learning has been applied to various fields of Natural Language Processing like statistical parsing, entity recognition among others (Hwa, 2004; Tang et al., 2001; Shen et al., 2004) . In case of MT, the potential of active learning has remained largely unexplored. For Statistical Machine Translation, application of active learning has been focused on the task of selecting the most informative sentences to train the model, in order to reduce cost of data acquisition. Recent work in this area discussed multiple query selection strategies for a Statistical Phrase Based Translation system (Haffari et al., 2009) . Their framework requires source text to be translated by the system and the translated data is used in a self-training setting to train MT models. To our knowledge, we are not aware of any work that has looked at reducing human effort by selective elicitation of alignment information using active learning techniques.", |
|
"cite_spans": [ |
|
{ |
|
"start": 140, |
|
"end": 151, |
|
"text": "(Hwa, 2004;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 152, |
|
"end": 170, |
|
"text": "Tang et al., 2001;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 189, |
|
"text": "Shen et al., 2004)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 600, |
|
"end": 622, |
|
"text": "(Haffari et al., 2009)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "3 Word Alignment 3.1 IBM models IBM models provide a generative framework for performing word alignment of parallel corpus. Given two strings from source and target languages", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "s J 1 = s 1 , \u2022 \u2022 \u2022 , s j , \u2022 \u2022 \u2022 s J and t I 1 = t 1 , \u2022 \u2022 \u2022 , t i , \u2022 \u2022 \u2022 t I ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "an alignment A is defined as a subset of the Cartesian product of the word indices as shown in Eq 1. In IBM models, since alignment is treated as a function, all the source positions must be covered exactly once (Brown et al., 1993) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 212, |
|
"end": 232, |
|
"text": "(Brown et al., 1993)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "A \u2286 {(j, i) : j = 0 \u2022 \u2022 \u2022 J; i = 0 \u2022 \u2022 \u2022 I} (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For the task of translation, we would ideally want to model P (s I 1 |t J 1 ), which is the probability of observing source sentence s I 1 given target sentence t J 1 . This requires a lot of parallel corpus for estimation and so it is then factored over the word alignment A for the sentence pair, which is a hidden variable. Word alignment is therefore a by-product in the process of modeling translation. We can also represent the same under some parameterization of \u03b8, which is the model we are interested to estimate.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (s J 1 |t I 1 ) = a J 1 P r(s J 1 , A|t J 1 ) (2) = A p \u03b8 (s J 1 , A|t I 1 )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given a parallel corpus U of sentence pairs {(s k , t k ) : k = 1, \u2022 \u2022 \u2022 , K} the parameters can be estimated by maximizing the conditional likelihood over the data. IBM models (Brown et al., 1993 ) from 1 to 5 are different ways of factoring the probability model to estimate the parameter set \u03b8. For example in the simplest of the models, IBM model 1, only the lexical translation probability is considered treating each word being translated independent of the other words.\u03b8", |
|
"cite_spans": [ |
|
{ |
|
"start": 177, |
|
"end": 196, |
|
"text": "(Brown et al., 1993", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "= arg max \u03b8 K k=1 A p \u03b8 (s k , A|t k )", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The parameters of the model above are estimated as\u03b8, using the EM algorithm. We can also extract the Viterbi alignment ,\u00c2, for all the sentence pairs, which is the alignment with the highest probability under the current model parameters \u03b8:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A = arg max A p\u03b8(s J 1 , A|t I 1 )", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The alignment models are asymmetric and differ with the choice of translation direction. We can therefore perform the above after switching the direction of the language pair and obtain models and Viterbi alignments for the corpus as represented below:\u03b8", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "= arg max \u03b8 K k=1 a p \u03b8 (t k , a|s k )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "A = arg max A p\u03b8(t I 1 , A|s J 1 )", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Given the Viterbi alignment for each sentence pair in the parallel corpus, we can also compute the word-level alignment probabilities using simple relative likelihood estimation for both the directions. As we will discuss in Section 5, the alignments and the computed lexicons form an important part of our link selection strategies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P (s j /t i ) = s count(t i , s j ;\u00c2) s count(t i ) (8) P (t i /s j ) = s count(t i , s j ;\u00c2) s count(s j )", |
|
"eq_num": "(9)" |
|
} |
|
], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
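As an illustration of the relative-likelihood lexicon estimation in Equations (8)-(9), the following is a minimal Python sketch. It is not part of the paper; the function name lexicon_probs and the assumed data layout (token lists per sentence pair plus per-sentence link sets) are choices made only for this example.

```python
from collections import defaultdict

def lexicon_probs(corpus, viterbi_links):
    """Relative-frequency lexicons in the spirit of Eqs. (8)-(9).

    corpus        : list of (source_tokens, target_tokens) pairs
    viterbi_links : list of sets of (j, i) links, j indexing source, i target
    Returns P(s|t) and P(t|s) as dicts keyed by (source_word, target_word).
    """
    count_st = defaultdict(float)  # joint counts count(t_i, s_j; A-hat)
    count_t = defaultdict(float)   # how often each target word is linked
    count_s = defaultdict(float)   # how often each source word is linked
    for (src, tgt), links in zip(corpus, viterbi_links):
        for (j, i) in links:
            s, t = src[j], tgt[i]
            count_st[(s, t)] += 1.0
            count_t[t] += 1.0
            count_s[s] += 1.0
    p_s_given_t = {(s, t): c / count_t[t] for (s, t), c in count_st.items()}
    p_t_given_s = {(s, t): c / count_s[s] for (s, t), c in count_st.items()}
    return p_s_given_t, p_t_given_s
```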
|
{

"text": "We perform all our experiments on a symmetrized alignment that combines the bidirectional alignments using heuristics as discussed in (Koehn et al., 2007). We represent this alignment as A = {a_ij : i = 0, ..., J \u2208 s_1^J; j = 0, ..., I \u2208 t_1^I}.",

"cite_spans": [

{

"start": 134,

"end": 154,

"text": "(Koehn et al., 2007)",

"ref_id": "BIBREF13"

}

],

"ref_spans": [],

"eq_spans": [],

"section": "Related Work",

"sec_num": "2"

},
|
{ |
|
"text": "We use an extended version of MGIZA++ (Gao and Vogel, 2008) to perform the constrained semisupervised word alignment. To get full benefit from the manual alignments, MGIZA++ modifies all alignment models used in the standard training procedure, i.e. the IBM1, HMM, IBM3 and IBM4 models. Manual alignments are incorporated in the EM training phase of these models as constraints that restrict the summation over all possible alignment paths. Typically in the EM procedure for IBM models, the training procedure requires for each source sentence position, the summation over all positions in the target sentence. The manual alignments allow for one-to-many alignments and many-to-many alignments in both directions. For each position i in the source sentence, there can be more than one manually aligned target word. The restricted training will allow only those paths, which are consistent with the manual alignments. Therefore, the restriction of the alignment paths reduces to restricting the summation in EM.", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 59, |
|
"text": "(Gao and Vogel, 2008)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semi-Supervised Word Alignment", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Active learning attempts to optimize performance by selecting the most informative instances to label, where 'informativeness' is defined as maximal expected improvement in accuracy. The objective is to select optimal instance for an external expert to label and then run the learning method on the newly-labeled and previously-labeled instances to minimize prediction or translation error, repeating until either the maximal number of external queries is reached or a desired accuracy level is achieved. Several studies (Tong and Koller, 2002; Nguyen and Smeulders, 2004; Donmez and Carbonell, 2008) show that active learning greatly helps to reduce the labeling effort in various classification tasks. We discuss our active learning setup for word alignment in Algorithm 1. We start with an unlabeled dataset U = {(S k , T k )}, indexed by k, and a seed pool of partial alignment links A 0 = {a k ij , \u2200s i \u2208 S k , t j \u2208 T k }. Each a k ij represents an alignment link from a sentence pair k that connects source word s i with t j .", |
|
"cite_spans": [ |
|
{ |
|
"start": 521, |
|
"end": 544, |
|
"text": "(Tong and Koller, 2002;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 545, |
|
"end": 572, |
|
"text": "Nguyen and Smeulders, 2004;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 573, |
|
"end": 600, |
|
"text": "Donmez and Carbonell, 2008)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning for Word Alignment", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "This is usually an empty set at iteration t = 0. We iterate for T iterations. We take a pool-based active learning strategy, where we have access to all the automatically aligned links and we can score the links based on our active learning query strategy. The query strategy uses the automatically trained alignment model \u03b8 t from the current iteration t, for scoring the links. Re-training and re-tuning an SMT system for each link at a time is computationally infeasible. We therefore perform batch learning by selecting a set of N links scored high by our query strategy. We seek manual corrections for the selected links and add the alignment data to the current labeled dataset. The word-level aligned labeled dataset is then provided to our semi-supervised word alignment algorithm, which uses it to produces the alignment model \u03b8 t+1 for U .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning for Word Alignment", |
|
"sec_num": "4" |
|
}, |
|
{

"text": "Algorithm 1 AL FOR WORD ALIGNMENT. 1: Unlabeled Data Set: U = {(s_k, t_k)} 2: Manual Alignment Set: A_0 = {a_ij^k : \u2200 s_i \u2208 S_k, t_j \u2208 T_k} 3: Train Semi-supervised Word Alignment using (U, A_0) \u2192 \u03b8_0 4: N: batch size 5: for t = 0 to T do 6: L_t = LinkSelection(U, A_t, \u03b8_t, N) 7: Request Human Alignment for L_t 8: A_{t+1} = A_t + L_t 9: Re-train Semi-Supervised Word Alignment on (U, A_{t+1}) \u2192 \u03b8_{t+1} 10: end for",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Active Learning for Word Alignment",

"sec_num": "4"

},
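The batch active learning loop of Algorithm 1 can be sketched in Python as below. This is an illustrative reading of the pseudocode, not the authors' implementation; the helpers train_semisupervised, link_selection, and ask_human are hypothetical callables standing in for MGIZA++ training, the query strategies of Section 5, and the human annotator.

```python
def active_alignment_loop(U, A0, T, N,
                          train_semisupervised, link_selection, ask_human):
    """Batch active learning for word alignment (sketch of Algorithm 1).

    U  : unlabeled parallel corpus, list of (source, target) sentence pairs
    A0 : seed set of manually aligned links (possibly empty)
    T  : number of iterations
    N  : batch size, i.e. links elicited per iteration
    The three callables are assumed to be supplied by the surrounding system.
    """
    A = set(A0)
    theta = train_semisupervised(U, A)          # step 3: initial model
    for t in range(T):                          # step 5
        L_t = link_selection(U, A, theta, N)    # step 6: query strategy picks N links
        corrections = ask_human(L_t)            # step 7: manual alignment for L_t
        A |= set(corrections)                   # step 8: grow the labeled pool
        theta = train_semisupervised(U, A)      # step 9: re-train on (U, A_{t+1})
    return theta, A
```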
|
{ |
|
"text": "We can iteratively perform the algorithm for a defined number of iterations T or until a certain desired performance is reached, which is measured by align-ment error rate (AER) (Fraser and Marcu, 2007) in the case of word alignment. In a more typical scenario, since reducing human effort or cost of elicitation is the objective, we iterate until the available budget is exhausted.", |
|
"cite_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 202, |
|
"text": "(Fraser and Marcu, 2007)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Active Learning for Word Alignment", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We propose multiple query selection strategies for our active learning setup. The scoring criteria is designed to select alignment links across sentence pairs that are highly uncertain under current automatic translation models. These links are difficult to align correctly by automatic alignment and will cause incorrect phrase pairs to be extracted in the translation model, in turn hurting the translation quality of the SMT system. Manual correction of such links produces the maximal benefit to the model. We would ideally like to elicit the least number of manual corrections possible in order to reduce the cost of data acquisition. In this section we discuss our link selection strategies based on the standard active learning paradigm of 'uncertainty sampling' (Lewis and Catlett, 1994) . We use the automatically trained translation model \u03b8 t for scoring each link for uncertainty. In particular \u03b8 t consists of bidirectional lexicon tables computed from the bidirectional alignments as discussed in Section 3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 770, |
|
"end": 795, |
|
"text": "(Lewis and Catlett, 1994)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Query Strategies for Link Selection", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The automatic Viterbi alignment produced by the alignment models is used to obtain translation lexicons, as discussed in Section 3. These lexicons capture the conditional distributions of source-giventarget P (s/t) and target-given-source P (t/s) probabilities at the word level where s i \u2208 S and t j \u2208 T . We define certainty of a link as the harmonic mean of the bidirectional probabilities. The selection strategy selects the least scoring links according to the formula below which corresponds to links with maximum uncertainty:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Uncertainty based: Bidirectional Alignment Scores", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "Score(a ij /s I 1 , t J 1 ) = 2 * P (t j /s i ) * P (s i /t j ) P (t j /s i ) + P (s i /t j )", |
|
"eq_num": "(10)" |
|
} |
|
], |
|
"section": "Uncertainty based: Bidirectional Alignment Scores", |
|
"sec_num": "5.1" |
|
}, |
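A minimal sketch of the uncertainty-based selection of Equation (10) follows, assuming the bidirectional lexicons are stored as dictionaries keyed by (source word, target word) as in the earlier sketch; the epsilon floor for unseen pairs is an addition for numerical safety, not from the paper.

```python
def link_certainty(p_t_given_s, p_s_given_t, s_word, t_word, eps=1e-12):
    """Certainty of a link as the harmonic mean of the bidirectional lexicon
    probabilities (Eq. 10); lower values mean higher uncertainty."""
    p_ts = p_t_given_s.get((s_word, t_word), eps)
    p_st = p_s_given_t.get((s_word, t_word), eps)
    return 2.0 * p_ts * p_st / (p_ts + p_st)

def select_uncertain_links(candidate_links, p_t_given_s, p_s_given_t, N):
    """Return the N lowest-scoring (most uncertain) candidate links;
    candidate_links holds (sentence_id, s_word, t_word) triples."""
    scored = sorted(
        candidate_links,
        key=lambda link: link_certainty(p_t_given_s, p_s_given_t, link[1], link[2]))
    return scored[:N]
```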
|
{ |
|
"text": "Confidence estimation for MT output is an interesting area with meaningful initial exploration (Blatz et al., 2004; Ueffing and Ney, 2007) . Given a sentence pair (s I 1 , t J 1 ) and its word alignment, we compute two confidence metrics at alignment link levelbased on the posterior link probability and a simple IBM Model 1 as seen in Equation 13. We select the alignment links that the initial word aligner is least confident according to our metric and seek manual correction of the links. We use t2s to denote computation using higher order (IBM4) target-givensource models and s2t to denote source-given-target models. Targeting some of the uncertain parts of word alignment has already been shown to improve translation quality in SMT (Huang, 2009) . In our current work, we use confidence metrics as an active learning sampling strategy to obtain most informative links. We also experiment with other confidence metrics as discussed in (Ueffing and Ney, 2007) , especially the IBM 1 model score metric which showed some improvement as well.", |
|
"cite_spans": [ |
|
{ |
|
"start": 95, |
|
"end": 115, |
|
"text": "(Blatz et al., 2004;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 116, |
|
"end": 138, |
|
"text": "Ueffing and Ney, 2007)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 742, |
|
"end": 755, |
|
"text": "(Huang, 2009)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 967, |
|
"text": "(Ueffing and Ney, 2007)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Confidence Based: Posterior Alignment probabilities", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "P t2s (a ij , t J 1 /s I 1 ) = p t2s (t j /s i , a ij \u2208 A) M i p t2s (t j /s i ) (11) P s2t (a ij , s I 1 /t J 1 ) = p s2t (s i /t j , a ij \u2208 A) N i p t2s (t j /s i )", |
|
"eq_num": "(12)" |
|
} |
|
], |
|
"section": "Confidence Based: Posterior Alignment probabilities", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Conf (a ij /S, T ) = 2 * P t2s * P s2t P t2s + P s2t (13)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Confidence Based: Posterior Alignment probabilities", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The generative alignments produced differ based on the choice of direction of the language pair. We use A s2t to denote alignment in the source to target direction and A t2s to denote the target to source direction. We consider these alignments to be two experts that have two different views of the alignment process. We formulate our query strategy to select links, where the agreement differs across these two alignments. In general query by committee is a standard sampling strategy in active learning (Freund et al., 1997) , where the committee consists of any number of experts with varying opinions, in this case alignments in different directions. We formulate a query by committee sampling strategy for word alignment as shown in Equation 14. In order to break ties, we extend this approach to select the link with higher average frequency of occurrence of words involved in the link. Score(a ij ) = \u03b1 where (14)", |
|
"cite_spans": [ |
|
{ |
|
"start": 506, |
|
"end": 527, |
|
"text": "(Freund et al., 1997)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Agreement Based: Query by Committee", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "\u03b1 = \uf8f1 \uf8f2 \uf8f3 2 a ij \u2208 A t2s \u2229 A t2s 1 a ij \u2208 A t2s \u222a A t2s 0 otherwise 6 Experiments 6.1 Data Analysis", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Agreement Based: Query by Committee", |
|
"sec_num": "5.3" |
|
}, |
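The agreement score of Equation (14) and the frequency-based tie-break can be sketched as follows, assuming the two directional alignments are available as sets of (j, i) position pairs for a given sentence pair; the function and parameter names are illustrative only, not from the paper.

```python
def qbc_scores(links, A_s2t, A_t2s):
    """Agreement score alpha from Eq. (14) for each candidate (j, i) link:
    2 if both directional alignments contain it, 1 if only one does, 0 otherwise."""
    def alpha(link):
        # booleans add up to 0, 1, or 2
        return (link in A_s2t) + (link in A_t2s)
    return {link: alpha(link) for link in links}

def tie_break_by_frequency(links, src_tokens, tgt_tokens, src_freq, tgt_freq):
    """Order equally scored links by the average corpus frequency of the two
    words they connect, preferring the more frequent pair."""
    def avg_freq(link):
        j, i = link
        return (src_freq.get(src_tokens[j], 0) + tgt_freq.get(tgt_tokens[i], 0)) / 2.0
    return sorted(links, key=avg_freq, reverse=True)
```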
|
{ |
|
"text": "To run our active learning and semi-supervised word alignment experiments iteratively, we simulate the setup by using a parallel corpus for which the gold standard human alignment is already available. We experiment with two language pairs -Chinese-English and Arabic-English. Corpus-level statistics for both language pairs can be seen in Table 1 and their alignment link level statistics can be seen in Table 2 . Both datasets were released by LDC as part of the GALE project. Chinese-English dataset consists of 21,863 sentence pairs with complete manual alignment. The human alignment for this dataset is much denser than the automatic word alignment. On an average each source word is linked to more than one target word. Similarly, the Arabic-English dataset consisting of 29,876 sentence pairs also has a denser manual alignment. Automatic word alignment in both cases was computed as a symmetrized version of the bidirectional alignments obtained from using GIZA++ (Och and Ney, 2003) in each direction separately.", |
|
"cite_spans": [ |
|
{ |
|
"start": 973, |
|
"end": 992, |
|
"text": "(Och and Ney, 2003)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 340, |
|
"end": 347, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 405, |
|
"end": 412, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Agreement Based: Query by Committee", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We first perform an unsupervised word alignment of the parallel corpus. We then use the learned model in running our link selection algorithm over the entire alignments to determine the most uncertain links according to each active learning strategy. The links are then looked up in the gold standard human alignment database and corrected. In scenarios where an alignment link is not present in the gold standard data for the source word, we introduce a NULL alignment constraint, else we select all the links as given in the gold standard. The aim of our work is to show that active learning can help in selecting informative alignment links, which if manually labeled can reduce the overall alignment error rate of the given corpus. We, therefore measure the reduction of alignment error rate (AER) of a semi-supervised word aligner that uses this extra information to align the corpus. We plot performance curves for both Chinese-English, Figure 1 and Arabic-English, Figure 2, with number of manual links elicited on x-axis and AER on y-axis. In each iteration of the experiment, we gradually increase the number of links selected from gold standard and make them available to the semi-supervised word aligner and measure the overall reduction of AER on the corpus. We compare our link selection strategies to a baseline approach, where links are selected at random for manual correction.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 943, |
|
"end": 951, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
}, |
|
{ |
|
"start": 972, |
|
"end": 978, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Word Alignment Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "All our approaches perform equally or better than the baseline for both language pairs. Query by committee (qbc) performs similar to the baseline in Chinese-English and only slightly better for Arabic- English. This could be due to our committee consisting of two alignments that differ only in direction and so are not sufficient in deciding for uncertainty. We will be exploring alternative formulations to this strategy. Confidence based and uncertainty based metrics perform significantly better than the baseline in both language pairs. We can interpret the improvements in two ways. For the same number of manual alignments elicited, our selection strategies select links that provide higher reduction of error when compared to the baseline. An alternative interpretation is that assuming a uniform cost per link, our best selection strategy achieves similar performance to the baseline, at a much lower cost of elicitation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word Alignment Results", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We also perform end-to-end machine translation experiments to show that our improvement of alignment quality leads to an improvement of translation scores. For Chinese-English, we train a standard phrase-based SMT system (Koehn et al., 2007) over the available 21,863 sentences. We tune on the MT-Eval 2004 dataset and test on a subset of MT-Eval 2005 dataset consisting of 631 sentences. The language model we use is built using only the English side of the parallel corpus. We understand that this language model is not the optimal choice, but we are interested in testing the word alignment accuracy, which primarily affects the translation model. We first obtain the baseline score by training in an unsupervised manner, where no manual alignment is used. We also train a configuration, where we substitute the final word alignment with gold standard manual alignment for the entire parallel corpus. This is an upper bound on the translation accuracy that can be achieved by any alignment link selection algorithm for this dataset. We now take our best link selection criteria, which is the confidence based method and re-train the MT system after eliciting manual information for only 20% of the alignment links. We observe that at this point we have reduced the AER from 37.09 to 26.57. The translation accuracy reported in Table 3 , as measured by BLEU (Papineni et al., 2002) and METEOR (Lavie and Agarwal, 2007) , also shows significant improvement and approaches the quality achieved using gold standard data. We did not perform MT experiments with Arabic-English dataset due to the incompatibility of tokenization schemes between the manually aligned parallel corpora and publicly available evaluation sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 241, |
|
"text": "(Koehn et al., 2007)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1360, |
|
"end": 1383, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1395, |
|
"end": 1420, |
|
"text": "(Lavie and Agarwal, 2007)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1330, |
|
"end": 1337, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Translation Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Word-Alignment is a particularly challenging problem and has been addressed in a completely unsupervised manner thus far (Brown et al., 1993) . While generative alignment models have been successful, lack of sufficient data, model assumptions and local optimum during training are well known problems. Semi-supervised techniques use partial manual alignment data to address some of these issues. We have shown that active learning strategies can reduce the effort involved in eliciting human alignment data. The reduction in effort is due to careful selection of maximally uncertain links that provide the most benefit to the alignment model when used in a semi-supervised training fashion. Experiments on Chinese-English have shown considerable improvements.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 141, |
|
"text": "(Brown et al., 1993)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In future, we wish to work with word alignments for other language pairs as well as study the effect of manual alignments by varying the size of available parallel data. We also plan to obtain alignments from non-experts over online marketplaces like Amazon Mechanical Turk to further reduce the cost of annotation. We will be experimenting with obtaining full-alignment vs. partial alignment from nonexperts. Our hypothesis is that, humans are good at performing tasks of smaller size and so we can extract high quality alignments in the partial alignment case. Cost of link annotation in our current work is assumed to be uniform, but this needs to be revisited. We will also experiment with active learning techniques for identifying sentence pairs with very low alignment confidence, where obtaining full-alignment is equivalent to obtaining multiple partial alignments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Future Work", |
|
"sec_num": "8" |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This research was partially supported by DARPA under grant NBCHC080097. Any opinions, findings, and conclusions expressed in this paper are those of the authors and do not necessarily reflect the views of the DARPA. The first author would like to thank Qin Gao for the semi-supervised word alignment software and help with running experiments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Confidence estimation for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Blatz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Erin", |
|
"middle": [], |
|
"last": "Fitzgerald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simona", |
|
"middle": [], |
|
"last": "Gandrabur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cyril", |
|
"middle": [], |
|
"last": "Goutte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Kulesza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Sanchis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Ueffing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of Coling", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "315--321", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John Blatz, Erin Fitzgerald, George Foster, Simona Gan- drabur, Cyril Goutte, Alex Kulesza, Alberto Sanchis, and Nicola Ueffing. 2004. Confidence estimation for machine translation. In Proceedings of Coling 2004, pages 315-321, Geneva, Switzerland, Aug 23-Aug 27. COLING.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "The mathematics of statistical machine translation: parameter estimation", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"Della" |
|
], |
|
"last": "Vincent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Della Pietra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mercer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1993, |
|
"venue": "Computational Linguistics", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "263--311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F. Brown, Vincent J. Della Pietra, Stephen A. Della Pietra, and Robert L. Mercer. 1993. The mathemat- ics of statistical machine translation: parameter esti- mation. Computational Linguistics, 19(2):263-311.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Statistical machine translation with word-and sentence-aligned parallel corpora", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Callison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "-", |
|
"middle": [], |
|
"last": "Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Talbot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "ACL 2004", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris Callison-Burch, David Talbot, and Miles Osborne. 2004. Statistical machine translation with word-and sentence-aligned parallel corpora. In ACL 2004, page 175, Morristown, NJ, USA. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Semi-Supervised Learning", |
|
"authors": [ |
|
{ |
|
"first": "O", |
|
"middle": [], |
|
"last": "Chapelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Sch\u00f6lkopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Zien", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "O. Chapelle, B. Sch\u00f6lkopf, and A. Zien, editors. 2006. Semi-Supervised Learning. MIT Press, Cambridge, MA.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Soft syntactic constraints for word alignment through discriminative training", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Cherry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the COLING/ACL on Main conference poster sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "105--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Cherry and Dekang Lin. 2006. Soft syntactic constraints for word alignment through discriminative training. In Proceedings of the COLING/ACL on Main conference poster sessions, pages 105-112, Morris- town, NJ, USA.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Optimizing estimated loss reduction for active sampling in rank learning", |
|
"authors": [ |
|
{ |
|
"first": "Pinar", |
|
"middle": [], |
|
"last": "Donmez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [ |
|
"G" |
|
], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "ICML '08: Proceedings of the 25th international conference on Machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "248--255", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pinar Donmez and Jaime G. Carbonell. 2008. Opti- mizing estimated loss reduction for active sampling in rank learning. In ICML '08: Proceedings of the 25th international conference on Machine learning, pages 248-255, New York, NY, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Semisupervised training for statistical word alignment", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "ACL-44: Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "769--776", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Fraser and Daniel Marcu. 2006. Semi- supervised training for statistical word alignment. In ACL-44: Proceedings of the 21st International Con- ference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics, pages 769-776, Morristown, NJ, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Measuring word alignment quality for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Fraser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Marcu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Comput. Linguist", |
|
"volume": "33", |
|
"issue": "3", |
|
"pages": "293--303", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Fraser and Daniel Marcu. 2007. Measuring word alignment quality for statistical machine transla- tion. Comput. Linguist., 33(3):293-303.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Selective sampling using the query by committee algorithm", |
|
"authors": [ |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Freund", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Seung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eli", |
|
"middle": [], |
|
"last": "Shamir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naftali", |
|
"middle": [], |
|
"last": "Tishby", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Machine. Learning", |
|
"volume": "28", |
|
"issue": "2-3", |
|
"pages": "133--168", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoav Freund, Sebastian H. Seung, Eli Shamir, and Naf- tali Tishby. 1997. Selective sampling using the query by committee algorithm. Machine. Learning., 28(2- 3):133-168.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Parallel implementations of word alignment tool", |
|
"authors": [ |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephan", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Software Engineering, Testing, and Quality Assurance for Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qin Gao and Stephan Vogel. 2008. Parallel implemen- tations of word alignment tool. In Software Engi- neering, Testing, and Quality Assurance for Natural Language Processing, pages 49-57, Columbus, Ohio, June. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Active learning for statistical phrase-based machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxim", |
|
"middle": [], |
|
"last": "Roy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Sarkar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of HLT NAACL 2009", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "415--423", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gholamreza Haffari, Maxim Roy, and Anoop Sarkar. 2009. Active learning for statistical phrase-based ma- chine translation. In Proceedings of HLT NAACL 2009, pages 415-423, Boulder, Colorado, June. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Confidence measure for word alignment", |
|
"authors": [ |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "932--940", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fei Huang. 2009. Confidence measure for word align- ment. In Proceedings of the Joint ACL and IJCNLP, pages 932-940, Suntec, Singapore, August. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Sample selection for statistical parsing", |
|
"authors": [ |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Comput. Linguist", |
|
"volume": "30", |
|
"issue": "3", |
|
"pages": "253--276", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rebecca Hwa. 2004. Sample selection for statistical parsing. Comput. Linguist., 30(3):253-276.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Moses: Open source toolkit for statistical machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hieu", |
|
"middle": [], |
|
"last": "Hoang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [ |
|
"Birch" |
|
], |
|
"last": "Mayne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Callison-Burch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcello", |
|
"middle": [], |
|
"last": "Federico", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Bertoldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brooke", |
|
"middle": [], |
|
"last": "Cowan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wade", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christine", |
|
"middle": [], |
|
"last": "Moran", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "ACL Demonstration Session", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch Mayne, Christopher Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Chris- tine Moran, Richard Zens, Chris Dyer, Ondrej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In ACL Demonstration Session.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Meteor: an automatic metric for mt evaluation with high levels of correlation with human judgments", |
|
"authors": [ |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhaya", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "WMT 2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "228--231", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alon Lavie and Abhaya Agarwal. 2007. Meteor: an au- tomatic metric for mt evaluation with high levels of correlation with human judgments. In WMT 2007, pages 228-231, Morristown, NJ, USA.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Heterogeneous uncertainty sampling for supervised learning", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Catlett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proceedings of the Eleventh International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--156", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David D. Lewis and Jason Catlett. 1994. Heterogeneous uncertainty sampling for supervised learning. In In Proceedings of the Eleventh International Conference on Machine Learning, pages 148-156. Morgan Kauf- mann.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Active learning using pre-clustering", |
|
"authors": [ |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Hieu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arnold", |
|
"middle": [], |
|
"last": "Nguyen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smeulders", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "ICML", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hieu T. Nguyen and Arnold Smeulders. 2004. Active learning using pre-clustering. In ICML.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A systematic comparison of various statistical alignment models", |
|
"authors": [ |
|
{ |
|
"first": "Josef", |
|
"middle": [], |
|
"last": "Franz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "19--51", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2003. A system- atic comparison of various statistical alignment mod- els. Computational Linguistics, pages 19-51.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACL 2002", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evalua- tion of machine translation. In ACL 2002, pages 311- 318, Morristown, NJ, USA.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Multi-criteria-based active learning for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chew-Lim", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "ACL '04: Proceedings of the 42nd Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Shen, Jie Zhang, Jian Su, Guodong Zhou, and Chew- Lim Tan. 2004. Multi-criteria-based active learning for named entity recognition. In ACL '04: Proceed- ings of the 42nd Annual Meeting on Association for Computational Linguistics, page 589, Morristown, NJ, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Active learning for statistical natural language parsing", |
|
"authors": [ |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoqiang", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "ACL '02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "120--127", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Min Tang, Xiaoqiang Luo, and Salim Roukos. 2001. Ac- tive learning for statistical natural language parsing. In ACL '02, pages 120-127, Morristown, NJ, USA.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Semi-supervised active learning for sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Tomanek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Udo", |
|
"middle": [], |
|
"last": "Hahn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1039--1047", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Katrin Tomanek and Udo Hahn. 2009. Semi-supervised active learning for sequence labeling. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pages 1039-1047, Suntec, Singapore, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Support vector machine active learning with applications to text classification", |
|
"authors": [ |
|
{ |
|
"first": "Simon", |
|
"middle": [], |
|
"last": "Tong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daphne", |
|
"middle": [], |
|
"last": "Koller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Journal of Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "45--66", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Simon Tong and Daphne Koller. 2002. Support vector machine active learning with applications to text clas- sification. Journal of Machine Learning, pages 45-66.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Combining active and semi-supervised learning for spoken language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Gokhan", |
|
"middle": [], |
|
"last": "Tur", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tr", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Schapire", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Speech Communication", |
|
"volume": "45", |
|
"issue": "2", |
|
"pages": "171--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gokhan Tur, Dilek Hakkani-Tr, and Robert E. Schapire. 2005. Combining active and semi-supervised learning for spoken language understanding. Speech Commu- nication, 45(2):171 -186.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Word-level confidence estimation for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Nicola", |
|
"middle": [], |
|
"last": "Ueffing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hermann", |
|
"middle": [], |
|
"last": "Ney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Comput. Linguist", |
|
"volume": "33", |
|
"issue": "1", |
|
"pages": "9--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicola Ueffing and Hermann Ney. 2007. Word-level confidence estimation for machine translation. Com- put. Linguist., 33(1):9-40.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Boosting statistical word alignment using labeled and unlabeled data", |
|
"authors": [ |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhanyi", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the COLING/ACL on Main conference poster sessions", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "913--920", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hua Wu, Haifeng Wang, and Zhanyi Liu. 2006. Boosting statistical word alignment using labeled and unlabeled data. In Proceedings of the COLING/ACL on Main conference poster sessions, pages 913-920, Morris- town, NJ, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Semi-Supervised Learning Literature Survey", |
|
"authors": [ |
|
{ |
|
"first": "X", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "X. Zhu. 2005. Semi-Supervised Learning Lit- erature Survey. Technical Report 1530, Com- puter Sciences, University of Wisconsin-Madison.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Chinese-English: Link Selection Results" |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"type_str": "figure", |
|
"uris": null, |
|
"text": "Arabic-English: Link Selection Results" |
|
}, |
|
"TABREF1": { |
|
"text": "Corpus Statistics of Human Data", |
|
"content": "<table><tr><td colspan=\"3\">Alignment Automatic Links Manual Links</td></tr><tr><td>Ch-En</td><td>491,887</td><td>588,075</td></tr><tr><td>Ar-En</td><td>786,223</td><td>712,583</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF2": { |
|
"text": "Alignment Statistics of Human Data", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"text": "Effect of Alignment on Translation Quality", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |