Description (string, lengths 18 to 161k)
Code (string, lengths 15 to 300k)
Disable matplotlib plotting in test code: patch plt.gca and plt.show, and silently pass on ImportError if matplotlib is not installed. After each test session ends (either doctest or unit test), unload any loaded corpora: first wait for the test to end, then iterate over nltk.corpus and call _unload() on every attribute that is a CorpusReader with an _unload method.
import pytest from nltk.corpus.reader import CorpusReader @pytest.fixture(autouse=True) def mock_plot(mocker): try: import matplotlib.pyplot as plt mocker.patch.object(plt, "gca") mocker.patch.object(plt, "show") except ImportError: pass @pytest.fixture(scope="module", autouse=True) def teardown_loaded_corpora(): yield import nltk.corpus for name in dir(nltk.corpus): obj = getattr(nltk.corpus, name, None) if isinstance(obj, CorpusReader) and hasattr(obj, "_unload"): obj._unload()
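A minimal companion sketch, assuming matplotlib and pytest-mock are installed and that the test lives under the conftest above: the autouse mock_plot fixture lets plotting calls run headless.
def test_plot_is_mocked():
    import matplotlib.pyplot as plt
    plt.show()              # patched to a MagicMock by mock_plot, so no window opens
    assert plt.show.called  # the call is recorded on the mock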
The probability doctest uses HMM, which requires NumPy; skip the probability doctest if NumPy is not available.
def setup_module(): import pytest pytest.importorskip("numpy")
Skip a test via pytest.skip() if the binary executable is not found; keyword arguments are passed to nltk.internals.find_binary. Likewise, skip a test via pytest.skip() if the name_pattern jar is not found; keyword arguments are passed to nltk.internals.find_jar. check_jar currently skips unconditionally because the doctests requiring jars are inconsistent on the CI. TODO: investigate why the CoreNLP tests that rely on this check_jar failed on the CI (https://github.com/nltk/nltk/pull/3060#issuecomment-1268355108).
from nltk.internals import find_binary, find_jar def check_binary(binary: str, **args): import pytest try: find_binary(binary, **args) except LookupError: pytest.skip(f"Skipping test because the {binary} binary was not found.") def check_jar(name_pattern: str, **args): import pytest pytest.skip( "Skipping test because the doctests requiring jars are inconsistent on the CI." )
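A hypothetical usage sketch (the binary name "prover9" is only an example, not taken from this file): a test module can guard its doctests on an external tool.
def setup_module():
    check_binary("prover9")  # calls pytest.skip(...) when the binary cannot be found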
Natural Language Toolkit: language model unit tests. (C) 2001-2023 NLTK Project. Author: Ilia Kurenkov <ilia.kurenkov@gmail.com>. URL: https://www.nltk.org. For license information, see LICENSE.TXT. Tests for NgramCounter that only involve lookup, no modification (totals, counter growth on lookup, unigram access, conditional frequency distributions, seen and unseen ngram counts), plus tests for NgramCounter training on empty inputs, unigrams, bigrams, mixed-order ngrams, and illegal sentences.
import unittest import pytest from nltk import FreqDist from nltk.lm import NgramCounter from nltk.util import everygrams class TestNgramCounter: @classmethod def setup_class(self): text = [list("abcd"), list("egdbe")] self.trigram_counter = NgramCounter( everygrams(sent, max_len=3) for sent in text ) self.bigram_counter = NgramCounter(everygrams(sent, max_len=2) for sent in text) self.case = unittest.TestCase() def test_N(self): assert self.bigram_counter.N() == 16 assert self.trigram_counter.N() == 21 def test_counter_len_changes_with_lookup(self): assert len(self.bigram_counter) == 2 self.bigram_counter[50] assert len(self.bigram_counter) == 3 def test_ngram_order_access_unigrams(self): assert self.bigram_counter[1] == self.bigram_counter.unigrams def test_ngram_conditional_freqdist(self): case = unittest.TestCase() expected_trigram_contexts = [ ("a", "b"), ("b", "c"), ("e", "g"), ("g", "d"), ("d", "b"), ] expected_bigram_contexts = [("a",), ("b",), ("d",), ("e",), ("c",), ("g",)] bigrams = self.trigram_counter[2] trigrams = self.trigram_counter[3] self.case.assertCountEqual(expected_bigram_contexts, bigrams.conditions()) self.case.assertCountEqual(expected_trigram_contexts, trigrams.conditions()) def test_bigram_counts_seen_ngrams(self): assert self.bigram_counter[["a"]]["b"] == 1 assert self.bigram_counter[["b"]]["c"] == 1 def test_bigram_counts_unseen_ngrams(self): assert self.bigram_counter[["b"]]["z"] == 0 def test_unigram_counts_seen_words(self): assert self.bigram_counter["b"] == 2 def test_unigram_counts_completely_unseen_words(self): assert self.bigram_counter["z"] == 0 class TestNgramCounterTraining: @classmethod def setup_class(self): self.counter = NgramCounter() self.case = unittest.TestCase() @pytest.mark.parametrize("case", ["", [], None]) def test_empty_inputs(self, case): test = NgramCounter(case) assert 2 not in test assert test[1] == FreqDist() def test_train_on_unigrams(self): words = list("abcd") counter = NgramCounter([[(w,) for w in words]]) assert not counter[3] assert not counter[2] self.case.assertCountEqual(words, counter[1].keys()) def test_train_on_illegal_sentences(self): str_sent = ["Check", "this", "out", "!"] list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]] with pytest.raises(TypeError): NgramCounter([str_sent]) with pytest.raises(TypeError): NgramCounter([list_sent]) def test_train_on_bigrams(self): bigram_sent = [("a", "b"), ("c", "d")] counter = NgramCounter([bigram_sent]) assert not bool(counter[3]) def test_train_on_mix(self): mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)] counter = NgramCounter([mixed_sent]) unigrams = ["h"] bigram_contexts = [("a",), ("c",)] trigram_contexts = [("e", "f")] self.case.assertCountEqual(unigrams, counter[1].keys()) self.case.assertCountEqual(bigram_contexts, counter[2].keys()) self.case.assertCountEqual(trigram_contexts, counter[3].keys())
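An illustration (not part of the test file) of why test_N expects 16 and 21: everygrams with max_len=2 yields 4 + 3 = 7 ngrams for "abcd" and 5 + 4 = 9 for "egdbe".
from nltk.util import everygrams
text = [list("abcd"), list("egdbe")]
print(sum(len(list(everygrams(sent, max_len=2))) for sent in text))  # 16 unigrams + bigrams
print(sum(len(list(everygrams(sent, max_len=3))) for sent in text))  # 21 unigrams + bigrams + trigrams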
Natural Language Toolkit: language model unit tests. (C) 2001-2023 NLTK Project. Author: Ilia Kurenkov <ilia.kurenkov@gmail.com>. URL: https://www.nltk.org. For license information, see LICENSE.TXT.

MLE bigram scores: unseen ngrams should yield 0, and unseen unigrams should also be 0. Total unigrams: 14; count('a') = 2, count('y') = 3. Entropy over ngrams seen during training (ngram: log score): ('<s>', 'a'): -1; ('a', 'b'): -1; ('b', '<UNK>'): -1; ('<UNK>', 'a'): -1.585; ('a', 'd'): -1; ('d', '</s>'): -1; total log score -6.585, average (entropy) 1.0975. In MLE, even one unseen ngram makes entropy and perplexity infinite. Unigram text (word: score, log score): '<s>': 0.1429, -2.8074; 'a': 0.1429, -2.8074; 'c': 0.0714, -3.8073; '<UNK>': 0.2143, -2.2224; 'd': 0.1429, -2.8074; 'c': 0.0714, -3.8073; '</s>': 0.1429, -2.8074; total log score about -21.07, average (entropy) 3.0095.

MLE trigram scores: count('d' | ('b', 'c')) = 1 with count(('b', 'c')) = 1; count('d' | 'c') = 1 with count('c') = 1. The total number of tokens is 18, of which 'a' occurred 2 times; 'z' is in the vocabulary but unseen; out-of-vocabulary words use the '<UNK>' score.

Lidstone bigram scores (gamma = 0.1): count('d' | 'c') = 1 becomes 1 + 0.1, and the denominator sum of (count(w | 'c') + 0.1) over the vocabulary becomes 1.8. For unigrams: total unigrams 14, vocab size 8, denominator 14 + 0.8 = 14.8; count('a') = 2 becomes 2.1; in vocabulary but unseen, count('z') = 0 becomes 0.1; out of vocabulary uses the '<UNK>' score, count('<UNK>') = 3 becomes 3.1. Unlike MLE, Lidstone can handle completely novel ngrams. Entropy text (ngram: score, log score): ('<s>', 'a'): 0.3929, -1.3479; ('a', 'c'): 0.0357, -4.8074; ('c', '<UNK>'): 0.0556, -4.1699; ('<UNK>', 'd'): 0.0263, -5.2479; ('d', 'c'): 0.0357, -4.8074; ('c', '</s>'): 0.0556, -4.1699; total log score -24.5504, average (entropy) 4.0917.

Lidstone trigram scores: the logic is the same as for the bigram model if we choose a word that hasn't appeared after ('b', 'c'); the trigram score is now a basic sanity check.

Laplace bigram scores: count('d' | 'c') = 1 becomes 2, and the denominator sum of (count(w | 'c') + 1) over the vocabulary becomes 9. For unigrams: total unigrams 14, vocab size 8, denominator 14 + 8 = 22; count('a') = 2 becomes 3; in vocabulary but unseen, count('z') = 0 becomes 1; out of vocabulary uses the '<UNK>' score, count('<UNK>') = 3 becomes 4. Unlike MLE, Laplace can handle completely novel ngrams. Entropy text (ngram: score, log score): ('<s>', 'a'): 0.2, -2.3219; ('a', 'c'): 0.1, -3.3219; ('c', '<UNK>'): 0.1111, -3.1699; ('<UNK>', 'd'): 0.0909, -3.4594; ('d', 'c'): 0.1, -3.3219; ('c', '</s>'): 0.1111, -3.1699; total log score -18.7651, average (entropy) 3.1275.

Witten-Bell trigram scores: for unigram scores, by default revert to regular MLE; total unigrams 18, vocab size 7; count('c') = 1; in vocabulary but unseen, count('z') = 0; out of vocabulary uses the '<UNK>' score, count('<UNK>') = 3. For P('c' | 'b'): 2 words follow 'b' and 'b' occurred a total of 2 times, so gamma('b') = 2 / (2 + 2) = 0.5; the MLE score of 'c' given 'b' is 0.5 and MLE('c') = 1/18, about 0.055; the expected score is (1 - gamma) * MLE('c' | 'b') + gamma * MLE('c'). Building on that, take ('a', 'b', 'c') as the trigram: 1 word follows ('a', 'b') and ('a', 'b') occurred 1 time, so gamma(('a', 'b')) = 1 / (1 + 1) = 0.5 and MLE('c' | 'a', 'b') = 1. For P('c' | ('z', 'b')): the ngram ('z', 'b', 'c') was not seen, so we use P('c' | 'b') (see issue #2332).

Kneser-Ney trigram scores. Notation explained: for all subsequent calculations, '*' is a placeholder for any word/character, e.g. '*b' stands for all bigrams that end in 'b' and '*b*' for all trigrams that contain 'b' in the middle; count(ngram) counts all instances (tokens) of an ngram; unique(ngram) counts unique instances (types) of an ngram. P('c') = count('*c') / unique('**') = 1/14; P('z') = count('*z') / unique('**') = 0/14 ('z' is in the vocabulary but was not seen during training); P('y') is out of vocabulary, so it uses the '<UNK>' score: P('y') = P('<UNK>') = count('*<UNK>') / unique('**') = 3/14. We start with P('c' | 'b') = alpha('bc') + gamma('b') * P('c'), where alpha('bc') = max(unique('*bc') - discount, 0) / unique('*b*') = max(1 - 0.75, 0) / 2 = 0.125 and gamma('b') = discount * unique('b*') / unique('*b*') = 0.75 * 2 / 2 = 0.75. Building on that, P('c' | ('a', 'b')) = alpha('abc') + gamma('ab') * P('c' | 'b'), where alpha('abc') = max(count('abc') - discount, 0) / count('ab*') = max(1 - 0.75, 0) / 1 = 0.25 and gamma('ab') = discount * unique('ab*') / count('ab*') = 0.75 * 1 / 1 = 0.75. P('c' | ('z', 'b')): the ngram ('z', 'b', 'c') was not seen, so we use P('c' | 'b') (see issue #2332).

Absolute discounting trigram scores: for unigram scores, revert to uniform counts: P('c') = count('c') / count('*'); in vocabulary but unseen, count('z') = 0; out of vocabulary uses the '<UNK>' score, count('<UNK>') = 3. P('c' | 'b') = alpha('bc') + gamma('b') * P('c'), where alpha('bc') = max(count('bc') - discount, 0) / count('b*') = max(1 - 0.75, 0) / 2 = 0.125 and gamma('b') = discount * unique('b*') / count('b*') = 0.75 * 2 / 2 = 0.75. Building on that, P('c' | ('a', 'b')) = alpha('abc') + gamma('ab') * P('c' | 'b'), where alpha('abc') = max(count('abc') - discount, 0) / count('ab*') = max(1 - 0.75, 0) / 1 = 0.25 and gamma('ab') = discount * unique('ab*') / count('ab*') = 0.75 * 1 / 1 = 0.75. P('c' | ('z', 'b')): the ngram ('z', 'b', 'c') was not seen, so we use P('c' | 'b') (see issue #2332).

Stupid Backoff trigram scores: for unigram scores, revert to uniform: total bigrams 18; in vocabulary but unseen, bigrams ending with 'z' = 0; out of vocabulary uses the '<UNK>' score, count('<UNK>') = 3. 'c' follows 1 time out of 2 after 'b'; 'c' always follows ('a', 'b'); the ngram ('z', 'b', 'c') was not seen, so we back off to the score of the ngram ('b', 'c') times the smoothing factor.

Probability distributions should sum up to unity. Generating text: we don't need random_seed for contexts with only one continuation; when the context doesn't limit our options enough, seed the random choice. Add a cycle to the model (bd -> b, db -> d) and test that we can escape the cycle. Passing a (None,) text seed should crash with a TypeError when we try to look it up in the vocabulary; text_seed=None will work.
import math from operator import itemgetter import pytest from nltk.lm import ( MLE, AbsoluteDiscountingInterpolated, KneserNeyInterpolated, Laplace, Lidstone, StupidBackoff, Vocabulary, WittenBellInterpolated, ) from nltk.lm.preprocessing import padded_everygrams @pytest.fixture(scope="session") def vocabulary(): return Vocabulary(["a", "b", "c", "d", "z", "<s>", "</s>"], unk_cutoff=1) @pytest.fixture(scope="session") def training_data(): return [["a", "b", "c", "d"], ["e", "g", "a", "d", "b", "e"]] @pytest.fixture(scope="session") def bigram_training_data(training_data): return [list(padded_everygrams(2, sent)) for sent in training_data] @pytest.fixture(scope="session") def trigram_training_data(training_data): return [list(padded_everygrams(3, sent)) for sent in training_data] @pytest.fixture def mle_bigram_model(vocabulary, bigram_training_data): model = MLE(2, vocabulary=vocabulary) model.fit(bigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("d", ["c"], 1), ("d", ["e"], 0), ("z", None, 0), ("a", None, 2.0 / 14), ("y", None, 3.0 / 14), ], ) def test_mle_bigram_scores(mle_bigram_model, word, context, expected_score): assert pytest.approx(mle_bigram_model.score(word, context), 1e-4) == expected_score def test_mle_bigram_logscore_for_zero_score(mle_bigram_model): assert math.isinf(mle_bigram_model.logscore("d", ["e"])) def test_mle_bigram_entropy_perplexity_seen(mle_bigram_model): trained = [ ("<s>", "a"), ("a", "b"), ("b", "<UNK>"), ("<UNK>", "a"), ("a", "d"), ("d", "</s>"), ] H = 1.0975 perplexity = 2.1398 assert pytest.approx(mle_bigram_model.entropy(trained), 1e-4) == H assert pytest.approx(mle_bigram_model.perplexity(trained), 1e-4) == perplexity def test_mle_bigram_entropy_perplexity_unseen(mle_bigram_model): untrained = [("<s>", "a"), ("a", "c"), ("c", "d"), ("d", "</s>")] assert math.isinf(mle_bigram_model.entropy(untrained)) assert math.isinf(mle_bigram_model.perplexity(untrained)) def test_mle_bigram_entropy_perplexity_unigrams(mle_bigram_model): H = 3.0095 perplexity = 8.0529 text = [("<s>",), ("a",), ("c",), ("-",), ("d",), ("c",), ("</s>",)] assert pytest.approx(mle_bigram_model.entropy(text), 1e-4) == H assert pytest.approx(mle_bigram_model.perplexity(text), 1e-4) == perplexity @pytest.fixture def mle_trigram_model(trigram_training_data, vocabulary): model = MLE(order=3, vocabulary=vocabulary) model.fit(trigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("d", ("b", "c"), 1), ("d", ["c"], 1), ("a", None, 2.0 / 18), ("z", None, 0), ("y", None, 3.0 / 18), ], ) def test_mle_trigram_scores(mle_trigram_model, word, context, expected_score): assert pytest.approx(mle_trigram_model.score(word, context), 1e-4) == expected_score @pytest.fixture def lidstone_bigram_model(bigram_training_data, vocabulary): model = Lidstone(0.1, order=2, vocabulary=vocabulary) model.fit(bigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("d", ["c"], 1.1 / 1.8), ("a", None, 2.1 / 14.8), ("z", None, 0.1 / 14.8), ("y", None, 3.1 / 14.8), ], ) def test_lidstone_bigram_score(lidstone_bigram_model, word, context, expected_score): assert ( pytest.approx(lidstone_bigram_model.score(word, context), 1e-4) == expected_score ) def test_lidstone_entropy_perplexity(lidstone_bigram_model): text = [ ("<s>", "a"), ("a", "c"), ("c", "<UNK>"), ("<UNK>", "d"), ("d", "c"), ("c", "</s>"), ] H = 4.0917 perplexity = 17.0504 assert pytest.approx(lidstone_bigram_model.entropy(text), 1e-4) == H 
assert pytest.approx(lidstone_bigram_model.perplexity(text), 1e-4) == perplexity @pytest.fixture def lidstone_trigram_model(trigram_training_data, vocabulary): model = Lidstone(0.1, order=3, vocabulary=vocabulary) model.fit(trigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("d", ["c"], 1.1 / 1.8), ("e", ["c"], 0.1 / 1.8), ("d", ["b", "c"], 1.1 / 1.8), ("e", ["b", "c"], 0.1 / 1.8), ], ) def test_lidstone_trigram_score(lidstone_trigram_model, word, context, expected_score): assert ( pytest.approx(lidstone_trigram_model.score(word, context), 1e-4) == expected_score ) @pytest.fixture def laplace_bigram_model(bigram_training_data, vocabulary): model = Laplace(2, vocabulary=vocabulary) model.fit(bigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("d", ["c"], 2.0 / 9), ("a", None, 3.0 / 22), ("z", None, 1.0 / 22), ("y", None, 4.0 / 22), ], ) def test_laplace_bigram_score(laplace_bigram_model, word, context, expected_score): assert ( pytest.approx(laplace_bigram_model.score(word, context), 1e-4) == expected_score ) def test_laplace_bigram_entropy_perplexity(laplace_bigram_model): text = [ ("<s>", "a"), ("a", "c"), ("c", "<UNK>"), ("<UNK>", "d"), ("d", "c"), ("c", "</s>"), ] H = 3.1275 perplexity = 8.7393 assert pytest.approx(laplace_bigram_model.entropy(text), 1e-4) == H assert pytest.approx(laplace_bigram_model.perplexity(text), 1e-4) == perplexity def test_laplace_gamma(laplace_bigram_model): assert laplace_bigram_model.gamma == 1 @pytest.fixture def wittenbell_trigram_model(trigram_training_data, vocabulary): model = WittenBellInterpolated(3, vocabulary=vocabulary) model.fit(trigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("c", None, 1.0 / 18), ("z", None, 0 / 18), ("y", None, 3.0 / 18), ("c", ["b"], (1 - 0.5) * 0.5 + 0.5 * 1 / 18), ("c", ["a", "b"], (1 - 0.5) + 0.5 * ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)), ("c", ["z", "b"], ((1 - 0.5) * 0.5 + 0.5 * 1 / 18)), ], ) def test_wittenbell_trigram_score( wittenbell_trigram_model, word, context, expected_score ): assert ( pytest.approx(wittenbell_trigram_model.score(word, context), 1e-4) == expected_score ) @pytest.fixture def kneserney_trigram_model(trigram_training_data, vocabulary): model = KneserNeyInterpolated(order=3, discount=0.75, vocabulary=vocabulary) model.fit(trigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("c", None, 1.0 / 14), ("z", None, 0.0 / 14), ("y", None, 3 / 14), ("c", ["b"], (0.125 + 0.75 * (1 / 14))), ("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (1 / 14))), ("c", ["z", "b"], (0.125 + 0.75 * (1 / 14))), ], ) def test_kneserney_trigram_score( kneserney_trigram_model, word, context, expected_score ): assert ( pytest.approx(kneserney_trigram_model.score(word, context), 1e-4) == expected_score ) @pytest.fixture def absolute_discounting_trigram_model(trigram_training_data, vocabulary): model = AbsoluteDiscountingInterpolated(order=3, vocabulary=vocabulary) model.fit(trigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("c", None, 1.0 / 18), ("z", None, 0.0 / 18), ("y", None, 3 / 18), ("c", ["b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))), ("c", ["a", "b"], 0.25 + 0.75 * (0.125 + 0.75 * (2 / 2) * (1 / 18))), ("c", ["z", "b"], (0.125 + 0.75 * (2 / 2) * (1 / 18))), ], ) def test_absolute_discounting_trigram_score( absolute_discounting_trigram_model, word, context, expected_score ): assert ( 
pytest.approx(absolute_discounting_trigram_model.score(word, context), 1e-4) == expected_score ) @pytest.fixture def stupid_backoff_trigram_model(trigram_training_data, vocabulary): model = StupidBackoff(order=3, vocabulary=vocabulary) model.fit(trigram_training_data) return model @pytest.mark.parametrize( "word, context, expected_score", [ ("c", None, 1.0 / 18), ("z", None, 0.0 / 18), ("y", None, 3 / 18), ("c", ["b"], 1 / 2), ("c", ["a", "b"], 1 / 1), ("c", ["z", "b"], (0.4 * (1 / 2))), ], ) def test_stupid_backoff_trigram_score( stupid_backoff_trigram_model, word, context, expected_score ): assert ( pytest.approx(stupid_backoff_trigram_model.score(word, context), 1e-4) == expected_score ) @pytest.fixture(scope="session") def kneserney_bigram_model(bigram_training_data, vocabulary): model = KneserNeyInterpolated(order=2, vocabulary=vocabulary) model.fit(bigram_training_data) return model @pytest.mark.parametrize( "model_fixture", [ "mle_bigram_model", "mle_trigram_model", "lidstone_bigram_model", "laplace_bigram_model", "wittenbell_trigram_model", "absolute_discounting_trigram_model", "kneserney_bigram_model", pytest.param( "stupid_backoff_trigram_model", marks=pytest.mark.xfail( reason="Stupid Backoff is not a valid distribution" ), ), ], ) @pytest.mark.parametrize( "context", [("a",), ("c",), ("<s>",), ("b",), ("<UNK>",), ("d",), ("e",), ("r",), ("w",)], ids=itemgetter(0), ) def test_sums_to_1(model_fixture, context, request): model = request.getfixturevalue(model_fixture) scores_for_context = sum(model.score(w, context) for w in model.vocab) assert pytest.approx(scores_for_context, 1e-7) == 1.0 def test_generate_one_no_context(mle_trigram_model): assert mle_trigram_model.generate(random_seed=3) == "<UNK>" def test_generate_one_from_limiting_context(mle_trigram_model): assert mle_trigram_model.generate(text_seed=["c"]) == "d" assert mle_trigram_model.generate(text_seed=["b", "c"]) == "d" assert mle_trigram_model.generate(text_seed=["a", "c"]) == "d" def test_generate_one_from_varied_context(mle_trigram_model): assert mle_trigram_model.generate(text_seed=("a", "<s>"), random_seed=2) == "a" def test_generate_cycle(mle_trigram_model): more_training_text = [padded_everygrams(mle_trigram_model.order, list("bdbdbd"))] mle_trigram_model.fit(more_training_text) assert mle_trigram_model.generate(7, text_seed=("b", "d"), random_seed=5) == [ "b", "d", "b", "d", "b", "d", "</s>", ] def test_generate_with_text_seed(mle_trigram_model): assert mle_trigram_model.generate(5, text_seed=("<s>", "e"), random_seed=3) == [ "<UNK>", "a", "d", "b", "<UNK>", ] def test_generate_oov_text_seed(mle_trigram_model): assert mle_trigram_model.generate( text_seed=("aliens",), random_seed=3 ) == mle_trigram_model.generate(text_seed=("<UNK>",), random_seed=3) def test_generate_None_text_seed(mle_trigram_model): with pytest.raises(TypeError): mle_trigram_model.generate(text_seed=(None,)) assert mle_trigram_model.generate( text_seed=None, random_seed=3 ) == mle_trigram_model.generate(random_seed=3)
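A worked check (illustrative, not part of the test file) of two expected scores from the fixtures above, assuming a vocabulary of 8 types including '<UNK>', 14 unigram tokens in the padded bigram data, and the Kneser-Ney discount of 0.75 used by the fixture.
gamma = 0.1
lidstone_d_given_c = (1 + gamma) / (1 + gamma * 8)   # == 1.1 / 1.8, as parametrized above
kn_alpha_bc = max(1 - 0.75, 0) / 2                   # unique('*bc') = 1, unique('*b*') = 2
kn_gamma_b = 0.75 * 2 / 2                            # discount * unique('b*') / unique('*b*')
kn_c_given_b = kn_alpha_bc + kn_gamma_b * (1 / 14)   # == 0.125 + 0.75 * (1 / 14)
print(lidstone_d_given_c, kn_c_given_b)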
Natural Language Toolkit: language model unit tests. (C) 2001-2023 NLTK Project. Author: Ilia Kurenkov <ilia.kurenkov@gmail.com>. URL: https://www.nltk.org. For license information, see LICENSE.TXT.
import unittest from nltk.lm.preprocessing import padded_everygram_pipeline class TestPreprocessing(unittest.TestCase): def test_padded_everygram_pipeline(self): expected_train = [ [ ("<s>",), ("<s>", "a"), ("a",), ("a", "b"), ("b",), ("b", "c"), ("c",), ("c", "</s>"), ("</s>",), ] ] expected_vocab = ["<s>", "a", "b", "c", "</s>"] train_data, vocab_data = padded_everygram_pipeline(2, [["a", "b", "c"]]) self.assertEqual([list(sent) for sent in train_data], expected_train) self.assertEqual(list(vocab_data), expected_vocab)
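A short illustrative note: padded_everygram_pipeline returns lazy iterators, which is why the test materializes them before comparing.
from nltk.lm.preprocessing import padded_everygram_pipeline
train, vocab = padded_everygram_pipeline(2, [["a", "b", "c"]])
print([list(sent) for sent in train])  # the everygrams of the padded sentence, as in expected_train
print(list(vocab))                     # ['<s>', 'a', 'b', 'c', '</s>']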
Natural Language Toolkit: language model unit tests. (C) 2001-2023 NLTK Project. Author: Ilia Kurenkov <ilia.kurenkov@gmail.com>. URL: https://www.nltk.org. For license information, see LICENSE.TXT. Tests for the Vocabulary class. 'a' was seen 2 times, so it should be considered part of the vocabulary; 'c' was seen once, so it shouldn't be; 'z' was never seen at all, so it also shouldn't be. Vocab size is the number of unique tokens that occur at least as often as the cutoff value, plus 1 to account for unknown words. The skipped length test is known to be flaky because it compares runtime performance: given an obviously small and an obviously large vocabulary, timing len() on each should come out the same order of magnitude.
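An illustration (not part of the test file) of the cutoff arithmetic the tests below assert: only tokens seen at least unk_cutoff times are vocabulary members, plus the '<UNK>' label.
from nltk.lm import Vocabulary
vocab = Vocabulary(["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"], unk_cutoff=2)
print(sorted(vocab))             # ['<UNK>', 'a', 'b', 'd', 'e'] -> len(vocab) == 5
print(vocab.lookup(("a", "c")))  # ('a', '<UNK>'): 'c' falls below the cutoff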
import unittest from collections import Counter from timeit import timeit from nltk.lm import Vocabulary class NgramModelVocabularyTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.vocab = Vocabulary( ["z", "a", "b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"], unk_cutoff=2, ) def test_truthiness(self): self.assertTrue(self.vocab) def test_cutoff_value_set_correctly(self): self.assertEqual(self.vocab.cutoff, 2) def test_unable_to_change_cutoff(self): with self.assertRaises(AttributeError): self.vocab.cutoff = 3 def test_cutoff_setter_checks_value(self): with self.assertRaises(ValueError) as exc_info: Vocabulary("abc", unk_cutoff=0) expected_error_msg = "Cutoff value cannot be less than 1. Got: 0" self.assertEqual(expected_error_msg, str(exc_info.exception)) def test_counts_set_correctly(self): self.assertEqual(self.vocab.counts["a"], 2) self.assertEqual(self.vocab.counts["b"], 2) self.assertEqual(self.vocab.counts["c"], 1) def test_membership_check_respects_cutoff(self): self.assertTrue("a" in self.vocab) self.assertFalse("c" in self.vocab) self.assertFalse("z" in self.vocab) def test_vocab_len_respects_cutoff(self): self.assertEqual(5, len(self.vocab)) def test_vocab_iter_respects_cutoff(self): vocab_counts = ["a", "b", "c", "d", "e", "f", "g", "w", "z"] vocab_items = ["a", "b", "d", "e", "<UNK>"] self.assertCountEqual(vocab_counts, list(self.vocab.counts.keys())) self.assertCountEqual(vocab_items, list(self.vocab)) def test_update_empty_vocab(self): empty = Vocabulary(unk_cutoff=2) self.assertEqual(len(empty), 0) self.assertFalse(empty) self.assertIn(empty.unk_label, empty) empty.update(list("abcde")) self.assertIn(empty.unk_label, empty) def test_lookup(self): self.assertEqual(self.vocab.lookup("a"), "a") self.assertEqual(self.vocab.lookup("c"), "<UNK>") def test_lookup_iterables(self): self.assertEqual(self.vocab.lookup(["a", "b"]), ("a", "b")) self.assertEqual(self.vocab.lookup(("a", "b")), ("a", "b")) self.assertEqual(self.vocab.lookup(("a", "c")), ("a", "<UNK>")) self.assertEqual( self.vocab.lookup(map(str, range(3))), ("<UNK>", "<UNK>", "<UNK>") ) def test_lookup_empty_iterables(self): self.assertEqual(self.vocab.lookup(()), ()) self.assertEqual(self.vocab.lookup([]), ()) self.assertEqual(self.vocab.lookup(iter([])), ()) self.assertEqual(self.vocab.lookup(n for n in range(0, 0)), ()) def test_lookup_recursive(self): self.assertEqual( self.vocab.lookup([["a", "b"], ["a", "c"]]), (("a", "b"), ("a", "<UNK>")) ) self.assertEqual(self.vocab.lookup([["a", "b"], "c"]), (("a", "b"), "<UNK>")) self.assertEqual(self.vocab.lookup([[[[["a", "b"]]]]]), ((((("a", "b"),),),),)) def test_lookup_None(self): with self.assertRaises(TypeError): self.vocab.lookup(None) with self.assertRaises(TypeError): list(self.vocab.lookup([None, None])) def test_lookup_int(self): with self.assertRaises(TypeError): self.vocab.lookup(1) with self.assertRaises(TypeError): list(self.vocab.lookup([1, 2])) def test_lookup_empty_str(self): self.assertEqual(self.vocab.lookup(""), "<UNK>") def test_eqality(self): v1 = Vocabulary(["a", "b", "c"], unk_cutoff=1) v2 = Vocabulary(["a", "b", "c"], unk_cutoff=1) v3 = Vocabulary(["a", "b", "c"], unk_cutoff=1, unk_label="blah") v4 = Vocabulary(["a", "b"], unk_cutoff=1) self.assertEqual(v1, v2) self.assertNotEqual(v1, v3) self.assertNotEqual(v1, v4) def test_str(self): self.assertEqual( str(self.vocab), "<Vocabulary with cutoff=2 unk_label='<UNK>' and 5 items>" ) def test_creation_with_counter(self): self.assertEqual( self.vocab, Vocabulary( Counter( ["z", "a", 
"b", "c", "f", "d", "e", "g", "a", "d", "b", "e", "w"] ), unk_cutoff=2, ), ) @unittest.skip( reason="Test is known to be flaky as it compares (runtime) performance." ) def test_len_is_constant(self): small_vocab = Vocabulary("abcde") from nltk.corpus.europarl_raw import english large_vocab = Vocabulary(english.words()) small_vocab_len_time = timeit("len(small_vocab)", globals=locals()) large_vocab_len_time = timeit("len(large_vocab)", globals=locals()) self.assertAlmostEqual(small_vocab_len_time, large_vocab_len_time, places=1)
Test the ALINE algorithm for aligning phonetic sequences. Test ALINE for computing the difference between two segments.
from nltk.metrics import aline def test_aline(): result = aline.align("θin", "tenwis") expected = [[("θ", "t"), ("i", "e"), ("n", "n")]] assert result == expected result = aline.align("jo", "ʒə") expected = [[("j", "ʒ"), ("o", "ə")]] assert result == expected result = aline.align("pematesiweni", "pematesewen") expected = [ [ ("p", "p"), ("e", "e"), ("m", "m"), ("a", "a"), ("t", "t"), ("e", "e"), ("s", "s"), ("i", "e"), ("w", "w"), ("e", "e"), ("n", "n"), ] ] assert result == expected result = aline.align("tuwθ", "dentis") expected = [[("t", "t"), ("u", "i"), ("w", "-"), ("θ", "s")]] assert result == expected def test_aline_delta(): assert aline.delta("p", "q") == 20.0 assert aline.delta("a", "A") == 0.0
Tests for the Brill tagger. Example from https://github.com/nltk/nltk/issues/769.
import unittest from nltk.corpus import treebank from nltk.tag import UnigramTagger, brill, brill_trainer from nltk.tbl import demo class TestBrill(unittest.TestCase): def test_pos_template(self): train_sents = treebank.tagged_sents()[:1000] tagger = UnigramTagger(train_sents) trainer = brill_trainer.BrillTaggerTrainer( tagger, [brill.Template(brill.Pos([-1]))] ) brill_tagger = trainer.train(train_sents) result = brill_tagger.tag("This is a foo bar sentence".split()) expected = [ ("This", "DT"), ("is", "VBZ"), ("a", "DT"), ("foo", None), ("bar", "NN"), ("sentence", None), ] self.assertEqual(result, expected) @unittest.skip("Should be tested in __main__ of nltk.tbl.demo") def test_brill_demo(self): demo()
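An illustrative sketch (assumes the treebank sample is downloaded): the None tags in the expected output come from the UnigramTagger backoff, which tags words unseen in the training slice as None.
from nltk.corpus import treebank
from nltk.tag import UnigramTagger
unigram = UnigramTagger(treebank.tagged_sents()[:1000])
print(unigram.tag(["foo", "sentence"]))  # expected per the test above: [('foo', None), ('sentence', None)]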
Nonexistent keys shouldn't be added when tabulating or plotting an empty ConditionalFreqDist. Make sure that we can still mutate the CFD normally: create a CFD with word length as the condition; incrementing a previously unseen key is still possible (a new condition is added, and the key's frequency is incremented from 0, i.e. unseen, to 1).
import unittest import pytest from nltk import ConditionalFreqDist, tokenize class TestEmptyCondFreq(unittest.TestCase): def test_tabulate(self): empty = ConditionalFreqDist() self.assertEqual(empty.conditions(), []) with pytest.raises(ValueError): empty.tabulate(conditions="BUG") self.assertEqual(empty.conditions(), []) def test_plot(self): empty = ConditionalFreqDist() self.assertEqual(empty.conditions(), []) empty.plot(conditions=["BUG"]) self.assertEqual(empty.conditions(), []) def test_increment(self): text = "cow cat mouse cat tiger" cfd = ConditionalFreqDist() for word in tokenize.word_tokenize(text): condition = len(word) cfd[condition][word] += 1 self.assertEqual(cfd.conditions(), [3, 5]) cfd[2]["hi"] += 1 self.assertCountEqual(cfd.conditions(), [3, 5, 2]) self.assertEqual( cfd[2]["hi"], 1 )
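An illustration (not part of the test file) of the counts built in test_increment; splitting on whitespace is equivalent to word_tokenize for this sentence.
from nltk import ConditionalFreqDist
cfd = ConditionalFreqDist()
for word in "cow cat mouse cat tiger".split():
    cfd[len(word)][word] += 1
print(dict(cfd[3]))  # {'cow': 1, 'cat': 2}
print(dict(cfd[5]))  # {'mouse': 1, 'tiger': 1}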
First grammar: S -> NP VP; PP -> P NP; NP -> Det N | NP PP P; VP -> V NP | VP PP | Det; Det -> 'a' | 'the'; N -> 'dog' | 'cat'; V -> 'chased' | 'sat'; P -> 'on' | 'in'. Second grammar: S -> NP VP; NP -> VP N P; VP -> P; N -> 'dog' | 'cat'; P -> 'on' | 'in'.
import unittest import nltk from nltk.grammar import CFG class ChomskyNormalFormForCFGTest(unittest.TestCase): def test_simple(self): grammar = CFG.fromstring( ) self.assertFalse(grammar.is_flexible_chomsky_normal_form()) self.assertFalse(grammar.is_chomsky_normal_form()) grammar = grammar.chomsky_normal_form(flexible=True) self.assertTrue(grammar.is_flexible_chomsky_normal_form()) self.assertFalse(grammar.is_chomsky_normal_form()) grammar2 = CFG.fromstring( ) self.assertFalse(grammar2.is_flexible_chomsky_normal_form()) self.assertFalse(grammar2.is_chomsky_normal_form()) grammar2 = grammar2.chomsky_normal_form() self.assertTrue(grammar2.is_flexible_chomsky_normal_form()) self.assertTrue(grammar2.is_chomsky_normal_form()) def test_complex(self): grammar = nltk.data.load("grammars/large_grammars/atis.cfg") self.assertFalse(grammar.is_flexible_chomsky_normal_form()) self.assertFalse(grammar.is_chomsky_normal_form()) grammar = grammar.chomsky_normal_form(flexible=True) self.assertTrue(grammar.is_flexible_chomsky_normal_form()) self.assertFalse(grammar.is_chomsky_normal_form())
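The fromstring() calls in the code cell above lost their grammar strings; a hedged reconstruction from the productions listed in the description (the grouping of alternatives is inferred, since the flattened text does not preserve line breaks) behaves as the assertions expect.
from nltk.grammar import CFG
# Inferred first grammar: the ternary rule NP -> NP PP P keeps it out of (flexible) CNF.
grammar = CFG.fromstring(
    "S -> NP VP\n"
    "PP -> P NP\n"
    "NP -> Det N | NP PP P\n"
    "VP -> V NP | VP PP | Det\n"
    "Det -> 'a' | 'the'\n"
    "N -> 'dog' | 'cat'\n"
    "V -> 'chased' | 'sat'\n"
    "P -> 'on' | 'in'"
)
print(grammar.is_flexible_chomsky_normal_form())  # False
cnf = grammar.chomsky_normal_form(flexible=True)
print(cnf.is_flexible_chomsky_normal_form())      # True
print(cnf.is_chomsky_normal_form())               # False: unit rules such as VP -> Det survive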
Unit tests for nltk.classify; see also nltk/test/classify.doctest. The TEST featuresets are annotated as: unseen; unseen; seen 3 times with labels y, y, x; seen 1 time with label x.
import pytest from nltk import classify TRAIN = [ (dict(a=1, b=1, c=1), "y"), (dict(a=1, b=1, c=1), "x"), (dict(a=1, b=1, c=0), "y"), (dict(a=0, b=1, c=1), "x"), (dict(a=0, b=1, c=1), "y"), (dict(a=0, b=0, c=1), "y"), (dict(a=0, b=1, c=0), "x"), (dict(a=0, b=0, c=0), "x"), (dict(a=0, b=1, c=1), "y"), ] TEST = [ (dict(a=1, b=0, c=1)), (dict(a=1, b=0, c=0)), (dict(a=0, b=1, c=1)), (dict(a=0, b=1, c=0)), ] RESULTS = [(0.16, 0.84), (0.46, 0.54), (0.41, 0.59), (0.76, 0.24)] def assert_classifier_correct(algorithm): try: classifier = classify.MaxentClassifier.train( TRAIN, algorithm, trace=0, max_iter=1000 ) except (LookupError, AttributeError) as e: pytest.skip(str(e)) for (px, py), featureset in zip(RESULTS, TEST): pdist = classifier.prob_classify(featureset) assert abs(pdist.prob("x") - px) < 1e-2, (pdist.prob("x"), px) assert abs(pdist.prob("y") - py) < 1e-2, (pdist.prob("y"), py) def test_megam(): assert_classifier_correct("MEGAM") def test_tadm(): assert_classifier_correct("TADM")
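An illustrative sketch, assuming NumPy is available: the same harness can be exercised with NLTK's built-in GIS trainer, which needs no external binary; its probabilities should land near RESULTS, though exact agreement with the MEGAM/TADM figures is not guaranteed.
def demo_gis():
    classifier = classify.MaxentClassifier.train(TRAIN, "GIS", trace=0, max_iter=1000)
    for featureset in TEST:
        pdist = classifier.prob_classify(featureset)
        print(round(pdist.prob("x"), 2), round(pdist.prob("y"), 2))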
Test bigram counters with discontinuous bigrams and repeated words. Verify that two sequences of n-gram association values are within _EPSILON of each other.
from nltk.collocations import BigramCollocationFinder from nltk.metrics import BigramAssocMeasures _EPSILON = 1e-8 SENT = "this this is is a a test test".split() def close_enough(x, y): return all(abs(x1[1] - y1[1]) <= _EPSILON for x1, y1 in zip(x, y)) def test_bigram2(): b = BigramCollocationFinder.from_words(SENT) assert sorted(b.ngram_fd.items()) == [ (("a", "a"), 1), (("a", "test"), 1), (("is", "a"), 1), (("is", "is"), 1), (("test", "test"), 1), (("this", "is"), 1), (("this", "this"), 1), ] assert sorted(b.word_fd.items()) == [("a", 2), ("is", 2), ("test", 2), ("this", 2)] assert len(SENT) == sum(b.word_fd.values()) == sum(b.ngram_fd.values()) + 1 assert close_enough( sorted(b.score_ngrams(BigramAssocMeasures.pmi)), [ (("a", "a"), 1.0), (("a", "test"), 1.0), (("is", "a"), 1.0), (("is", "is"), 1.0), (("test", "test"), 1.0), (("this", "is"), 1.0), (("this", "this"), 1.0), ], ) def test_bigram3(): b = BigramCollocationFinder.from_words(SENT, window_size=3) assert sorted(b.ngram_fd.items()) == sorted( [ (("a", "test"), 3), (("is", "a"), 3), (("this", "is"), 3), (("a", "a"), 1), (("is", "is"), 1), (("test", "test"), 1), (("this", "this"), 1), ] ) assert sorted(b.word_fd.items()) == sorted( [("a", 2), ("is", 2), ("test", 2), ("this", 2)] ) assert ( len(SENT) == sum(b.word_fd.values()) == (sum(b.ngram_fd.values()) + 2 + 1) / 2.0 ) assert close_enough( sorted(b.score_ngrams(BigramAssocMeasures.pmi)), sorted( [ (("a", "test"), 1.584962500721156), (("is", "a"), 1.584962500721156), (("this", "is"), 1.584962500721156), (("a", "a"), 0.0), (("is", "is"), 0.0), (("test", "test"), 0.0), (("this", "this"), 0.0), ] ), ) def test_bigram5(): b = BigramCollocationFinder.from_words(SENT, window_size=5) assert sorted(b.ngram_fd.items()) == sorted( [ (("a", "test"), 4), (("is", "a"), 4), (("this", "is"), 4), (("is", "test"), 3), (("this", "a"), 3), (("a", "a"), 1), (("is", "is"), 1), (("test", "test"), 1), (("this", "this"), 1), ] ) assert sorted(b.word_fd.items()) == sorted( [("a", 2), ("is", 2), ("test", 2), ("this", 2)] ) n_word_fd = sum(b.word_fd.values()) n_ngram_fd = (sum(b.ngram_fd.values()) + 4 + 3 + 2 + 1) / 4.0 assert len(SENT) == n_word_fd == n_ngram_fd assert close_enough( sorted(b.score_ngrams(BigramAssocMeasures.pmi)), sorted( [ (("a", "test"), 1.0), (("is", "a"), 1.0), (("this", "is"), 1.0), (("is", "test"), 0.5849625007211562), (("this", "a"), 0.5849625007211562), (("a", "a"), -1.0), (("is", "is"), -1.0), (("test", "test"), -1.0), (("this", "this"), -1.0), ] ), )
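A worked check (illustrative) of the expected PMI values, assuming the finder normalizes the joint count by (window_size - 1); this reproduces the fixture numbers.
from math import log2
N = 8                                # total word tokens in SENT
print(log2(1 * N / (2 * 2)))         # window_size=2, ('this', 'is'): 1.0
print(log2((3 / 2) * N / (2 * 2)))   # window_size=3, ('a', 'test'): 1.584962500721156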
Mock test for Stanford CoreNLP wrappers.
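A minimal sketch of the mocking pattern used in the code that follows (no running CoreNLP server is needed because api_call is replaced; the empty response here is only an example).
from unittest.mock import MagicMock
from nltk.parse import corenlp
tokenizer = corenlp.CoreNLPParser()
tokenizer.api_call = MagicMock(return_value={"sentences": []})
assert list(tokenizer.tokenize("anything")) == []  # nothing to yield from the canned response
tokenizer.api_call.assert_called_once()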
from unittest import TestCase from unittest.mock import MagicMock import pytest from nltk.parse import corenlp from nltk.tree import Tree def setup_module(module): global server try: server = corenlp.CoreNLPServer(port=9000) except LookupError: pytest.skip("Could not instantiate CoreNLPServer.") try: server.start() except corenlp.CoreNLPServerError as e: pytest.skip( "Skipping CoreNLP tests because the server could not be started. " "Make sure that the 9000 port is free. " "{}".format(e.strerror) ) def teardown_module(module): server.stop() class TestTokenizerAPI(TestCase): def test_tokenize(self): corenlp_tokenizer = corenlp.CoreNLPParser() api_return_value = { "sentences": [ { "index": 0, "tokens": [ { "after": " ", "before": "", "characterOffsetBegin": 0, "characterOffsetEnd": 4, "index": 1, "originalText": "Good", "word": "Good", }, { "after": " ", "before": " ", "characterOffsetBegin": 5, "characterOffsetEnd": 12, "index": 2, "originalText": "muffins", "word": "muffins", }, { "after": " ", "before": " ", "characterOffsetBegin": 13, "characterOffsetEnd": 17, "index": 3, "originalText": "cost", "word": "cost", }, { "after": "", "before": " ", "characterOffsetBegin": 18, "characterOffsetEnd": 19, "index": 4, "originalText": "$", "word": "$", }, { "after": "\n", "before": "", "characterOffsetBegin": 19, "characterOffsetEnd": 23, "index": 5, "originalText": "3.88", "word": "3.88", }, { "after": " ", "before": "\n", "characterOffsetBegin": 24, "characterOffsetEnd": 26, "index": 6, "originalText": "in", "word": "in", }, { "after": " ", "before": " ", "characterOffsetBegin": 27, "characterOffsetEnd": 30, "index": 7, "originalText": "New", "word": "New", }, { "after": "", "before": " ", "characterOffsetBegin": 31, "characterOffsetEnd": 35, "index": 8, "originalText": "York", "word": "York", }, { "after": " ", "before": "", "characterOffsetBegin": 35, "characterOffsetEnd": 36, "index": 9, "originalText": ".", "word": ".", }, ], }, { "index": 1, "tokens": [ { "after": " ", "before": " ", "characterOffsetBegin": 38, "characterOffsetEnd": 44, "index": 1, "originalText": "Please", "word": "Please", }, { "after": " ", "before": " ", "characterOffsetBegin": 45, "characterOffsetEnd": 48, "index": 2, "originalText": "buy", "word": "buy", }, { "after": "\n", "before": " ", "characterOffsetBegin": 49, "characterOffsetEnd": 51, "index": 3, "originalText": "me", "word": "me", }, { "after": " ", "before": "\n", "characterOffsetBegin": 52, "characterOffsetEnd": 55, "index": 4, "originalText": "two", "word": "two", }, { "after": " ", "before": " ", "characterOffsetBegin": 56, "characterOffsetEnd": 58, "index": 5, "originalText": "of", "word": "of", }, { "after": "", "before": " ", "characterOffsetBegin": 59, "characterOffsetEnd": 63, "index": 6, "originalText": "them", "word": "them", }, { "after": "\n", "before": "", "characterOffsetBegin": 63, "characterOffsetEnd": 64, "index": 7, "originalText": ".", "word": ".", }, ], }, { "index": 2, "tokens": [ { "after": "", "before": "\n", "characterOffsetBegin": 65, "characterOffsetEnd": 71, "index": 1, "originalText": "Thanks", "word": "Thanks", }, { "after": "", "before": "", "characterOffsetBegin": 71, "characterOffsetEnd": 72, "index": 2, "originalText": ".", "word": ".", }, ], }, ] } corenlp_tokenizer.api_call = MagicMock(return_value=api_return_value) input_string = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks." 
expected_output = [ "Good", "muffins", "cost", "$", "3.88", "in", "New", "York", ".", "Please", "buy", "me", "two", "of", "them", ".", "Thanks", ".", ] tokenized_output = list(corenlp_tokenizer.tokenize(input_string)) corenlp_tokenizer.api_call.assert_called_once_with( "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.", properties={"annotators": "tokenize,ssplit"}, ) self.assertEqual(expected_output, tokenized_output) class TestTaggerAPI(TestCase): def test_pos_tagger(self): corenlp_tagger = corenlp.CoreNLPParser(tagtype="pos") api_return_value = { "sentences": [ { "basicDependencies": [ { "dep": "ROOT", "dependent": 1, "dependentGloss": "What", "governor": 0, "governorGloss": "ROOT", }, { "dep": "cop", "dependent": 2, "dependentGloss": "is", "governor": 1, "governorGloss": "What", }, { "dep": "det", "dependent": 3, "dependentGloss": "the", "governor": 4, "governorGloss": "airspeed", }, { "dep": "nsubj", "dependent": 4, "dependentGloss": "airspeed", "governor": 1, "governorGloss": "What", }, { "dep": "case", "dependent": 5, "dependentGloss": "of", "governor": 8, "governorGloss": "swallow", }, { "dep": "det", "dependent": 6, "dependentGloss": "an", "governor": 8, "governorGloss": "swallow", }, { "dep": "compound", "dependent": 7, "dependentGloss": "unladen", "governor": 8, "governorGloss": "swallow", }, { "dep": "nmod", "dependent": 8, "dependentGloss": "swallow", "governor": 4, "governorGloss": "airspeed", }, { "dep": "punct", "dependent": 9, "dependentGloss": "?", "governor": 1, "governorGloss": "What", }, ], "enhancedDependencies": [ { "dep": "ROOT", "dependent": 1, "dependentGloss": "What", "governor": 0, "governorGloss": "ROOT", }, { "dep": "cop", "dependent": 2, "dependentGloss": "is", "governor": 1, "governorGloss": "What", }, { "dep": "det", "dependent": 3, "dependentGloss": "the", "governor": 4, "governorGloss": "airspeed", }, { "dep": "nsubj", "dependent": 4, "dependentGloss": "airspeed", "governor": 1, "governorGloss": "What", }, { "dep": "case", "dependent": 5, "dependentGloss": "of", "governor": 8, "governorGloss": "swallow", }, { "dep": "det", "dependent": 6, "dependentGloss": "an", "governor": 8, "governorGloss": "swallow", }, { "dep": "compound", "dependent": 7, "dependentGloss": "unladen", "governor": 8, "governorGloss": "swallow", }, { "dep": "nmod:of", "dependent": 8, "dependentGloss": "swallow", "governor": 4, "governorGloss": "airspeed", }, { "dep": "punct", "dependent": 9, "dependentGloss": "?", "governor": 1, "governorGloss": "What", }, ], "enhancedPlusPlusDependencies": [ { "dep": "ROOT", "dependent": 1, "dependentGloss": "What", "governor": 0, "governorGloss": "ROOT", }, { "dep": "cop", "dependent": 2, "dependentGloss": "is", "governor": 1, "governorGloss": "What", }, { "dep": "det", "dependent": 3, "dependentGloss": "the", "governor": 4, "governorGloss": "airspeed", }, { "dep": "nsubj", "dependent": 4, "dependentGloss": "airspeed", "governor": 1, "governorGloss": "What", }, { "dep": "case", "dependent": 5, "dependentGloss": "of", "governor": 8, "governorGloss": "swallow", }, { "dep": "det", "dependent": 6, "dependentGloss": "an", "governor": 8, "governorGloss": "swallow", }, { "dep": "compound", "dependent": 7, "dependentGloss": "unladen", "governor": 8, "governorGloss": "swallow", }, { "dep": "nmod:of", "dependent": 8, "dependentGloss": "swallow", "governor": 4, "governorGloss": "airspeed", }, { "dep": "punct", "dependent": 9, "dependentGloss": "?", "governor": 1, "governorGloss": "What", }, ], "index": 0, "parse": "(ROOT\n (SBARQ\n (WHNP 
(WP What))\n (SQ (VBZ is)\n (NP\n (NP (DT the) (NN airspeed))\n (PP (IN of)\n (NP (DT an) (NN unladen) (NN swallow)))))\n (. ?)))", "tokens": [ { "after": " ", "before": "", "characterOffsetBegin": 0, "characterOffsetEnd": 4, "index": 1, "lemma": "what", "originalText": "What", "pos": "WP", "word": "What", }, { "after": " ", "before": " ", "characterOffsetBegin": 5, "characterOffsetEnd": 7, "index": 2, "lemma": "be", "originalText": "is", "pos": "VBZ", "word": "is", }, { "after": " ", "before": " ", "characterOffsetBegin": 8, "characterOffsetEnd": 11, "index": 3, "lemma": "the", "originalText": "the", "pos": "DT", "word": "the", }, { "after": " ", "before": " ", "characterOffsetBegin": 12, "characterOffsetEnd": 20, "index": 4, "lemma": "airspeed", "originalText": "airspeed", "pos": "NN", "word": "airspeed", }, { "after": " ", "before": " ", "characterOffsetBegin": 21, "characterOffsetEnd": 23, "index": 5, "lemma": "of", "originalText": "of", "pos": "IN", "word": "of", }, { "after": " ", "before": " ", "characterOffsetBegin": 24, "characterOffsetEnd": 26, "index": 6, "lemma": "a", "originalText": "an", "pos": "DT", "word": "an", }, { "after": " ", "before": " ", "characterOffsetBegin": 27, "characterOffsetEnd": 34, "index": 7, "lemma": "unladen", "originalText": "unladen", "pos": "JJ", "word": "unladen", }, { "after": " ", "before": " ", "characterOffsetBegin": 35, "characterOffsetEnd": 42, "index": 8, "lemma": "swallow", "originalText": "swallow", "pos": "VB", "word": "swallow", }, { "after": "", "before": " ", "characterOffsetBegin": 43, "characterOffsetEnd": 44, "index": 9, "lemma": "?", "originalText": "?", "pos": ".", "word": "?", }, ], } ] } corenlp_tagger.api_call = MagicMock(return_value=api_return_value) input_tokens = "What is the airspeed of an unladen swallow ?".split() expected_output = [ ("What", "WP"), ("is", "VBZ"), ("the", "DT"), ("airspeed", "NN"), ("of", "IN"), ("an", "DT"), ("unladen", "JJ"), ("swallow", "VB"), ("?", "."), ] tagged_output = corenlp_tagger.tag(input_tokens) corenlp_tagger.api_call.assert_called_once_with( "What is the airspeed of an unladen swallow ?", properties={ "ssplit.isOneSentence": "true", "annotators": "tokenize,ssplit,pos", }, ) self.assertEqual(expected_output, tagged_output) def test_ner_tagger(self): corenlp_tagger = corenlp.CoreNLPParser(tagtype="ner") api_return_value = { "sentences": [ { "index": 0, "tokens": [ { "after": " ", "before": "", "characterOffsetBegin": 0, "characterOffsetEnd": 4, "index": 1, "lemma": "Rami", "ner": "PERSON", "originalText": "Rami", "pos": "NNP", "word": "Rami", }, { "after": " ", "before": " ", "characterOffsetBegin": 5, "characterOffsetEnd": 8, "index": 2, "lemma": "Eid", "ner": "PERSON", "originalText": "Eid", "pos": "NNP", "word": "Eid", }, { "after": " ", "before": " ", "characterOffsetBegin": 9, "characterOffsetEnd": 11, "index": 3, "lemma": "be", "ner": "O", "originalText": "is", "pos": "VBZ", "word": "is", }, { "after": " ", "before": " ", "characterOffsetBegin": 12, "characterOffsetEnd": 20, "index": 4, "lemma": "study", "ner": "O", "originalText": "studying", "pos": "VBG", "word": "studying", }, { "after": " ", "before": " ", "characterOffsetBegin": 21, "characterOffsetEnd": 23, "index": 5, "lemma": "at", "ner": "O", "originalText": "at", "pos": "IN", "word": "at", }, { "after": " ", "before": " ", "characterOffsetBegin": 24, "characterOffsetEnd": 29, "index": 6, "lemma": "Stony", "ner": "ORGANIZATION", "originalText": "Stony", "pos": "NNP", "word": "Stony", }, { "after": " ", "before": " ", 
"characterOffsetBegin": 30, "characterOffsetEnd": 35, "index": 7, "lemma": "Brook", "ner": "ORGANIZATION", "originalText": "Brook", "pos": "NNP", "word": "Brook", }, { "after": " ", "before": " ", "characterOffsetBegin": 36, "characterOffsetEnd": 46, "index": 8, "lemma": "University", "ner": "ORGANIZATION", "originalText": "University", "pos": "NNP", "word": "University", }, { "after": " ", "before": " ", "characterOffsetBegin": 47, "characterOffsetEnd": 49, "index": 9, "lemma": "in", "ner": "O", "originalText": "in", "pos": "IN", "word": "in", }, { "after": "", "before": " ", "characterOffsetBegin": 50, "characterOffsetEnd": 52, "index": 10, "lemma": "NY", "ner": "O", "originalText": "NY", "pos": "NNP", "word": "NY", }, ], } ] } corenlp_tagger.api_call = MagicMock(return_value=api_return_value) input_tokens = "Rami Eid is studying at Stony Brook University in NY".split() expected_output = [ ("Rami", "PERSON"), ("Eid", "PERSON"), ("is", "O"), ("studying", "O"), ("at", "O"), ("Stony", "ORGANIZATION"), ("Brook", "ORGANIZATION"), ("University", "ORGANIZATION"), ("in", "O"), ("NY", "O"), ] tagged_output = corenlp_tagger.tag(input_tokens) corenlp_tagger.api_call.assert_called_once_with( "Rami Eid is studying at Stony Brook University in NY", properties={ "ssplit.isOneSentence": "true", "annotators": "tokenize,ssplit,ner", }, ) self.assertEqual(expected_output, tagged_output) def test_unexpected_tagtype(self): with self.assertRaises(ValueError): corenlp_tagger = corenlp.CoreNLPParser(tagtype="test") class TestParserAPI(TestCase): def test_parse(self): corenlp_parser = corenlp.CoreNLPParser() api_return_value = { "sentences": [ { "basicDependencies": [ { "dep": "ROOT", "dependent": 4, "dependentGloss": "fox", "governor": 0, "governorGloss": "ROOT", }, { "dep": "det", "dependent": 1, "dependentGloss": "The", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 2, "dependentGloss": "quick", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 3, "dependentGloss": "brown", "governor": 4, "governorGloss": "fox", }, { "dep": "dep", "dependent": 5, "dependentGloss": "jumps", "governor": 4, "governorGloss": "fox", }, { "dep": "case", "dependent": 6, "dependentGloss": "over", "governor": 9, "governorGloss": "dog", }, { "dep": "det", "dependent": 7, "dependentGloss": "the", "governor": 9, "governorGloss": "dog", }, { "dep": "amod", "dependent": 8, "dependentGloss": "lazy", "governor": 9, "governorGloss": "dog", }, { "dep": "nmod", "dependent": 9, "dependentGloss": "dog", "governor": 5, "governorGloss": "jumps", }, ], "enhancedDependencies": [ { "dep": "ROOT", "dependent": 4, "dependentGloss": "fox", "governor": 0, "governorGloss": "ROOT", }, { "dep": "det", "dependent": 1, "dependentGloss": "The", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 2, "dependentGloss": "quick", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 3, "dependentGloss": "brown", "governor": 4, "governorGloss": "fox", }, { "dep": "dep", "dependent": 5, "dependentGloss": "jumps", "governor": 4, "governorGloss": "fox", }, { "dep": "case", "dependent": 6, "dependentGloss": "over", "governor": 9, "governorGloss": "dog", }, { "dep": "det", "dependent": 7, "dependentGloss": "the", "governor": 9, "governorGloss": "dog", }, { "dep": "amod", "dependent": 8, "dependentGloss": "lazy", "governor": 9, "governorGloss": "dog", }, { "dep": "nmod:over", "dependent": 9, "dependentGloss": "dog", "governor": 5, "governorGloss": "jumps", }, ], 
"enhancedPlusPlusDependencies": [ { "dep": "ROOT", "dependent": 4, "dependentGloss": "fox", "governor": 0, "governorGloss": "ROOT", }, { "dep": "det", "dependent": 1, "dependentGloss": "The", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 2, "dependentGloss": "quick", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 3, "dependentGloss": "brown", "governor": 4, "governorGloss": "fox", }, { "dep": "dep", "dependent": 5, "dependentGloss": "jumps", "governor": 4, "governorGloss": "fox", }, { "dep": "case", "dependent": 6, "dependentGloss": "over", "governor": 9, "governorGloss": "dog", }, { "dep": "det", "dependent": 7, "dependentGloss": "the", "governor": 9, "governorGloss": "dog", }, { "dep": "amod", "dependent": 8, "dependentGloss": "lazy", "governor": 9, "governorGloss": "dog", }, { "dep": "nmod:over", "dependent": 9, "dependentGloss": "dog", "governor": 5, "governorGloss": "jumps", }, ], "index": 0, "parse": "(ROOT\n (NP\n (NP (DT The) (JJ quick) (JJ brown) (NN fox))\n (NP\n (NP (NNS jumps))\n (PP (IN over)\n (NP (DT the) (JJ lazy) (NN dog))))))", "tokens": [ { "after": " ", "before": "", "characterOffsetBegin": 0, "characterOffsetEnd": 3, "index": 1, "lemma": "the", "originalText": "The", "pos": "DT", "word": "The", }, { "after": " ", "before": " ", "characterOffsetBegin": 4, "characterOffsetEnd": 9, "index": 2, "lemma": "quick", "originalText": "quick", "pos": "JJ", "word": "quick", }, { "after": " ", "before": " ", "characterOffsetBegin": 10, "characterOffsetEnd": 15, "index": 3, "lemma": "brown", "originalText": "brown", "pos": "JJ", "word": "brown", }, { "after": " ", "before": " ", "characterOffsetBegin": 16, "characterOffsetEnd": 19, "index": 4, "lemma": "fox", "originalText": "fox", "pos": "NN", "word": "fox", }, { "after": " ", "before": " ", "characterOffsetBegin": 20, "characterOffsetEnd": 25, "index": 5, "lemma": "jump", "originalText": "jumps", "pos": "VBZ", "word": "jumps", }, { "after": " ", "before": " ", "characterOffsetBegin": 26, "characterOffsetEnd": 30, "index": 6, "lemma": "over", "originalText": "over", "pos": "IN", "word": "over", }, { "after": " ", "before": " ", "characterOffsetBegin": 31, "characterOffsetEnd": 34, "index": 7, "lemma": "the", "originalText": "the", "pos": "DT", "word": "the", }, { "after": " ", "before": " ", "characterOffsetBegin": 35, "characterOffsetEnd": 39, "index": 8, "lemma": "lazy", "originalText": "lazy", "pos": "JJ", "word": "lazy", }, { "after": "", "before": " ", "characterOffsetBegin": 40, "characterOffsetEnd": 43, "index": 9, "lemma": "dog", "originalText": "dog", "pos": "NN", "word": "dog", }, ], } ] } corenlp_parser.api_call = MagicMock(return_value=api_return_value) input_string = "The quick brown fox jumps over the lazy dog".split() expected_output = Tree( "ROOT", [ Tree( "NP", [ Tree( "NP", [ Tree("DT", ["The"]), Tree("JJ", ["quick"]), Tree("JJ", ["brown"]), Tree("NN", ["fox"]), ], ), Tree( "NP", [ Tree("NP", [Tree("NNS", ["jumps"])]), Tree( "PP", [ Tree("IN", ["over"]), Tree( "NP", [ Tree("DT", ["the"]), Tree("JJ", ["lazy"]), Tree("NN", ["dog"]), ], ), ], ), ], ), ], ) ], ) parsed_data = next(corenlp_parser.parse(input_string)) corenlp_parser.api_call.assert_called_once_with( "The quick brown fox jumps over the lazy dog", properties={"ssplit.eolonly": "true"}, ) self.assertEqual(expected_output, parsed_data) def test_dependency_parser(self): corenlp_parser = corenlp.CoreNLPDependencyParser() api_return_value = { "sentences": [ { "basicDependencies": [ { "dep": "ROOT", 
"dependent": 5, "dependentGloss": "jumps", "governor": 0, "governorGloss": "ROOT", }, { "dep": "det", "dependent": 1, "dependentGloss": "The", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 2, "dependentGloss": "quick", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 3, "dependentGloss": "brown", "governor": 4, "governorGloss": "fox", }, { "dep": "nsubj", "dependent": 4, "dependentGloss": "fox", "governor": 5, "governorGloss": "jumps", }, { "dep": "case", "dependent": 6, "dependentGloss": "over", "governor": 9, "governorGloss": "dog", }, { "dep": "det", "dependent": 7, "dependentGloss": "the", "governor": 9, "governorGloss": "dog", }, { "dep": "amod", "dependent": 8, "dependentGloss": "lazy", "governor": 9, "governorGloss": "dog", }, { "dep": "nmod", "dependent": 9, "dependentGloss": "dog", "governor": 5, "governorGloss": "jumps", }, ], "enhancedDependencies": [ { "dep": "ROOT", "dependent": 5, "dependentGloss": "jumps", "governor": 0, "governorGloss": "ROOT", }, { "dep": "det", "dependent": 1, "dependentGloss": "The", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 2, "dependentGloss": "quick", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 3, "dependentGloss": "brown", "governor": 4, "governorGloss": "fox", }, { "dep": "nsubj", "dependent": 4, "dependentGloss": "fox", "governor": 5, "governorGloss": "jumps", }, { "dep": "case", "dependent": 6, "dependentGloss": "over", "governor": 9, "governorGloss": "dog", }, { "dep": "det", "dependent": 7, "dependentGloss": "the", "governor": 9, "governorGloss": "dog", }, { "dep": "amod", "dependent": 8, "dependentGloss": "lazy", "governor": 9, "governorGloss": "dog", }, { "dep": "nmod:over", "dependent": 9, "dependentGloss": "dog", "governor": 5, "governorGloss": "jumps", }, ], "enhancedPlusPlusDependencies": [ { "dep": "ROOT", "dependent": 5, "dependentGloss": "jumps", "governor": 0, "governorGloss": "ROOT", }, { "dep": "det", "dependent": 1, "dependentGloss": "The", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 2, "dependentGloss": "quick", "governor": 4, "governorGloss": "fox", }, { "dep": "amod", "dependent": 3, "dependentGloss": "brown", "governor": 4, "governorGloss": "fox", }, { "dep": "nsubj", "dependent": 4, "dependentGloss": "fox", "governor": 5, "governorGloss": "jumps", }, { "dep": "case", "dependent": 6, "dependentGloss": "over", "governor": 9, "governorGloss": "dog", }, { "dep": "det", "dependent": 7, "dependentGloss": "the", "governor": 9, "governorGloss": "dog", }, { "dep": "amod", "dependent": 8, "dependentGloss": "lazy", "governor": 9, "governorGloss": "dog", }, { "dep": "nmod:over", "dependent": 9, "dependentGloss": "dog", "governor": 5, "governorGloss": "jumps", }, ], "index": 0, "tokens": [ { "after": " ", "before": "", "characterOffsetBegin": 0, "characterOffsetEnd": 3, "index": 1, "lemma": "the", "originalText": "The", "pos": "DT", "word": "The", }, { "after": " ", "before": " ", "characterOffsetBegin": 4, "characterOffsetEnd": 9, "index": 2, "lemma": "quick", "originalText": "quick", "pos": "JJ", "word": "quick", }, { "after": " ", "before": " ", "characterOffsetBegin": 10, "characterOffsetEnd": 15, "index": 3, "lemma": "brown", "originalText": "brown", "pos": "JJ", "word": "brown", }, { "after": " ", "before": " ", "characterOffsetBegin": 16, "characterOffsetEnd": 19, "index": 4, "lemma": "fox", "originalText": "fox", "pos": "NN", "word": "fox", }, { "after": " ", "before": " ", "characterOffsetBegin": 
20, "characterOffsetEnd": 25, "index": 5, "lemma": "jump", "originalText": "jumps", "pos": "VBZ", "word": "jumps", }, { "after": " ", "before": " ", "characterOffsetBegin": 26, "characterOffsetEnd": 30, "index": 6, "lemma": "over", "originalText": "over", "pos": "IN", "word": "over", }, { "after": " ", "before": " ", "characterOffsetBegin": 31, "characterOffsetEnd": 34, "index": 7, "lemma": "the", "originalText": "the", "pos": "DT", "word": "the", }, { "after": " ", "before": " ", "characterOffsetBegin": 35, "characterOffsetEnd": 39, "index": 8, "lemma": "lazy", "originalText": "lazy", "pos": "JJ", "word": "lazy", }, { "after": "", "before": " ", "characterOffsetBegin": 40, "characterOffsetEnd": 43, "index": 9, "lemma": "dog", "originalText": "dog", "pos": "NN", "word": "dog", }, ], } ] } corenlp_parser.api_call = MagicMock(return_value=api_return_value) input_string = "The quick brown fox jumps over the lazy dog".split() expected_output = Tree( "jumps", [ Tree("fox", ["The", "quick", "brown"]), Tree("dog", ["over", "the", "lazy"]), ], ) parsed_data = next(corenlp_parser.parse(input_string)) corenlp_parser.api_call.assert_called_once_with( "The quick brown fox jumps over the lazy dog", properties={"ssplit.eolonly": "true"}, ) self.assertEqual(expected_output, parsed_data.tree())
Corpus view regression tests: check that corpus views produce the correct sequence of values and report the correct lengths, using a very short file (160 chars), a relatively short file (791 chars), and a longer file (32k chars).
import unittest import nltk.data from nltk.corpus.reader.util import ( StreamBackedCorpusView, read_line_block, read_whitespace_block, ) class TestCorpusViews(unittest.TestCase): linetok = nltk.LineTokenizer(blanklines="keep") names = [ "corpora/inaugural/README", "corpora/inaugural/1793-Washington.txt", "corpora/inaugural/1909-Taft.txt", ] def data(self): for name in self.names: f = nltk.data.find(name) with f.open() as fp: file_data = fp.read().decode("utf8") yield f, file_data def test_correct_values(self): for f, file_data in self.data(): v = StreamBackedCorpusView(f, read_whitespace_block) self.assertEqual(list(v), file_data.split()) v = StreamBackedCorpusView(f, read_line_block) self.assertEqual(list(v), self.linetok.tokenize(file_data)) def test_correct_length(self): for f, file_data in self.data(): v = StreamBackedCorpusView(f, read_whitespace_block) self.assertEqual(len(v), len(file_data.split())) v = StreamBackedCorpusView(f, read_line_block) self.assertEqual(len(v), len(self.linetok.tokenize(file_data)))
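For readers unfamiliar with the class under test, here is a minimal usage sketch (reusing the first corpus file from the test above); a StreamBackedCorpusView lazily tokenizes the file with the supplied block reader while behaving like a list:

import nltk.data
from nltk.corpus.reader.util import StreamBackedCorpusView, read_whitespace_block

# Build a whitespace-tokenized view over the inaugural README.
path = nltk.data.find("corpora/inaugural/README")
view = StreamBackedCorpusView(path, read_whitespace_block)
# len() and slicing work without eagerly reading the whole file.
print(len(view), view[:5])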
Unit tests for nltk.metrics.agreement (disagreement). A simple test based on https://github.com/foolswood/krippendorffs_alpha/raw/master/krippendorff.pdf, plus the same simple test with one rating removed; removing that rating should not matter, since Krippendorff's alpha ignores items with only one rating. A more advanced test based on http://www.agreestat.com/research_papers/onkrippendorffalpha.pdf, plus the same more advanced example with one rating removed; again, removing that one rating should not matter.
import unittest from nltk.metrics.agreement import AnnotationTask class TestDisagreement(unittest.TestCase): def test_easy(self): data = [ ("coder1", "dress1", "YES"), ("coder2", "dress1", "NO"), ("coder3", "dress1", "NO"), ("coder1", "dress2", "YES"), ("coder2", "dress2", "NO"), ("coder3", "dress3", "NO"), ] annotation_task = AnnotationTask(data) self.assertAlmostEqual(annotation_task.alpha(), -0.3333333) def test_easy2(self): data = [ ("coder1", "dress1", "YES"), ("coder2", "dress1", "NO"), ("coder3", "dress1", "NO"), ("coder1", "dress2", "YES"), ("coder2", "dress2", "NO"), ] annotation_task = AnnotationTask(data) self.assertAlmostEqual(annotation_task.alpha(), -0.3333333) def test_advanced(self): data = [ ("A", "1", "1"), ("B", "1", "1"), ("D", "1", "1"), ("A", "2", "2"), ("B", "2", "2"), ("C", "2", "3"), ("D", "2", "2"), ("A", "3", "3"), ("B", "3", "3"), ("C", "3", "3"), ("D", "3", "3"), ("A", "4", "3"), ("B", "4", "3"), ("C", "4", "3"), ("D", "4", "3"), ("A", "5", "2"), ("B", "5", "2"), ("C", "5", "2"), ("D", "5", "2"), ("A", "6", "1"), ("B", "6", "2"), ("C", "6", "3"), ("D", "6", "4"), ("A", "7", "4"), ("B", "7", "4"), ("C", "7", "4"), ("D", "7", "4"), ("A", "8", "1"), ("B", "8", "1"), ("C", "8", "2"), ("D", "8", "1"), ("A", "9", "2"), ("B", "9", "2"), ("C", "9", "2"), ("D", "9", "2"), ("B", "10", "5"), ("C", "10", "5"), ("D", "10", "5"), ("C", "11", "1"), ("D", "11", "1"), ("C", "12", "3"), ] annotation_task = AnnotationTask(data) self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632) def test_advanced2(self): data = [ ("A", "1", "1"), ("B", "1", "1"), ("D", "1", "1"), ("A", "2", "2"), ("B", "2", "2"), ("C", "2", "3"), ("D", "2", "2"), ("A", "3", "3"), ("B", "3", "3"), ("C", "3", "3"), ("D", "3", "3"), ("A", "4", "3"), ("B", "4", "3"), ("C", "4", "3"), ("D", "4", "3"), ("A", "5", "2"), ("B", "5", "2"), ("C", "5", "2"), ("D", "5", "2"), ("A", "6", "1"), ("B", "6", "2"), ("C", "6", "3"), ("D", "6", "4"), ("A", "7", "4"), ("B", "7", "4"), ("C", "7", "4"), ("D", "7", "4"), ("A", "8", "1"), ("B", "8", "1"), ("C", "8", "2"), ("D", "8", "1"), ("A", "9", "2"), ("B", "9", "2"), ("C", "9", "2"), ("D", "9", "2"), ("B", "10", "5"), ("C", "10", "5"), ("D", "10", "5"), ("C", "11", "1"), ("D", "11", "1"), ("C", "12", "3"), ] annotation_task = AnnotationTask(data) self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632)
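For quick reference, a minimal sketch of the metric these tests exercise, reusing the data from test_easy above; because Krippendorff's alpha ignores items with only one rating, dropping the single-coder item (as in test_easy2) leaves the score unchanged:

from nltk.metrics.agreement import AnnotationTask

# (coder, item, label) triples; "dress3" is rated by a single coder only.
data = [
    ("coder1", "dress1", "YES"),
    ("coder2", "dress1", "NO"),
    ("coder3", "dress1", "NO"),
    ("coder1", "dress2", "YES"),
    ("coder2", "dress2", "NO"),
    ("coder3", "dress3", "NO"),
]
print(AnnotationTask(data).alpha())        # ~ -0.3333
print(AnnotationTask(data[:-1]).alpha())   # same value without the single-coder item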
Tests for edit_distance, examining how allowing transpositions and varying substitution_cost affect the result (in the edit chains below, T = transpose, D = delete, I = insert, S = substitute).

Allowing transpositions reduces the number of edits required in some cases: with transpositions, e.g. "abc" -T-> "cba" -D-> "ca" (2 steps); without transpositions, "abc" -D-> "ab" -D-> "a" -I-> "ca" (3 steps); no substitutions required. Note that a substitution_cost higher than 2 doesn't make much sense, as a deletion plus an insertion is identical and always costs 2.

Transpositions don't always reduce the number of edits required: with or without transpositions, "wants" -D-> "wats" -D-> "was" -I-> "wasp" (3 steps, no substitutions required), so the results ought to be the same with and without transpositions. With or without transpositions, "rain" -S-> "sain" -S-> "shin" -I-> "shine" (3 steps, but cost 5 if substitution_cost=2; does require substitutions).

Several potentially interesting typos: with transpositions, "acbdef" -T-> "abcdef" (1 step); without, "acbdef" -D-> "abdef" -I-> "abcdef" (2 steps, no substitutions required). With transpositions, "lnaguaeg" -T-> "languaeg" -T-> "language" (2 steps); without, "lnaguaeg" -D-> "laguaeg" -I-> "languaeg" -D-> "languag" -I-> "language" (4 steps, no substitutions required). With transpositions, "lnaugage" -T-> "lanugage" -T-> "language" (2 steps); without, "lnaugage" -S-> "lnangage" -D-> "langage" -I-> "language" (3 steps, but one substitution, so a cost of 4 if substitution_cost=2; does require substitutions if no transpositions). With transpositions, "lngauage" -T-> "lnaguage" -T-> "language" (2 steps); without, "lngauage" -I-> "lanaguage" -D-> "language" (2 steps, no substitutions required). With or without transpositions, "wants" -S-> "sants" -S-> "swnts" -S-> "swits" -S-> "swims" -D-> "swim" (5 steps); with substitution_cost=2 and transpositions, "wants" -T-> "santw" -D-> "sntw" -D-> "stw" -D-> "sw" -I-> "swi" -I-> "swim" (6 steps); with substitution_cost=2 and no transpositions, "wants" -I-> "swants" -D-> "swant" -D-> "swan" -D-> "swa" -D-> "sw" -I-> "swi" -I-> "swim" (7 steps). With or without transpositions, "kitten" -S-> "sitten" -S-> "sittin" -I-> "sitting" (3 steps, but cost 5 if substitution_cost=2). A duplicated letter: "duuplicated" -D-> "duplicated".

The parametrized test checks edit_distance between two strings, given some substitution_cost and whether transpositions are allowed.
:param str left: first input string to edit_distance
:param str right: second input string to edit_distance
:param int substitution_cost: the cost of a substitution action in edit_distance
:param Tuple[int, int] expecteds: a tuple of expected outputs, such that expecteds[0] is the expected output with transpositions=True, and expecteds[1] is the expected output with transpositions=False
The input strings are tested in both orderings, and zipped with (True, False) to get the transpositions value.
from typing import Tuple import pytest from nltk.metrics.distance import edit_distance class TestEditDistance: @pytest.mark.parametrize( "left,right,substitution_cost,expecteds", [ ("abc", "ca", 1, (2, 3)), ("abc", "ca", 5, (2, 3)), ("wants", "wasp", 1, (3, 3)), ("wants", "wasp", 5, (3, 3)), ("rain", "shine", 1, (3, 3)), ("rain", "shine", 2, (5, 5)), ("acbdef", "abcdef", 1, (1, 2)), ("acbdef", "abcdef", 2, (1, 2)), ("lnaguaeg", "language", 1, (2, 4)), ("lnaguaeg", "language", 2, (2, 4)), ("lnaugage", "language", 1, (2, 3)), ("lnaugage", "language", 2, (2, 4)), ("lngauage", "language", 1, (2, 2)), ("lngauage", "language", 2, (2, 2)), ("wants", "swim", 1, (5, 5)), ("wants", "swim", 2, (6, 7)), ("kitten", "sitting", 1, (3, 3)), ("kitten", "sitting", 2, (5, 5)), ("duplicated", "duuplicated", 1, (1, 1)), ("duplicated", "duuplicated", 2, (1, 1)), ("very duplicated", "very duuplicateed", 2, (2, 2)), ], ) def test_with_transpositions( self, left: str, right: str, substitution_cost: int, expecteds: Tuple[int, int] ): for s1, s2 in ((left, right), (right, left)): for expected, transpositions in zip(expecteds, [True, False]): predicted = edit_distance( s1, s2, substitution_cost=substitution_cost, transpositions=transpositions, ) assert predicted == expected
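A brief usage sketch of the function under test, showing how the transpositions flag and substitution_cost change the result for cases described above:

from nltk.metrics.distance import edit_distance

# "acbdef" -> "abcdef": one transposition, or a delete plus an insert without it.
print(edit_distance("acbdef", "abcdef", transpositions=True))   # 1
print(edit_distance("acbdef", "abcdef", transpositions=False))  # 2

# "rain" -> "shine" needs two substitutions plus an insertion, so raising
# substitution_cost from 1 to 2 raises the distance from 3 to 5.
print(edit_distance("rain", "shine", substitution_cost=1))  # 3
print(edit_distance("rain", "shine", substitution_cost=2))  # 5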
Test that nltk.download works properly both when the parent folder of download_dir exists and when it does not exist (downloading the mwa_ppdb package into a tmp_path-based download_dir).
from nltk import download def test_downloader_using_existing_parent_download_dir(tmp_path): download_dir = str(tmp_path.joinpath("another_dir")) download_status = download("mwa_ppdb", download_dir) assert download_status is True def test_downloader_using_non_existing_parent_download_dir(tmp_path): download_dir = str( tmp_path.joinpath("non-existing-parent-folder", "another-non-existing-folder") ) download_status = download("mwa_ppdb", download_dir) assert download_status is True
HMM forward/backward probability tests. One example is from Wikipedia (https://en.wikipedia.org/wiki/Forward%E2%80%93backward_algorithm), giving the transition probabilities, emission probabilities and initial probabilities of the rain/umbrella model; another is from p. 385 of Huang et al. The examples in Wikipedia are normalized before comparison. The forward-backward algorithm doesn't need b0_5, so _backward_probability doesn't compute it (0.6469, 0.3531).
import pytest from nltk.tag import hmm def _wikipedia_example_hmm(): states = ["rain", "no rain"] symbols = ["umbrella", "no umbrella"] A = [[0.7, 0.3], [0.3, 0.7]] B = [[0.9, 0.1], [0.2, 0.8]] pi = [0.5, 0.5] seq = ["umbrella", "umbrella", "no umbrella", "umbrella", "umbrella"] seq = list(zip(seq, [None] * len(seq))) model = hmm._create_hmm_tagger(states, symbols, A, B, pi) return model, states, symbols, seq def test_forward_probability(): from numpy.testing import assert_array_almost_equal model, states, symbols = hmm._market_hmm_example() seq = [("up", None), ("up", None)] expected = [[0.35, 0.02, 0.09], [0.1792, 0.0085, 0.0357]] fp = 2 ** model._forward_probability(seq) assert_array_almost_equal(fp, expected) def test_forward_probability2(): from numpy.testing import assert_array_almost_equal model, states, symbols, seq = _wikipedia_example_hmm() fp = 2 ** model._forward_probability(seq) fp = (fp.T / fp.sum(axis=1)).T wikipedia_results = [ [0.8182, 0.1818], [0.8834, 0.1166], [0.1907, 0.8093], [0.7308, 0.2692], [0.8673, 0.1327], ] assert_array_almost_equal(wikipedia_results, fp, 4) def test_backward_probability(): from numpy.testing import assert_array_almost_equal model, states, symbols, seq = _wikipedia_example_hmm() bp = 2 ** model._backward_probability(seq) bp = (bp.T / bp.sum(axis=1)).T wikipedia_results = [ [0.5923, 0.4077], [0.3763, 0.6237], [0.6533, 0.3467], [0.6273, 0.3727], [0.5, 0.5], ] assert_array_almost_equal(wikipedia_results, bp, 4) def setup_module(module): pytest.importorskip("numpy")
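For reference, a self-contained NumPy sketch of the forward pass on the same Wikipedia umbrella model (the variable names here are illustrative, not part of the NLTK API); after row-normalisation the filtered probabilities match the figures checked in test_forward_probability2:

import numpy as np

A = np.array([[0.7, 0.3], [0.3, 0.7]])   # transitions: rain, no rain
B = np.array([[0.9, 0.1], [0.2, 0.8]])   # emissions: umbrella, no umbrella
pi = np.array([0.5, 0.5])                # initial state distribution
obs = [0, 0, 1, 0, 0]                    # umbrella, umbrella, no umbrella, umbrella, umbrella

# Standard forward recursion: alpha[t, j] = sum_i alpha[t-1, i] * A[i, j] * B[j, obs[t]]
alpha = np.zeros((len(obs), 2))
alpha[0] = pi * B[:, obs[0]]
for t in range(1, len(obs)):
    alpha[t] = (alpha[t - 1] @ A) * B[:, obs[t]]

filtered = alpha / alpha.sum(axis=1, keepdims=True)
print(filtered.round(4))  # first row ~ [0.8182, 0.1818], last row ~ [0.8673, 0.1327]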
Natural Language Toolkit: Twitter client. (C) 2001-2023 NLTK Project. Author: Lorenzo Rubio <lrnzcig@gmail.com>. URL: https://www.nltk.org/. For license information, see LICENSE.TXT. Regression tests for json2csv and json2csv_entities in the twitter package: output files are compared to reference files ignoring carriage returns and leading/trailing whitespace, with a sanity check that the file comparison is not giving false positives.
from pathlib import Path import pytest from nltk.corpus import twitter_samples from nltk.twitter.common import json2csv, json2csv_entities def files_are_identical(pathA, pathB): f1 = [l.strip() for l in pathA.read_bytes().splitlines()] f2 = [l.strip() for l in pathB.read_bytes().splitlines()] return f1 == f2 subdir = Path(__file__).parent / "files" @pytest.fixture def infile(): with open(twitter_samples.abspath("tweets.20150430-223406.json")) as infile: return [next(infile) for x in range(100)] def test_textoutput(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.text.csv.ref" outfn = tmp_path / "tweets.20150430-223406.text.csv" json2csv(infile, outfn, ["text"], gzip_compress=False) assert files_are_identical(outfn, ref_fn) def test_tweet_metadata(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.tweet.csv.ref" fields = [ "created_at", "favorite_count", "id", "in_reply_to_status_id", "in_reply_to_user_id", "retweet_count", "retweeted", "text", "truncated", "user.id", ] outfn = tmp_path / "tweets.20150430-223406.tweet.csv" json2csv(infile, outfn, fields, gzip_compress=False) assert files_are_identical(outfn, ref_fn) def test_user_metadata(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.user.csv.ref" fields = ["id", "text", "user.id", "user.followers_count", "user.friends_count"] outfn = tmp_path / "tweets.20150430-223406.user.csv" json2csv(infile, outfn, fields, gzip_compress=False) assert files_are_identical(outfn, ref_fn) def test_tweet_hashtag(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.hashtag.csv.ref" outfn = tmp_path / "tweets.20150430-223406.hashtag.csv" json2csv_entities( infile, outfn, ["id", "text"], "hashtags", ["text"], gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_tweet_usermention(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.usermention.csv.ref" outfn = tmp_path / "tweets.20150430-223406.usermention.csv" json2csv_entities( infile, outfn, ["id", "text"], "user_mentions", ["id", "screen_name"], gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_tweet_media(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.media.csv.ref" outfn = tmp_path / "tweets.20150430-223406.media.csv" json2csv_entities( infile, outfn, ["id"], "media", ["media_url", "url"], gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_tweet_url(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.url.csv.ref" outfn = tmp_path / "tweets.20150430-223406.url.csv" json2csv_entities( infile, outfn, ["id"], "urls", ["url", "expanded_url"], gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_userurl(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.userurl.csv.ref" outfn = tmp_path / "tweets.20150430-223406.userurl.csv" json2csv_entities( infile, outfn, ["id", "screen_name"], "user.urls", ["url", "expanded_url"], gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_tweet_place(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.place.csv.ref" outfn = tmp_path / "tweets.20150430-223406.place.csv" json2csv_entities( infile, outfn, ["id", "text"], "place", ["name", "country"], gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_tweet_place_boundingbox(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.placeboundingbox.csv.ref" outfn = tmp_path / "tweets.20150430-223406.placeboundingbox.csv" json2csv_entities( infile, outfn, ["id", "name"], "place.bounding_box", ["coordinates"], 
gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_retweet_original_tweet(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.retweet.csv.ref" outfn = tmp_path / "tweets.20150430-223406.retweet.csv" json2csv_entities( infile, outfn, ["id"], "retweeted_status", [ "created_at", "favorite_count", "id", "in_reply_to_status_id", "in_reply_to_user_id", "retweet_count", "text", "truncated", "user.id", ], gzip_compress=False, ) assert files_are_identical(outfn, ref_fn) def test_file_is_wrong(tmp_path, infile): ref_fn = subdir / "tweets.20150430-223406.retweet.csv.ref" outfn = tmp_path / "tweets.20150430-223406.text.csv" json2csv(infile, outfn, ["text"], gzip_compress=False) assert not files_are_identical(outfn, ref_fn)
Test the likelihood ratio metric.
import unittest from nltk.metrics import ( BigramAssocMeasures, QuadgramAssocMeasures, TrigramAssocMeasures, ) _DELTA = 1e-8 class TestLikelihoodRatio(unittest.TestCase): def test_lr_bigram(self): self.assertAlmostEqual( BigramAssocMeasures.likelihood_ratio(2, (4, 4), 20), 2.4142743368419755, delta=_DELTA, ) self.assertAlmostEqual( BigramAssocMeasures.likelihood_ratio(1, (1, 1), 1), 0.0, delta=_DELTA ) self.assertRaises( ValueError, BigramAssocMeasures.likelihood_ratio, *(0, (2, 2), 2), ) def test_lr_trigram(self): self.assertAlmostEqual( TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 2), 5.545177444479562, delta=_DELTA, ) self.assertAlmostEqual( TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 1), 0.0, delta=_DELTA, ) self.assertRaises( ValueError, TrigramAssocMeasures.likelihood_ratio, *(1, (1, 1, 2), (1, 1, 2), 2), ) def test_lr_quadgram(self): self.assertAlmostEqual( QuadgramAssocMeasures.likelihood_ratio( 1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 2 ), 8.317766166719343, delta=_DELTA, ) self.assertAlmostEqual( QuadgramAssocMeasures.likelihood_ratio( 1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 1 ), 0.0, delta=_DELTA, ) self.assertRaises( ValueError, QuadgramAssocMeasures.likelihood_ratio, *(1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 2), (1, 1, 1, 1), 1), )
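A minimal call sketch for the measure under test, using the toy contingency values from test_lr_bigram: the joint count n_ii, the marginals (n_ix, n_xi) and the total number of bigrams n_xx. In practice the measure is usually handed to a collocation finder (e.g. BigramCollocationFinder.nbest) rather than called directly:

from nltk.metrics import BigramAssocMeasures

# 2 joint occurrences, each word seen 4 times overall, 20 bigrams in total.
score = BigramAssocMeasures.likelihood_ratio(2, (4, 4), 20)
print(round(score, 7))  # ~2.4142743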
Unit tests for nltk.corpus.nombank: load the nombank once and check the number of instances, rolesets and nouns.
import unittest from nltk.corpus import nombank nombank.nouns() class NombankDemo(unittest.TestCase): def test_numbers(self): self.assertEqual(len(nombank.instances()), 114574) self.assertEqual(len(nombank.rolesets()), 5577) self.assertEqual(len(nombank.nouns()), 4704) def test_instance(self): self.assertEqual(nombank.instances()[0].roleset, "perc-sign.01") def test_framefiles_fileids(self): self.assertEqual(len(nombank.fileids()), 4705) self.assertTrue(all(fileid.endswith(".xml") for fileid in nombank.fileids()))
Tests for nltk.pos_tag, including a test for the default kwarg lang=None and one that tries to force the lang='eng' option on non-English text.
import unittest from nltk import pos_tag, word_tokenize class TestPosTag(unittest.TestCase): def test_pos_tag_eng(self): text = "John's big idea isn't all that bad." expected_tagged = [ ("John", "NNP"), ("'s", "POS"), ("big", "JJ"), ("idea", "NN"), ("is", "VBZ"), ("n't", "RB"), ("all", "PDT"), ("that", "DT"), ("bad", "JJ"), (".", "."), ] assert pos_tag(word_tokenize(text)) == expected_tagged def test_pos_tag_eng_universal(self): text = "John's big idea isn't all that bad." expected_tagged = [ ("John", "NOUN"), ("'s", "PRT"), ("big", "ADJ"), ("idea", "NOUN"), ("is", "VERB"), ("n't", "ADV"), ("all", "DET"), ("that", "DET"), ("bad", "ADJ"), (".", "."), ] assert pos_tag(word_tokenize(text), tagset="universal") == expected_tagged def test_pos_tag_rus(self): text = "Илья оторопел и дважды перечитал бумажку." expected_tagged = [ ("Илья", "S"), ("оторопел", "V"), ("и", "CONJ"), ("дважды", "ADV"), ("перечитал", "V"), ("бумажку", "S"), (".", "NONLEX"), ] assert pos_tag(word_tokenize(text), lang="rus") == expected_tagged def test_pos_tag_rus_universal(self): text = "Илья оторопел и дважды перечитал бумажку." expected_tagged = [ ("Илья", "NOUN"), ("оторопел", "VERB"), ("и", "CONJ"), ("дважды", "ADV"), ("перечитал", "VERB"), ("бумажку", "NOUN"), (".", "."), ] assert ( pos_tag(word_tokenize(text), tagset="universal", lang="rus") == expected_tagged ) def test_pos_tag_unknown_lang(self): text = "모르겠 습니 다" self.assertRaises(NotImplementedError, pos_tag, word_tokenize(text), lang="kor") self.assertRaises(NotImplementedError, pos_tag, word_tokenize(text), lang=None) def test_unspecified_lang(self): text = "모르겠 습니 다" expected_but_wrong = [("모르겠", "JJ"), ("습니", "NNP"), ("다", "NN")] assert pos_tag(word_tokenize(text)) == expected_but_wrong
Tests for the RIBES translation evaluation metric ('worder' is short for word order). Verify that two sentences with no alignment get the lowest possible RIBES score; that two sentences with just one match, and very little correspondence, score 0; and that two sentences with two matches still get the lowest possible RIBES score due to the lack of similarity. Also a test based on the doctest of the corpus_ribes function, and a regression test for issue #2529, asserting that no ZeroDivisionError is thrown.
from nltk.translate.ribes_score import corpus_ribes, word_rank_alignment def test_ribes_empty_worder(): hyp = "This is a nice sentence which I quite like".split() ref = "Okay well that's neat and all but the reference's different".split() assert word_rank_alignment(ref, hyp) == [] list_of_refs = [[ref]] hypotheses = [hyp] assert corpus_ribes(list_of_refs, hypotheses) == 0.0 def test_ribes_one_worder(): hyp = "This is a nice sentence which I quite like".split() ref = "Okay well that's nice and all but the reference's different".split() assert word_rank_alignment(ref, hyp) == [3] list_of_refs = [[ref]] hypotheses = [hyp] assert corpus_ribes(list_of_refs, hypotheses) == 0.0 def test_ribes_two_worder(): hyp = "This is a nice sentence which I quite like".split() ref = "Okay well that's nice and all but the reference is different".split() assert word_rank_alignment(ref, hyp) == [9, 3] list_of_refs = [[ref]] hypotheses = [hyp] assert corpus_ribes(list_of_refs, hypotheses) == 0.0 def test_ribes(): hyp1 = [ "It", "is", "a", "guide", "to", "action", "which", "ensures", "that", "the", "military", "always", "obeys", "the", "commands", "of", "the", "party", ] ref1a = [ "It", "is", "a", "guide", "to", "action", "that", "ensures", "that", "the", "military", "will", "forever", "heed", "Party", "commands", ] ref1b = [ "It", "is", "the", "guiding", "principle", "which", "guarantees", "the", "military", "forces", "always", "being", "under", "the", "command", "of", "the", "Party", ] ref1c = [ "It", "is", "the", "practical", "guide", "for", "the", "army", "always", "to", "heed", "the", "directions", "of", "the", "party", ] hyp2 = [ "he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history", ] ref2a = [ "he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book", ] list_of_refs = [[ref1a, ref1b, ref1c], [ref2a]] hypotheses = [hyp1, hyp2] score = corpus_ribes(list_of_refs, hypotheses) assert round(score, 4) == 0.3597 def test_no_zero_div(): hyp1 = [ "It", "is", "a", "guide", "to", "action", "which", "ensures", "that", "the", "military", "always", "obeys", "the", "commands", "of", "the", "party", ] ref1a = [ "It", "is", "a", "guide", "to", "action", "that", "ensures", "that", "the", "military", "will", "forever", "heed", "Party", "commands", ] ref1b = [ "It", "is", "the", "guiding", "principle", "which", "guarantees", "the", "military", "forces", "always", "being", "under", "the", "command", "of", "the", "Party", ] ref1c = [ "It", "is", "the", "practical", "guide", "for", "the", "army", "always", "to", "heed", "the", "directions", "of", "the", "party", ] hyp2 = ["he", "read", "the"] ref2a = ["he", "was", "interested", "in", "world", "history", "because", "he"] list_of_refs = [[ref1a, ref1b, ref1c], [ref2a]] hypotheses = [hyp1, hyp2] score = corpus_ribes(list_of_refs, hypotheses) assert round(score, 4) == 0.1688
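A short usage sketch of the two functions exercised above; word_rank_alignment returns, roughly, the reference positions of the hypothesis words that also occur in the reference (in hypothesis order), and corpus_ribes scores a list of (references, hypothesis) pairs:

from nltk.translate.ribes_score import corpus_ribes, word_rank_alignment

hyp = "This is a nice sentence which I quite like".split()
ref = "Okay well that's nice and all but the reference is different".split()

print(word_rank_alignment(ref, hyp))  # [9, 3] -- only "is" and "nice" align
print(corpus_ribes([[ref]], [hyp]))   # 0.0, too little order overlap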
Unit tests for Senna: the nltk.classify.Senna pipeline interface, and the nltk.tag Senna taggers (POS tagger, chunk tagger and NER tagger). The Senna executable path for the tests is set to a default if it is not specified as an environment variable.
import unittest from os import environ, path, sep from nltk.classify import Senna from nltk.tag import SennaChunkTagger, SennaNERTagger, SennaTagger if "SENNA" in environ: SENNA_EXECUTABLE_PATH = path.normpath(environ["SENNA"]) + sep else: SENNA_EXECUTABLE_PATH = "/usr/share/senna-v3.0" senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH) @unittest.skipUnless(senna_is_installed, "Requires Senna executable") class TestSennaPipeline(unittest.TestCase): def test_senna_pipeline(self): pipeline = Senna(SENNA_EXECUTABLE_PATH, ["pos", "chk", "ner"]) sent = "Dusseldorf is an international business center".split() result = [ (token["word"], token["chk"], token["ner"], token["pos"]) for token in pipeline.tag(sent) ] expected = [ ("Dusseldorf", "B-NP", "B-LOC", "NNP"), ("is", "B-VP", "O", "VBZ"), ("an", "B-NP", "O", "DT"), ("international", "I-NP", "O", "JJ"), ("business", "I-NP", "O", "NN"), ("center", "I-NP", "O", "NN"), ] self.assertEqual(result, expected) @unittest.skipUnless(senna_is_installed, "Requires Senna executable") class TestSennaTagger(unittest.TestCase): def test_senna_tagger(self): tagger = SennaTagger(SENNA_EXECUTABLE_PATH) result = tagger.tag("What is the airspeed of an unladen swallow ?".split()) expected = [ ("What", "WP"), ("is", "VBZ"), ("the", "DT"), ("airspeed", "NN"), ("of", "IN"), ("an", "DT"), ("unladen", "NN"), ("swallow", "NN"), ("?", "."), ] self.assertEqual(result, expected) def test_senna_chunk_tagger(self): chktagger = SennaChunkTagger(SENNA_EXECUTABLE_PATH) result_1 = chktagger.tag("What is the airspeed of an unladen swallow ?".split()) expected_1 = [ ("What", "B-NP"), ("is", "B-VP"), ("the", "B-NP"), ("airspeed", "I-NP"), ("of", "B-PP"), ("an", "B-NP"), ("unladen", "I-NP"), ("swallow", "I-NP"), ("?", "O"), ] result_2 = list(chktagger.bio_to_chunks(result_1, chunk_type="NP")) expected_2 = [ ("What", "0"), ("the airspeed", "2-3"), ("an unladen swallow", "5-6-7"), ] self.assertEqual(result_1, expected_1) self.assertEqual(result_2, expected_2) def test_senna_ner_tagger(self): nertagger = SennaNERTagger(SENNA_EXECUTABLE_PATH) result_1 = nertagger.tag("Shakespeare theatre was in London .".split()) expected_1 = [ ("Shakespeare", "B-PER"), ("theatre", "O"), ("was", "O"), ("in", "O"), ("London", "B-LOC"), (".", "O"), ] result_2 = nertagger.tag("UN headquarters are in NY , USA .".split()) expected_2 = [ ("UN", "B-ORG"), ("headquarters", "O"), ("are", "O"), ("in", "O"), ("NY", "B-LOC"), (",", "O"), ("USA", "B-LOC"), (".", "O"), ] self.assertEqual(result_1, expected_1) self.assertEqual(result_2, expected_2)
Unit tests for stemmers. The Snowball Arabic light stemmer (which deals with prefixes and suffixes) is tested with ignore_stopwords=True, with ignore_stopwords=False, and when created without an explicit ignore_stopwords value; stop words are covered. The Russian, German and Spanish stemmers are also tested (the word 'algue' was raising an IndexError). The Porter tests stem all words from the test vocabulary provided by M. Porter: the sample vocabulary and output were sourced from https://tartarus.org/martin/PorterStemmer/voc.txt and https://tartarus.org/martin/PorterStemmer/output.txt, which are linked from the Porter stemmer algorithm's homepage at https://tartarus.org/martin/PorterStemmer/. The list of stems for the original-mode test was generated by taking the Martin-blessed stemmer from https://tartarus.org/martin/PorterStemmer/c.txt, removing all the DEPARTURE sections, and running it against Martin's test vocabulary. There is also a test for bug https://github.com/nltk/nltk/issues/1581 (ensures that 'oed' can be stemmed without throwing an error) and for the improvement in https://github.com/nltk/nltk/issues/2507 (ensures that stems are lowercased when to_lowercase=True).
import unittest from contextlib import closing from nltk import data from nltk.stem.porter import PorterStemmer from nltk.stem.snowball import SnowballStemmer class SnowballTest(unittest.TestCase): def test_arabic(self): ar_stemmer = SnowballStemmer("arabic", True) assert ar_stemmer.stem("الْعَرَبِــــــيَّة") == "عرب" assert ar_stemmer.stem("العربية") == "عرب" assert ar_stemmer.stem("فقالوا") == "قال" assert ar_stemmer.stem("الطالبات") == "طالب" assert ar_stemmer.stem("فالطالبات") == "طالب" assert ar_stemmer.stem("والطالبات") == "طالب" assert ar_stemmer.stem("الطالبون") == "طالب" assert ar_stemmer.stem("اللذان") == "اللذان" assert ar_stemmer.stem("من") == "من" ar_stemmer = SnowballStemmer("arabic", False) assert ar_stemmer.stem("اللذان") == "اللذ" assert ar_stemmer.stem("الطالبات") == "طالب" assert ar_stemmer.stem("الكلمات") == "كلم" ar_stemmer = SnowballStemmer("arabic") assert ar_stemmer.stem("الْعَرَبِــــــيَّة") == "عرب" assert ar_stemmer.stem("العربية") == "عرب" assert ar_stemmer.stem("فقالوا") == "قال" assert ar_stemmer.stem("الطالبات") == "طالب" assert ar_stemmer.stem("الكلمات") == "كلم" def test_russian(self): stemmer_russian = SnowballStemmer("russian") assert stemmer_russian.stem("авантненькая") == "авантненьк" def test_german(self): stemmer_german = SnowballStemmer("german") stemmer_german2 = SnowballStemmer("german", ignore_stopwords=True) assert stemmer_german.stem("Schr\xe4nke") == "schrank" assert stemmer_german2.stem("Schr\xe4nke") == "schrank" assert stemmer_german.stem("keinen") == "kein" assert stemmer_german2.stem("keinen") == "keinen" def test_spanish(self): stemmer = SnowballStemmer("spanish") assert stemmer.stem("Visionado") == "vision" assert stemmer.stem("algue") == "algu" def test_short_strings_bug(self): stemmer = SnowballStemmer("english") assert stemmer.stem("y's") == "y" class PorterTest(unittest.TestCase): def _vocabulary(self): with closing( data.find("stemmers/porter_test/porter_vocabulary.txt").open( encoding="utf-8" ) ) as fp: return fp.read().splitlines() def _test_against_expected_output(self, stemmer_mode, expected_stems): stemmer = PorterStemmer(mode=stemmer_mode) for word, true_stem in zip(self._vocabulary(), expected_stems): our_stem = stemmer.stem(word) assert ( our_stem == true_stem ), "{} should stem to {} in {} mode but got {}".format( word, true_stem, stemmer_mode, our_stem, ) def test_vocabulary_martin_mode(self): with closing( data.find("stemmers/porter_test/porter_martin_output.txt").open( encoding="utf-8" ) ) as fp: self._test_against_expected_output( PorterStemmer.MARTIN_EXTENSIONS, fp.read().splitlines() ) def test_vocabulary_nltk_mode(self): with closing( data.find("stemmers/porter_test/porter_nltk_output.txt").open( encoding="utf-8" ) ) as fp: self._test_against_expected_output( PorterStemmer.NLTK_EXTENSIONS, fp.read().splitlines() ) def test_vocabulary_original_mode(self): with closing( data.find("stemmers/porter_test/porter_original_output.txt").open( encoding="utf-8" ) ) as fp: self._test_against_expected_output( PorterStemmer.ORIGINAL_ALGORITHM, fp.read().splitlines() ) self._test_against_expected_output( PorterStemmer.ORIGINAL_ALGORITHM, data.find("stemmers/porter_test/porter_original_output.txt") .open(encoding="utf-8") .read() .splitlines(), ) def test_oed_bug(self): assert PorterStemmer().stem("oed") == "o" def test_lowercase_option(self): porter = PorterStemmer() assert porter.stem("On") == "on" assert porter.stem("I") == "i" assert porter.stem("I", to_lowercase=False) == "I" assert porter.stem("Github") == "github" assert 
porter.stem("Github", to_lowercase=False) == "Github"
Tests for static parts of the twitter package: tests that Twitter credentials from a file are handled correctly. First test that the TWITTER environment variable has been read correctly. Each of the following scenarios should then raise an error: an empty subdir path; a subdir path of None; a non-existent directory; credentials.txt not being in the default subdir, as read from os.environ['TWITTER']; a non-existent credentials file ('foobar'); bad_oauth1-1.txt being incomplete; the first key in credentials file bad_oauth1-2.txt being ill-formed; and the first two lines in bad_oauth1-3.txt being collapsed. These scenarios raise ValueError ('zero length field name in format', on Python 2.6) or OSError for the rest. Finally, test that a proper credentials file succeeds and is read correctly.
import os import pytest pytest.importorskip("twython") from nltk.twitter import Authenticate @pytest.fixture def auth(): return Authenticate() class TestCredentials: @classmethod def setup_class(self): self.subdir = os.path.join(os.path.dirname(__file__), "files") os.environ["TWITTER"] = "twitter-files" def test_environment(self, auth): fn = os.path.basename(auth.creds_subdir) assert fn == os.environ["TWITTER"] @pytest.mark.parametrize( "kwargs", [ {"subdir": ""}, {"subdir": None}, {"subdir": "/nosuchdir"}, {}, {"creds_file": "foobar"}, {"creds_file": "bad_oauth1-1.txt"}, {"creds_file": "bad_oauth1-2.txt"}, {"creds_file": "bad_oauth1-3.txt"}, ], ) def test_scenarios_that_should_raise_errors(self, kwargs, auth): try: auth.load_creds(**kwargs) except (OSError, ValueError): pass except Exception as e: pytest.fail("Unexpected exception thrown: %s" % e) else: pytest.fail("OSError exception not thrown.") def test_correct_file(self, auth): oauth = auth.load_creds(subdir=self.subdir) assert auth.creds_fullpath == os.path.join(self.subdir, auth.creds_file) assert auth.creds_file == "credentials.txt" assert oauth["app_key"] == "a"
Tests for nltk.util.everygrams. A fixture forms the test data, iter(["a", "b", "c"]); the tests check the output without padding, with max_len=2, with min_len=2, with pad_right=True, and with pad_left=True.
import pytest from nltk.util import everygrams @pytest.fixture def everygram_input(): return iter(["a", "b", "c"]) def test_everygrams_without_padding(everygram_input): expected_output = [ ("a",), ("a", "b"), ("a", "b", "c"), ("b",), ("b", "c"), ("c",), ] output = list(everygrams(everygram_input)) assert output == expected_output def test_everygrams_max_len(everygram_input): expected_output = [ ("a",), ("a", "b"), ("b",), ("b", "c"), ("c",), ] output = list(everygrams(everygram_input, max_len=2)) assert output == expected_output def test_everygrams_min_len(everygram_input): expected_output = [ ("a", "b"), ("a", "b", "c"), ("b", "c"), ] output = list(everygrams(everygram_input, min_len=2)) assert output == expected_output def test_everygrams_pad_right(everygram_input): expected_output = [ ("a",), ("a", "b"), ("a", "b", "c"), ("b",), ("b", "c"), ("b", "c", None), ("c",), ("c", None), ("c", None, None), (None,), (None, None), (None,), ] output = list(everygrams(everygram_input, max_len=3, pad_right=True)) assert output == expected_output def test_everygrams_pad_left(everygram_input): expected_output = [ (None,), (None, None), (None, None, "a"), (None,), (None, "a"), (None, "a", "b"), ("a",), ("a", "b"), ("a", "b", "c"), ("b",), ("b", "c"), ("c",), ] output = list(everygrams(everygram_input, max_len=3, pad_left=True)) assert output == expected_output
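For reference, a minimal sketch of the function under test; everygrams yields every n-gram of the input between min_len and max_len, optionally padding with None on either side:

from nltk.util import everygrams

tokens = ["a", "b", "c"]
print(list(everygrams(tokens)))             # all 1- to 3-grams
print(list(everygrams(tokens, max_len=2)))  # unigrams and bigrams only
print(list(everygrams(tokens, min_len=2)))  # bigrams and trigrams only
print(list(everygrams(tokens, max_len=2, pad_left=True, pad_right=True)))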
Unit tests for nltk.corpus.wordnet (see also nltk/test/wordnet.doctest). Not every synset has hypernyms. Tests cover hyper-/hyponyms, root hyper-/hyponyms, derivationally_related_forms, meronyms/holonyms, antonyms, miscellaneous relations, pertainyms, lowest common hypernyms (LCH), domains and in-domains, path-based similarities, information-content similarities, and a duck test for iterables (see https://stackoverflow.com/a/36230057/610569).
import unittest from nltk.corpus import wordnet as wn from nltk.corpus import wordnet_ic as wnic wn.ensure_loaded() S = wn.synset L = wn.lemma class WordnNetDemo(unittest.TestCase): def test_retrieve_synset(self): move_synset = S("go.v.21") self.assertEqual(move_synset.name(), "move.v.15") self.assertEqual(move_synset.lemma_names(), ["move", "go"]) self.assertEqual( move_synset.definition(), "have a turn; make one's move in a game" ) self.assertEqual(move_synset.examples(), ["Can I go now?"]) def test_retrieve_synsets(self): self.assertEqual(sorted(wn.synsets("zap", pos="n")), [S("zap.n.01")]) self.assertEqual( sorted(wn.synsets("zap", pos="v")), [S("microwave.v.01"), S("nuke.v.01"), S("zap.v.01"), S("zap.v.02")], ) def test_hyperhyponyms(self): self.assertEqual(S("travel.v.01").hypernyms(), []) self.assertEqual(S("travel.v.02").hypernyms(), [S("travel.v.03")]) self.assertEqual(S("travel.v.03").hypernyms(), []) self.assertEqual(S("breakfast.n.1").hypernyms(), [S("meal.n.01")]) first_five_meal_hypo = [ S("banquet.n.02"), S("bite.n.04"), S("breakfast.n.01"), S("brunch.n.01"), S("buffet.n.02"), ] self.assertEqual(sorted(S("meal.n.1").hyponyms()[:5]), first_five_meal_hypo) self.assertEqual(S("Austen.n.1").instance_hypernyms(), [S("writer.n.01")]) first_five_composer_hypo = [ S("ambrose.n.01"), S("bach.n.01"), S("barber.n.01"), S("bartok.n.01"), S("beethoven.n.01"), ] self.assertEqual( S("composer.n.1").instance_hyponyms()[:5], first_five_composer_hypo ) self.assertEqual(S("person.n.01").root_hypernyms(), [S("entity.n.01")]) self.assertEqual(S("sail.v.01").root_hypernyms(), [S("travel.v.01")]) self.assertEqual( S("fall.v.12").root_hypernyms(), [S("act.v.01"), S("fall.v.17")] ) def test_derivationally_related_forms(self): self.assertEqual( L("zap.v.03.nuke").derivationally_related_forms(), [L("atomic_warhead.n.01.nuke")], ) self.assertEqual( L("zap.v.03.atomize").derivationally_related_forms(), [L("atomization.n.02.atomization")], ) self.assertEqual( L("zap.v.03.atomise").derivationally_related_forms(), [L("atomization.n.02.atomisation")], ) self.assertEqual(L("zap.v.03.zap").derivationally_related_forms(), []) def test_meronyms_holonyms(self): self.assertEqual( S("dog.n.01").member_holonyms(), [S("canis.n.01"), S("pack.n.06")] ) self.assertEqual(S("dog.n.01").part_meronyms(), [S("flag.n.07")]) self.assertEqual(S("faculty.n.2").member_meronyms(), [S("professor.n.01")]) self.assertEqual(S("copilot.n.1").member_holonyms(), [S("crew.n.01")]) self.assertEqual( S("table.n.2").part_meronyms(), [S("leg.n.03"), S("tabletop.n.01"), S("tableware.n.01")], ) self.assertEqual(S("course.n.7").part_holonyms(), [S("meal.n.01")]) self.assertEqual( S("water.n.1").substance_meronyms(), [S("hydrogen.n.01"), S("oxygen.n.01")] ) self.assertEqual( S("gin.n.1").substance_holonyms(), [ S("gin_and_it.n.01"), S("gin_and_tonic.n.01"), S("martini.n.01"), S("pink_lady.n.01"), ], ) def test_antonyms(self): self.assertEqual( L("leader.n.1.leader").antonyms(), [L("follower.n.01.follower")] ) self.assertEqual( L("increase.v.1.increase").antonyms(), [L("decrease.v.01.decrease")] ) def test_misc_relations(self): self.assertEqual(S("snore.v.1").entailments(), [S("sleep.v.01")]) self.assertEqual( S("heavy.a.1").similar_tos(), [ S("dense.s.03"), S("doughy.s.01"), S("heavier-than-air.s.01"), S("hefty.s.02"), S("massive.s.04"), S("non-buoyant.s.01"), S("ponderous.s.02"), ], ) self.assertEqual(S("light.a.1").attributes(), [S("weight.n.01")]) self.assertEqual(S("heavy.a.1").attributes(), [S("weight.n.01")]) self.assertEqual( 
L("English.a.1.English").pertainyms(), [L("england.n.01.England")] ) def test_lch(self): self.assertEqual( S("person.n.01").lowest_common_hypernyms(S("dog.n.01")), [S("organism.n.01")], ) self.assertEqual( S("woman.n.01").lowest_common_hypernyms(S("girlfriend.n.02")), [S("woman.n.01")], ) def test_domains(self): self.assertEqual(S("code.n.03").topic_domains(), [S("computer_science.n.01")]) self.assertEqual(S("pukka.a.01").region_domains(), [S("india.n.01")]) self.assertEqual(S("freaky.a.01").usage_domains(), [S("slang.n.02")]) def test_in_topic_domains(self): self.assertEqual( S("computer_science.n.01").in_topic_domains()[0], S("access.n.05") ) self.assertEqual(S("germany.n.01").in_region_domains()[23], S("trillion.n.02")) self.assertEqual(S("slang.n.02").in_usage_domains()[1], S("airhead.n.01")) def test_wordnet_similarities(self): self.assertAlmostEqual(S("cat.n.01").path_similarity(S("cat.n.01")), 1.0) self.assertAlmostEqual(S("dog.n.01").path_similarity(S("cat.n.01")), 0.2) self.assertAlmostEqual( S("car.n.01").path_similarity(S("automobile.v.01")), S("automobile.v.01").path_similarity(S("car.n.01")), ) self.assertAlmostEqual( S("big.a.01").path_similarity(S("dog.n.01")), S("dog.n.01").path_similarity(S("big.a.01")), ) self.assertAlmostEqual( S("big.a.01").path_similarity(S("long.a.01")), S("long.a.01").path_similarity(S("big.a.01")), ) self.assertAlmostEqual( S("dog.n.01").lch_similarity(S("cat.n.01")), 2.028, places=3 ) self.assertAlmostEqual( S("dog.n.01").wup_similarity(S("cat.n.01")), 0.8571, places=3 ) self.assertAlmostEqual( S("car.n.01").wup_similarity(S("automobile.v.01")), S("automobile.v.01").wup_similarity(S("car.n.01")), ) self.assertAlmostEqual( S("big.a.01").wup_similarity(S("dog.n.01")), S("dog.n.01").wup_similarity(S("big.a.01")), ) self.assertAlmostEqual( S("big.a.01").wup_similarity(S("long.a.01")), S("long.a.01").wup_similarity(S("big.a.01")), ) self.assertAlmostEqual( S("big.a.01").lch_similarity(S("long.a.01")), S("long.a.01").lch_similarity(S("big.a.01")), ) brown_ic = wnic.ic("ic-brown.dat") self.assertAlmostEqual( S("dog.n.01").jcn_similarity(S("cat.n.01"), brown_ic), 0.4497, places=3 ) semcor_ic = wnic.ic("ic-semcor.dat") self.assertAlmostEqual( S("dog.n.01").lin_similarity(S("cat.n.01"), semcor_ic), 0.8863, places=3 ) def test_omw_lemma_no_trailing_underscore(self): expected = sorted( [ "popolna_sprememba_v_mišljenju", "popoln_obrat", "preobrat", "preobrat_v_mišljenju", ] ) self.assertEqual(sorted(S("about-face.n.02").lemma_names(lang="slv")), expected) def test_iterable_type_for_all_lemma_names(self): cat_lemmas = wn.all_lemma_names(lang="cat") eng_lemmas = wn.all_lemma_names(lang="eng") self.assertTrue(hasattr(eng_lemmas, "__iter__")) self.assertTrue(hasattr(eng_lemmas, "__next__") or hasattr(eng_lemmas, "next")) self.assertTrue(eng_lemmas.__iter__() is eng_lemmas) self.assertTrue(hasattr(cat_lemmas, "__iter__")) self.assertTrue(hasattr(cat_lemmas, "__next__") or hasattr(eng_lemmas, "next")) self.assertTrue(cat_lemmas.__iter__() is cat_lemmas)
Tests for the BLEU translation evaluation metric.

The modified n-gram precision examples come from the original BLEU paper (https://www.aclweb.org/anthology/P02-1040.pdf). Example 1 ("the the the ...") tests modified unigram precision, asserted with assertAlmostEqual at 4-place precision, and modified bigram precision. Example 2 ("the of the") tests the same quantities. Example 3 uses proper MT outputs and tests unigram and bigram precision both with assertAlmostEqual at 4-place precision and with rounding. The brevity-penalty cases come from the brevity_penalty_closest function in mteval-v13a.pl and are the same test cases as in the doctest in nltk/translate/bleu_score.py.

Fringe cases: where there are 0 matches and where there are 100% matches, BLEU is tested up to order n = len(hypothesis) with uniform weights. When n > len(hypothesis), or the reference or hypothesis is shorter than 4-grams, no 4-gram matches are found, so the result should be zero (exp(w_1 * 1 * w_2 * 1 * w_3 * 1 * w_4 * -inf) = 0) and a UserWarning is expected because len(reference) < 4 or len(hypothesis) < 4; assertWarns is wrapped in try/except because it is only supported from Python 3.2. Also covered: the special case where n > len(hypothesis) and reference == hypothesis, an empty hypothesis, a length-1 hypothesis under smoothing method 4, empty references, and both references and hypothesis empty.

The mteval-13a comparison reads the BLEU scores from the mteval-13a output file (the numbers are on the second-to-last line, ordered by n-gram order; the first and second items on that line are the score and system names), whitespace-tokenises the reference and hypothesis files (note that split() automatically strips, and that the corpus_bleu input is a list of lists of references), computes corpus_bleu for each order without smoothing and then with smoothing method 3 (the method used in mteval-v13a.pl), and checks that the difference is less than 0.005. This is an approximate comparison: as much as 0.01 BLEU might be statistically significant, yet the actual translation quality might not differ. The bad-sentence case checks that a warning is raised and that the BLEU output degenerates to 0, since the number of matching 2-grams is 0. A further case checks corpus_bleu with a list of weight tuples against separate single-weight calls.
import io import unittest import numpy as np from nltk.data import find from nltk.translate.bleu_score import ( SmoothingFunction, brevity_penalty, closest_ref_length, corpus_bleu, modified_precision, sentence_bleu, ) class TestBLEU(unittest.TestCase): def test_modified_precision(self): ref1 = "the cat is on the mat".split() ref2 = "there is a cat on the mat".split() hyp1 = "the the the the the the the".split() references = [ref1, ref2] hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1)) assert round(hyp1_unigram_precision, 4) == 0.2857 self.assertAlmostEqual(hyp1_unigram_precision, 0.28571428, places=4) assert float(modified_precision(references, hyp1, n=2)) == 0.0 ref1 = str( "It is a guide to action that ensures that the military " "will forever heed Party commands" ).split() ref2 = str( "It is the guiding principle which guarantees the military " "forces always being under the command of the Party" ).split() ref3 = str( "It is the practical guide for the army always to heed " "the directions of the party" ).split() hyp1 = "of the".split() references = [ref1, ref2, ref3] assert float(modified_precision(references, hyp1, n=1)) == 1.0 assert float(modified_precision(references, hyp1, n=2)) == 1.0 hyp1 = str( "It is a guide to action which ensures that the military " "always obeys the commands of the party" ).split() hyp2 = str( "It is to insure the troops forever hearing the activity " "guidebook that party direct" ).split() references = [ref1, ref2, ref3] hyp1_unigram_precision = float(modified_precision(references, hyp1, n=1)) hyp2_unigram_precision = float(modified_precision(references, hyp2, n=1)) self.assertAlmostEqual(hyp1_unigram_precision, 0.94444444, places=4) self.assertAlmostEqual(hyp2_unigram_precision, 0.57142857, places=4) assert round(hyp1_unigram_precision, 4) == 0.9444 assert round(hyp2_unigram_precision, 4) == 0.5714 hyp1_bigram_precision = float(modified_precision(references, hyp1, n=2)) hyp2_bigram_precision = float(modified_precision(references, hyp2, n=2)) self.assertAlmostEqual(hyp1_bigram_precision, 0.58823529, places=4) self.assertAlmostEqual(hyp2_bigram_precision, 0.07692307, places=4) assert round(hyp1_bigram_precision, 4) == 0.5882 assert round(hyp2_bigram_precision, 4) == 0.0769 def test_brevity_penalty(self): references = [["a"] * 11, ["a"] * 8] hypothesis = ["a"] * 7 hyp_len = len(hypothesis) closest_ref_len = closest_ref_length(references, hyp_len) self.assertAlmostEqual( brevity_penalty(closest_ref_len, hyp_len), 0.8669, places=4 ) references = [["a"] * 11, ["a"] * 8, ["a"] * 6, ["a"] * 7] hypothesis = ["a"] * 7 hyp_len = len(hypothesis) closest_ref_len = closest_ref_length(references, hyp_len) assert brevity_penalty(closest_ref_len, hyp_len) == 1.0 def test_zero_matches(self): references = ["The candidate has no alignment to any of the references".split()] hypothesis = "John loves Mary".split() for n in range(1, len(hypothesis)): weights = (1.0 / n,) * n assert sentence_bleu(references, hypothesis, weights) == 0 def test_full_matches(self): references = ["John loves Mary".split()] hypothesis = "John loves Mary".split() for n in range(1, len(hypothesis)): weights = (1.0 / n,) * n assert sentence_bleu(references, hypothesis, weights) == 1.0 def test_partial_matches_hypothesis_longer_than_reference(self): references = ["John loves Mary".split()] hypothesis = "John loves Mary who loves Mike".split() self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4) try: self.assertWarns(UserWarning, sentence_bleu, references, 
hypothesis) except AttributeError: pass class TestBLEUFringeCases(unittest.TestCase): def test_case_where_n_is_bigger_than_hypothesis_length(self): references = ["John loves Mary ?".split()] hypothesis = "John loves Mary".split() n = len(hypothesis) + 1 weights = (1.0 / n,) * n self.assertAlmostEqual( sentence_bleu(references, hypothesis, weights), 0.0, places=4 ) try: self.assertWarns(UserWarning, sentence_bleu, references, hypothesis) except AttributeError: pass references = ["John loves Mary".split()] hypothesis = "John loves Mary".split() self.assertAlmostEqual( sentence_bleu(references, hypothesis, weights), 0.0, places=4 ) def test_empty_hypothesis(self): references = ["The candidate has no alignment to any of the references".split()] hypothesis = [] assert sentence_bleu(references, hypothesis) == 0 def test_length_one_hypothesis(self): references = ["The candidate has no alignment to any of the references".split()] hypothesis = ["Foo"] method4 = SmoothingFunction().method4 try: sentence_bleu(references, hypothesis, smoothing_function=method4) except ValueError: pass def test_empty_references(self): references = [[]] hypothesis = "John loves Mary".split() assert sentence_bleu(references, hypothesis) == 0 def test_empty_references_and_hypothesis(self): references = [[]] hypothesis = [] assert sentence_bleu(references, hypothesis) == 0 def test_reference_or_hypothesis_shorter_than_fourgrams(self): references = ["let it go".split()] hypothesis = "let go it".split() self.assertAlmostEqual(sentence_bleu(references, hypothesis), 0.0, places=4) try: self.assertWarns(UserWarning, sentence_bleu, references, hypothesis) except AttributeError: pass def test_numpy_weights(self): references = ["The candidate has no alignment to any of the references".split()] hypothesis = "John loves Mary".split() weights = np.array([0.25] * 4) assert sentence_bleu(references, hypothesis, weights) == 0 class TestBLEUvsMteval13a(unittest.TestCase): def test_corpus_bleu(self): ref_file = find("models/wmt15_eval/ref.ru") hyp_file = find("models/wmt15_eval/google.ru") mteval_output_file = find("models/wmt15_eval/mteval-13a.output") with open(mteval_output_file) as mteval_fin: mteval_bleu_scores = map(float, mteval_fin.readlines()[-2].split()[1:-1]) with open(ref_file, encoding="utf8") as ref_fin: with open(hyp_file, encoding="utf8") as hyp_fin: hypothesis = list(map(lambda x: x.split(), hyp_fin)) references = list(map(lambda x: [x.split()], ref_fin)) for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores): nltk_bleu = corpus_bleu( references, hypothesis, weights=(1.0 / i,) * i ) assert abs(mteval_bleu - nltk_bleu) < 0.005 chencherry = SmoothingFunction() for i, mteval_bleu in zip(range(1, 10), mteval_bleu_scores): nltk_bleu = corpus_bleu( references, hypothesis, weights=(1.0 / i,) * i, smoothing_function=chencherry.method3, ) assert abs(mteval_bleu - nltk_bleu) < 0.005 class TestBLEUWithBadSentence(unittest.TestCase): def test_corpus_bleu_with_bad_sentence(self): hyp = "Teo S yb , oe uNb , R , T t , , t Tue Ar saln S , , 5istsi l , 5oe R ulO sae oR R" ref = str( "Their tasks include changing a pump on the faulty stokehold ." "Likewise , two species that are very similar in morphology " "were distinguished using genetics ." 
) references = [[ref.split()]] hypotheses = [hyp.split()] try: with self.assertWarns(UserWarning): self.assertAlmostEqual( corpus_bleu(references, hypotheses), 0.0, places=4 ) except AttributeError: self.assertAlmostEqual(corpus_bleu(references, hypotheses), 0.0, places=4) class TestBLEUWithMultipleWeights(unittest.TestCase): def test_corpus_bleu_with_multiple_weights(self): hyp1 = [ "It", "is", "a", "guide", "to", "action", "which", "ensures", "that", "the", "military", "always", "obeys", "the", "commands", "of", "the", "party", ] ref1a = [ "It", "is", "a", "guide", "to", "action", "that", "ensures", "that", "the", "military", "will", "forever", "heed", "Party", "commands", ] ref1b = [ "It", "is", "the", "guiding", "principle", "which", "guarantees", "the", "military", "forces", "always", "being", "under", "the", "command", "of", "the", "Party", ] ref1c = [ "It", "is", "the", "practical", "guide", "for", "the", "army", "always", "to", "heed", "the", "directions", "of", "the", "party", ] hyp2 = [ "he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history", ] ref2a = [ "he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book", ] weight_1 = (1, 0, 0, 0) weight_2 = (0.25, 0.25, 0.25, 0.25) weight_3 = (0, 0, 0, 0, 1) bleu_scores = corpus_bleu( list_of_references=[[ref1a, ref1b, ref1c], [ref2a]], hypotheses=[hyp1, hyp2], weights=[weight_1, weight_2, weight_3], ) assert bleu_scores[0] == corpus_bleu( [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_1 ) assert bleu_scores[1] == corpus_bleu( [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_2 ) assert bleu_scores[2] == corpus_bleu( [[ref1a, ref1b, ref1c], [ref2a]], [hyp1, hyp2], weight_3 )
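For orientation, a minimal sketch of the sentence- and corpus-level API these tests exercise (the sentences reuse the paper example above; exact scores are not asserted here, and the smoothing function shown is just one of the methods provided by SmoothingFunction):

# Minimal sketch of sentence_bleu / corpus_bleu usage, not part of the test suite.
from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu, sentence_bleu

reference = (
    "It is a guide to action that ensures that the military "
    "will forever heed Party commands"
).split()
hypothesis = (
    "It is a guide to action which ensures that the military "
    "always obeys the commands of the party"
).split()

# Default weights are uniform up to 4-grams, i.e. (0.25, 0.25, 0.25, 0.25).
print(sentence_bleu([reference], hypothesis))

# Short or mismatched segments can zero out higher-order precisions;
# a smoothing method keeps the score informative.
chencherry = SmoothingFunction()
print(sentence_bleu([reference], hypothesis, smoothing_function=chencherry.method3))

# corpus_bleu takes one list of references per hypothesis.
print(corpus_bleu([[reference]], [hypothesis]))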
Tests for GDFA (grow-diag-final-and) alignment symmetrisation, using the first 10 eflomal outputs from issue 1829 (https://github.com/nltk/nltk/issues/1829). The inputs are the forward and backward alignment strings together with the source and target sentence lengths; the test iterates through all 10 examples and checks the symmetrised alignments against the expected outputs.
import unittest from nltk.translate.gdfa import grow_diag_final_and class TestGDFA(unittest.TestCase): def test_from_eflomal_outputs(self): forwards = [ "0-0 1-2", "0-0 1-1", "0-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 7-8 9-9 10-10 9-11 11-12 12-13 13-14", "0-0 1-1 1-2 2-3 3-4 4-5 4-6 5-7 6-8 8-9 9-10", "0-0 14-1 15-2 16-3 20-5 21-6 22-7 5-8 6-9 7-10 8-11 9-12 10-13 11-14 12-15 13-16 14-17 17-18 18-19 19-20 20-21 23-22 24-23 25-24 26-25 27-27 28-28 29-29 30-30 31-31", "0-0 1-1 0-2 2-3", "0-0 2-2 4-4", "0-0 1-1 2-3 3-4 5-5 7-6 8-7 9-8 10-9 11-10 12-11 13-12 14-13 15-14 16-16 17-17 18-18 19-19 20-20", "3-0 4-1 6-2 5-3 6-4 7-5 8-6 9-7 10-8 11-9 16-10 9-12 10-13 12-14", "1-0", ] backwards = [ "0-0 1-2", "0-0 1-1", "0-0 2-1 3-2 4-3 5-4 6-5 7-6 8-7 9-8 10-10 11-12 12-11 13-13", "0-0 1-2 2-3 3-4 4-6 6-8 7-5 8-7 9-8", "0-0 1-8 2-9 3-10 4-11 5-12 6-11 8-13 9-14 10-15 11-16 12-17 13-18 14-19 15-20 16-21 17-22 18-23 19-24 20-29 21-30 22-31 23-2 24-3 25-4 26-5 27-5 28-6 29-7 30-28 31-31", "0-0 1-1 2-3", "0-0 1-1 2-3 4-4", "0-0 1-1 2-3 3-4 5-5 7-6 8-7 9-8 10-9 11-10 12-11 13-12 14-13 15-14 16-16 17-17 18-18 19-19 20-16 21-18", "0-0 1-1 3-2 4-1 5-3 6-4 7-5 8-6 9-7 10-8 11-9 12-8 13-9 14-8 15-9 16-10", "1-0", ] source_lens = [2, 3, 3, 15, 11, 33, 4, 6, 23, 18] target_lens = [2, 4, 3, 16, 12, 33, 5, 6, 22, 16] expected = [ [(0, 0), (1, 2)], [(0, 0), (1, 1)], [ (0, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (10, 10), (11, 12), ], [ (0, 0), (1, 1), (1, 2), (2, 3), (3, 4), (4, 5), (4, 6), (5, 7), (6, 8), (7, 5), (8, 7), (8, 9), (9, 8), (9, 10), ], [ (0, 0), (1, 8), (2, 9), (3, 10), (4, 11), (5, 8), (6, 9), (6, 11), (7, 10), (8, 11), (31, 31), ], [(0, 0), (0, 2), (1, 1), (2, 3)], [(0, 0), (1, 1), (2, 2), (2, 3), (4, 4)], [ (0, 0), (1, 1), (2, 3), (3, 4), (5, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (16, 16), (17, 17), (18, 18), (19, 19), ], [ (0, 0), (1, 1), (3, 0), (3, 2), (4, 1), (5, 3), (6, 2), (6, 4), (7, 5), (8, 6), (9, 7), (9, 12), (10, 8), (10, 13), (11, 9), (12, 8), (12, 14), (13, 9), (14, 8), (15, 9), (16, 10), ], [(1, 0)], [ (0, 0), (1, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (9, 10), (10, 12), (11, 13), (12, 14), (13, 15), ], ] for fw, bw, src_len, trg_len, expect in zip( forwards, backwards, source_lens, target_lens, expected ): self.assertListEqual(expect, grow_diag_final_and(src_len, trg_len, fw, bw))
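As a usage sketch, the symmetriser can be called directly on the first case above (alignments are whitespace-separated "source-target" index pairs, passed along with the source and target sentence lengths):

# Minimal sketch of grow_diag_final_and, reusing the first test case above.
from nltk.translate.gdfa import grow_diag_final_and

forward = "0-0 1-2"    # alignment from one aligner direction
backward = "0-0 1-2"   # alignment from the other direction
print(grow_diag_final_and(2, 2, forward, backward))   # [(0, 0), (1, 2)]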
Tests for IBM Model 1 training methods, written in arrange/act/assert style: set_uniform_probabilities (expected_prob = 1.0 / (target vocab size + 1)); the same for target words that are not in the training data domain; and prob_t_a_given_s on a hand-built alignment, where the expected probability is the product of the lexical translation probabilities.
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel1 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel1(unittest.TestCase): def test_set_uniform_translation_probabilities(self): corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model1 = IBMModel1(corpus, 0) model1.set_uniform_probabilities(corpus) self.assertEqual(model1.translation_table["ham"]["eier"], 1.0 / 3) self.assertEqual(model1.translation_table["eggs"][None], 1.0 / 3) def test_set_uniform_translation_probabilities_of_non_domain_values(self): corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model1 = IBMModel1(corpus, 0) model1.set_uniform_probabilities(corpus) self.assertEqual(model1.translation_table["parrot"]["eier"], IBMModel.MIN_PROB) def test_prob_t_a_given_s(self): src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] corpus = [AlignedSent(trg_sentence, src_sentence)] alignment_info = AlignmentInfo( (0, 1, 4, 0, 2, 5, 5), [None] + src_sentence, ["UNUSED"] + trg_sentence, None, ) translation_table = defaultdict(lambda: defaultdict(float)) translation_table["i"]["ich"] = 0.98 translation_table["love"]["gern"] = 0.98 translation_table["to"][None] = 0.98 translation_table["eat"]["esse"] = 0.98 translation_table["smoked"]["räucherschinken"] = 0.98 translation_table["ham"]["räucherschinken"] = 0.98 model1 = IBMModel1(corpus, 0) model1.translation_table = translation_table probability = model1.prob_t_a_given_s(alignment_info) lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 expected_probability = lexical_translation self.assertEqual(round(probability, 4), round(expected_probability, 4))
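For context, a minimal sketch of training IBM Model 1 end to end on a toy bitext (the sentence pairs mirror the style of the NLTK doctests; the printed values simply illustrate the API and depend on the number of EM iterations):

# Minimal sketch: training IBM Model 1 on a toy parallel corpus.
from nltk.translate import AlignedSent, IBMModel1

bitext = [
    AlignedSent(["klein", "ist", "das", "haus"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "haus", "ist", "ja", "gross"], ["the", "house", "is", "big"]),
    AlignedSent(["das", "buch", "ist", "ja", "klein"], ["the", "book", "is", "small"]),
    AlignedSent(["das", "haus"], ["the", "house"]),
    AlignedSent(["das", "buch"], ["the", "book"]),
    AlignedSent(["ein", "buch"], ["a", "book"]),
]

ibm1 = IBMModel1(bitext, 5)   # 5 EM iterations

# translation_table[target_word][source_word] holds t(target | source).
print(round(ibm1.translation_table["buch"]["book"], 3))

# Training also sets a word alignment on each AlignedSent.
print(bitext[2].alignment)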
Tests for IBM Model 2 training methods, in arrange/act/assert style: set_uniform_probabilities (expected_prob = 1.0 / (length of source sentence + 1)); the same for i and j values that are not in the training data domain; and prob_t_a_given_s on a hand-built alignment (None->"to", "ich"->"i", "esse"->"eat", "gern"->"love", "räucherschinken"->"smoked"/"ham"), where the expected probability is the product of the lexical translation and alignment probabilities.
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel2 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel2(unittest.TestCase): def test_set_uniform_alignment_probabilities(self): corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model2 = IBMModel2(corpus, 0) model2.set_uniform_probabilities(corpus) self.assertEqual(model2.alignment_table[0][1][3][2], 1.0 / 4) self.assertEqual(model2.alignment_table[2][4][2][4], 1.0 / 3) def test_set_uniform_alignment_probabilities_of_non_domain_values(self): corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model2 = IBMModel2(corpus, 0) model2.set_uniform_probabilities(corpus) self.assertEqual(model2.alignment_table[99][1][3][2], IBMModel.MIN_PROB) self.assertEqual(model2.alignment_table[2][99][2][4], IBMModel.MIN_PROB) def test_prob_t_a_given_s(self): src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] corpus = [AlignedSent(trg_sentence, src_sentence)] alignment_info = AlignmentInfo( (0, 1, 4, 0, 2, 5, 5), [None] + src_sentence, ["UNUSED"] + trg_sentence, None, ) translation_table = defaultdict(lambda: defaultdict(float)) translation_table["i"]["ich"] = 0.98 translation_table["love"]["gern"] = 0.98 translation_table["to"][None] = 0.98 translation_table["eat"]["esse"] = 0.98 translation_table["smoked"]["räucherschinken"] = 0.98 translation_table["ham"]["räucherschinken"] = 0.98 alignment_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))) ) alignment_table[0][3][5][6] = 0.97 alignment_table[1][1][5][6] = 0.97 alignment_table[2][4][5][6] = 0.97 alignment_table[4][2][5][6] = 0.97 alignment_table[5][5][5][6] = 0.96 alignment_table[5][6][5][6] = 0.96 model2 = IBMModel2(corpus, 0) model2.translation_table = translation_table model2.alignment_table = alignment_table probability = model2.prob_t_a_given_s(alignment_info) lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 alignment = 0.97 * 0.97 * 0.97 * 0.97 * 0.96 * 0.96 expected_probability = lexical_translation * alignment self.assertEqual(round(probability, 4), round(expected_probability, 4))
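Model 2 adds an explicit alignment distribution on top of Model 1's lexical table; a minimal training sketch on toy data (values are illustrative only):

# Minimal sketch: IBM Model 2 training also estimates alignment probabilities.
from nltk.translate import AlignedSent, IBMModel2

bitext = [
    AlignedSent(["das", "haus", "ist", "klein"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "buch", "ist", "klein"], ["the", "book", "is", "small"]),
    AlignedSent(["das", "haus"], ["the", "house"]),
    AlignedSent(["das", "buch"], ["the", "book"]),
]

ibm2 = IBMModel2(bitext, 5)

# Lexical translation probability, as in Model 1.
print(round(ibm2.translation_table["haus"]["house"], 3))

# alignment_table[i][j][l][m]: probability that target position j aligns to
# source position i, given source length l and target length m.
print(round(ibm2.alignment_table[2][2][4][4], 3))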
Tests for IBM Model 3 training methods, in arrange/act/assert style: set_uniform_probabilities (expected_prob = 1.0 / length of target sentence); the same for i and j values that are not in the training data domain; and prob_t_a_given_s on a hand-built alignment ("i"->"ich", "love"->"gern", "to"->NULL, "eat"->"esse", "smoked"/"ham"->"räucherschinken"), where the expected probability is the product of the NULL generation, fertility, lexical translation and distortion terms.
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel3 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel3(unittest.TestCase): def test_set_uniform_distortion_probabilities(self): corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model3 = IBMModel3(corpus, 0) model3.set_uniform_probabilities(corpus) self.assertEqual(model3.distortion_table[1][0][3][2], 1.0 / 2) self.assertEqual(model3.distortion_table[4][2][2][4], 1.0 / 4) def test_set_uniform_distortion_probabilities_of_non_domain_values(self): corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model3 = IBMModel3(corpus, 0) model3.set_uniform_probabilities(corpus) self.assertEqual(model3.distortion_table[0][0][3][2], IBMModel.MIN_PROB) self.assertEqual(model3.distortion_table[9][2][2][4], IBMModel.MIN_PROB) self.assertEqual(model3.distortion_table[2][9][2][4], IBMModel.MIN_PROB) def test_prob_t_a_given_s(self): src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] corpus = [AlignedSent(trg_sentence, src_sentence)] alignment_info = AlignmentInfo( (0, 1, 4, 0, 2, 5, 5), [None] + src_sentence, ["UNUSED"] + trg_sentence, [[3], [1], [4], [], [2], [5, 6]], ) distortion_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))) ) distortion_table[1][1][5][6] = 0.97 distortion_table[2][4][5][6] = 0.97 distortion_table[3][0][5][6] = 0.97 distortion_table[4][2][5][6] = 0.97 distortion_table[5][5][5][6] = 0.97 distortion_table[6][5][5][6] = 0.97 translation_table = defaultdict(lambda: defaultdict(float)) translation_table["i"]["ich"] = 0.98 translation_table["love"]["gern"] = 0.98 translation_table["to"][None] = 0.98 translation_table["eat"]["esse"] = 0.98 translation_table["smoked"]["räucherschinken"] = 0.98 translation_table["ham"]["räucherschinken"] = 0.98 fertility_table = defaultdict(lambda: defaultdict(float)) fertility_table[1]["ich"] = 0.99 fertility_table[1]["esse"] = 0.99 fertility_table[0]["ja"] = 0.99 fertility_table[1]["gern"] = 0.99 fertility_table[2]["räucherschinken"] = 0.999 fertility_table[1][None] = 0.99 probabilities = { "p1": 0.167, "translation_table": translation_table, "distortion_table": distortion_table, "fertility_table": fertility_table, "alignment_table": None, } model3 = IBMModel3(corpus, 0, probabilities) probability = model3.prob_t_a_given_s(alignment_info) null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999 lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 distortion = 0.97 * 0.97 * 0.97 * 0.97 * 0.97 * 0.97 expected_probability = ( null_generation * fertility * lexical_translation * distortion ) self.assertEqual(round(probability, 4), round(expected_probability, 4))
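Model 3 adds fertility and distortion parameters; a minimal training sketch (toy data, illustrative values):

# Minimal sketch: IBM Model 3 adds fertility and distortion tables.
from nltk.translate import AlignedSent, IBMModel3

bitext = [
    AlignedSent(["das", "haus", "ist", "klein"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "buch", "ist", "klein"], ["the", "book", "is", "small"]),
    AlignedSent(["das", "haus"], ["the", "house"]),
    AlignedSent(["das", "buch"], ["the", "book"]),
]

ibm3 = IBMModel3(bitext, 5)

# fertility_table[phi][source_word]: probability that source_word generates phi target words.
print(round(ibm3.fertility_table[1]["house"], 3))

# distortion_table[j][i][l][m]: probability of placing the translation of source
# position i at target position j (source length l, target length m).
print(round(ibm3.distortion_table[2][2][4][4], 3))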
Tests for IBM Model 4 training methods, in arrange/act/assert style: set_uniform_probabilities over the maximum displacements (number of displacement values = 2 * (number of words in the longest target sentence - 1)), examining the boundary values of (displacement, src_class, trg_class); the same for displacement values that are not in the training data domain; and prob_t_a_given_s on a hand-built alignment ("to"->NULL, "i"->"ich", "eat"->"esse", "love"->"gern", "smoked"/"ham"->"räucherschinken"), where the expected probability is the product of the NULL generation, fertility, lexical translation and distortion terms.
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel4 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel4(unittest.TestCase): def test_set_uniform_distortion_probabilities_of_max_displacements(self): src_classes = {"schinken": 0, "eier": 0, "spam": 1} trg_classes = {"ham": 0, "eggs": 1, "spam": 2} corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model4 = IBMModel4(corpus, 0, src_classes, trg_classes) model4.set_uniform_probabilities(corpus) expected_prob = 1.0 / (2 * (4 - 1)) self.assertEqual(model4.head_distortion_table[3][0][0], expected_prob) self.assertEqual(model4.head_distortion_table[-3][1][2], expected_prob) self.assertEqual(model4.non_head_distortion_table[3][0], expected_prob) self.assertEqual(model4.non_head_distortion_table[-3][2], expected_prob) def test_set_uniform_distortion_probabilities_of_non_domain_values(self): src_classes = {"schinken": 0, "eier": 0, "spam": 1} trg_classes = {"ham": 0, "eggs": 1, "spam": 2} corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model4 = IBMModel4(corpus, 0, src_classes, trg_classes) model4.set_uniform_probabilities(corpus) self.assertEqual(model4.head_distortion_table[4][0][0], IBMModel.MIN_PROB) self.assertEqual(model4.head_distortion_table[100][1][2], IBMModel.MIN_PROB) self.assertEqual(model4.non_head_distortion_table[4][0], IBMModel.MIN_PROB) self.assertEqual(model4.non_head_distortion_table[100][2], IBMModel.MIN_PROB) def test_prob_t_a_given_s(self): src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4} trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4} corpus = [AlignedSent(trg_sentence, src_sentence)] alignment_info = AlignmentInfo( (0, 1, 4, 0, 2, 5, 5), [None] + src_sentence, ["UNUSED"] + trg_sentence, [[3], [1], [4], [], [2], [5, 6]], ) head_distortion_table = defaultdict( lambda: defaultdict(lambda: defaultdict(float)) ) head_distortion_table[1][None][3] = 0.97 head_distortion_table[3][2][4] = 0.97 head_distortion_table[-2][3][4] = 0.97 head_distortion_table[3][4][1] = 0.97 non_head_distortion_table = defaultdict(lambda: defaultdict(float)) non_head_distortion_table[1][0] = 0.96 translation_table = defaultdict(lambda: defaultdict(float)) translation_table["i"]["ich"] = 0.98 translation_table["love"]["gern"] = 0.98 translation_table["to"][None] = 0.98 translation_table["eat"]["esse"] = 0.98 translation_table["smoked"]["räucherschinken"] = 0.98 translation_table["ham"]["räucherschinken"] = 0.98 fertility_table = defaultdict(lambda: defaultdict(float)) fertility_table[1]["ich"] = 0.99 fertility_table[1]["esse"] = 0.99 fertility_table[0]["ja"] = 0.99 fertility_table[1]["gern"] = 0.99 fertility_table[2]["räucherschinken"] = 0.999 fertility_table[1][None] = 0.99 probabilities = { "p1": 0.167, "translation_table": translation_table, "head_distortion_table": head_distortion_table, "non_head_distortion_table": non_head_distortion_table, "fertility_table": fertility_table, "alignment_table": None, } model4 = IBMModel4(corpus, 0, src_classes, trg_classes, probabilities) probability = model4.prob_t_a_given_s(alignment_info) null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) fertility = 1 * 0.99 * 1 * 0.99 * 1 * 
0.99 * 1 * 0.99 * 2 * 0.999 lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 distortion = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96 expected_probability = ( null_generation * fertility * lexical_translation * distortion ) self.assertEqual(round(probability, 4), round(expected_probability, 4))
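Model 4 conditions its distortion on word classes, which the constructor requires; a minimal sketch in which the class maps are hand-picked toy values rather than learned clusters:

# Minimal sketch: IBM Model 4 needs word-class maps for both vocabularies.
from nltk.translate import AlignedSent, IBMModel4

bitext = [
    AlignedSent(["das", "haus", "ist", "klein"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "buch", "ist", "klein"], ["the", "book", "is", "small"]),
]

# Toy class assignments; in practice these come from a word-clustering step.
src_classes = {"the": 0, "house": 1, "book": 1, "is": 2, "small": 3}
trg_classes = {"das": 0, "haus": 1, "buch": 1, "ist": 2, "klein": 3}

ibm4 = IBMModel4(bitext, 5, src_classes, trg_classes)

# head_distortion_table[dj][src_class][trg_class] holds displacement probabilities
# for the first word of a cept; the lexical table is shared with the lower models.
print(round(ibm4.translation_table["haus"]["house"], 3))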
Tests for IBM Model 5 training methods, in arrange/act/assert style: set_uniform_probabilities over the maximum displacements (number of vacancy difference values = 2 * number of words in the longest target sentence), examining the boundary values of (dv, max_v, trg_class); the same for dv and max_v values that are not in the training data domain; prob_t_a_given_s on a hand-built alignment ("ich"->"i", "esse"->"eat", "gern"->"love", "räucherschinken"->"smoked"/"ham"), where the expected probability is the product of the NULL generation, fertility, lexical translation and vacancy terms; and prune, which mocks the IBMModel4.model4_prob_t_a_given_s static method with fixed scores (above threshold, at threshold, low score), checks how many alignments survive, and restores the static method afterwards.
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel, IBMModel4, IBMModel5 from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel5(unittest.TestCase): def test_set_uniform_vacancy_probabilities_of_max_displacements(self): src_classes = {"schinken": 0, "eier": 0, "spam": 1} trg_classes = {"ham": 0, "eggs": 1, "spam": 2} corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model5 = IBMModel5(corpus, 0, src_classes, trg_classes) model5.set_uniform_probabilities(corpus) expected_prob = 1.0 / (2 * 4) self.assertEqual(model5.head_vacancy_table[4][4][0], expected_prob) self.assertEqual(model5.head_vacancy_table[-3][1][2], expected_prob) self.assertEqual(model5.non_head_vacancy_table[4][4][0], expected_prob) self.assertEqual(model5.non_head_vacancy_table[-3][1][2], expected_prob) def test_set_uniform_vacancy_probabilities_of_non_domain_values(self): src_classes = {"schinken": 0, "eier": 0, "spam": 1} trg_classes = {"ham": 0, "eggs": 1, "spam": 2} corpus = [ AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), ] model5 = IBMModel5(corpus, 0, src_classes, trg_classes) model5.set_uniform_probabilities(corpus) self.assertEqual(model5.head_vacancy_table[5][4][0], IBMModel.MIN_PROB) self.assertEqual(model5.head_vacancy_table[-4][1][2], IBMModel.MIN_PROB) self.assertEqual(model5.head_vacancy_table[4][0][0], IBMModel.MIN_PROB) self.assertEqual(model5.non_head_vacancy_table[5][4][0], IBMModel.MIN_PROB) self.assertEqual(model5.non_head_vacancy_table[-4][1][2], IBMModel.MIN_PROB) def test_prob_t_a_given_s(self): src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4} trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4} corpus = [AlignedSent(trg_sentence, src_sentence)] alignment_info = AlignmentInfo( (0, 1, 4, 0, 2, 5, 5), [None] + src_sentence, ["UNUSED"] + trg_sentence, [[3], [1], [4], [], [2], [5, 6]], ) head_vacancy_table = defaultdict( lambda: defaultdict(lambda: defaultdict(float)) ) head_vacancy_table[1 - 0][6][3] = 0.97 head_vacancy_table[3 - 0][5][4] = 0.97 head_vacancy_table[1 - 2][4][4] = 0.97 head_vacancy_table[2 - 0][2][1] = 0.97 non_head_vacancy_table = defaultdict( lambda: defaultdict(lambda: defaultdict(float)) ) non_head_vacancy_table[1 - 0][1][0] = 0.96 translation_table = defaultdict(lambda: defaultdict(float)) translation_table["i"]["ich"] = 0.98 translation_table["love"]["gern"] = 0.98 translation_table["to"][None] = 0.98 translation_table["eat"]["esse"] = 0.98 translation_table["smoked"]["räucherschinken"] = 0.98 translation_table["ham"]["räucherschinken"] = 0.98 fertility_table = defaultdict(lambda: defaultdict(float)) fertility_table[1]["ich"] = 0.99 fertility_table[1]["esse"] = 0.99 fertility_table[0]["ja"] = 0.99 fertility_table[1]["gern"] = 0.99 fertility_table[2]["räucherschinken"] = 0.999 fertility_table[1][None] = 0.99 probabilities = { "p1": 0.167, "translation_table": translation_table, "fertility_table": fertility_table, "head_vacancy_table": head_vacancy_table, "non_head_vacancy_table": non_head_vacancy_table, "head_distortion_table": None, "non_head_distortion_table": None, "alignment_table": None, } model5 = IBMModel5(corpus, 0, src_classes, trg_classes, probabilities) probability = 
model5.prob_t_a_given_s(alignment_info) null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999 lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 vacancy = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96 expected_probability = ( null_generation * fertility * lexical_translation * vacancy ) self.assertEqual(round(probability, 4), round(expected_probability, 4)) def test_prune(self): alignment_infos = [ AlignmentInfo((1, 1), None, None, None), AlignmentInfo((1, 2), None, None, None), AlignmentInfo((2, 1), None, None, None), AlignmentInfo((2, 2), None, None, None), AlignmentInfo((0, 0), None, None, None), ] min_factor = IBMModel5.MIN_SCORE_FACTOR best_score = 0.9 scores = { (1, 1): min(min_factor * 1.5, 1) * best_score, (1, 2): best_score, (2, 1): min_factor * best_score, (2, 2): min_factor * best_score * 0.5, (0, 0): min(min_factor * 1.1, 1) * 1.2, } corpus = [AlignedSent(["a"], ["b"])] original_prob_function = IBMModel4.model4_prob_t_a_given_s IBMModel4.model4_prob_t_a_given_s = staticmethod( lambda a, model: scores[a.alignment] ) model5 = IBMModel5(corpus, 0, None, None) pruned_alignments = model5.prune(alignment_infos) self.assertEqual(len(pruned_alignments), 3) IBMModel4.model4_prob_t_a_given_s = original_prob_function
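Model 5 keeps the word-class maps but models placement through vacancies (still-open target positions); a minimal sketch mirroring the Model 4 example above (toy data, illustrative values):

# Minimal sketch: IBM Model 5 takes the same word-class maps as Model 4.
from nltk.translate import AlignedSent, IBMModel5

bitext = [
    AlignedSent(["das", "haus", "ist", "klein"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "buch", "ist", "klein"], ["the", "book", "is", "small"]),
]
src_classes = {"the": 0, "house": 1, "book": 1, "is": 2, "small": 3}
trg_classes = {"das": 0, "haus": 1, "buch": 1, "ist": 2, "klein": 3}

ibm5 = IBMModel5(bitext, 5, src_classes, trg_classes)

# head_vacancy_table[dv][max_v][trg_class] and non_head_vacancy_table hold the
# vacancy-difference probabilities tested above; the lexical table is shared.
print(round(ibm5.translation_table["haus"]["house"], 3))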
Tests for methods common to all IBM translation models, in arrange/act/assert style: vocabulary initialisation, including with empty corpora; best_model2_alignment, including the case where "love" is forced (pegged) to align with "jambon", fertile words ("bien" produces two target words, "really" and another "really"), zero-fertility words (NULL and "bien"), and empty source or target sentences; neighboring, which must return all alignments reachable by moves and swaps from the original alignment, set the neighbours' cept information (a few particular alignments are selected and checked), and return only moves when a position is pegged (e.g. "eggs" pegged to align with "œufs"); hillclimb, where the climb goes from (0, 3, 2) via (0, 2, 2) to (0, 4, 4); and sample.
import unittest from collections import defaultdict from nltk.translate import AlignedSent, IBMModel from nltk.translate.ibm_model import AlignmentInfo class TestIBMModel(unittest.TestCase): __TEST_SRC_SENTENCE = ["j'", "aime", "bien", "jambon"] __TEST_TRG_SENTENCE = ["i", "love", "ham"] def test_vocabularies_are_initialized(self): parallel_corpora = [ AlignedSent(["one", "two", "three", "four"], ["un", "deux", "trois"]), AlignedSent(["five", "one", "six"], ["quatre", "cinq", "six"]), AlignedSent([], ["sept"]), ] ibm_model = IBMModel(parallel_corpora) self.assertEqual(len(ibm_model.src_vocab), 8) self.assertEqual(len(ibm_model.trg_vocab), 6) def test_vocabularies_are_initialized_even_with_empty_corpora(self): parallel_corpora = [] ibm_model = IBMModel(parallel_corpora) self.assertEqual(len(ibm_model.src_vocab), 1) self.assertEqual(len(ibm_model.trg_vocab), 0) def test_best_model2_alignment(self): sentence_pair = AlignedSent( TestIBMModel.__TEST_TRG_SENTENCE, TestIBMModel.__TEST_SRC_SENTENCE ) translation_table = { "i": {"j'": 0.9, "aime": 0.05, "bien": 0.02, "jambon": 0.03, None: 0}, "love": {"j'": 0.05, "aime": 0.9, "bien": 0.01, "jambon": 0.01, None: 0.03}, "ham": {"j'": 0, "aime": 0.01, "bien": 0, "jambon": 0.99, None: 0}, } alignment_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.2))) ) ibm_model = IBMModel([]) ibm_model.translation_table = translation_table ibm_model.alignment_table = alignment_table a_info = ibm_model.best_model2_alignment(sentence_pair) self.assertEqual(a_info.alignment[1:], (1, 2, 4)) self.assertEqual(a_info.cepts, [[], [1], [2], [], [3]]) def test_best_model2_alignment_does_not_change_pegged_alignment(self): sentence_pair = AlignedSent( TestIBMModel.__TEST_TRG_SENTENCE, TestIBMModel.__TEST_SRC_SENTENCE ) translation_table = { "i": {"j'": 0.9, "aime": 0.05, "bien": 0.02, "jambon": 0.03, None: 0}, "love": {"j'": 0.05, "aime": 0.9, "bien": 0.01, "jambon": 0.01, None: 0.03}, "ham": {"j'": 0, "aime": 0.01, "bien": 0, "jambon": 0.99, None: 0}, } alignment_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.2))) ) ibm_model = IBMModel([]) ibm_model.translation_table = translation_table ibm_model.alignment_table = alignment_table a_info = ibm_model.best_model2_alignment(sentence_pair, 2, 4) self.assertEqual(a_info.alignment[1:], (1, 4, 4)) self.assertEqual(a_info.cepts, [[], [1], [], [], [2, 3]]) def test_best_model2_alignment_handles_fertile_words(self): sentence_pair = AlignedSent( ["i", "really", ",", "really", "love", "ham"], TestIBMModel.__TEST_SRC_SENTENCE, ) translation_table = { "i": {"j'": 0.9, "aime": 0.05, "bien": 0.02, "jambon": 0.03, None: 0}, "really": {"j'": 0, "aime": 0, "bien": 0.9, "jambon": 0.01, None: 0.09}, ",": {"j'": 0, "aime": 0, "bien": 0.3, "jambon": 0, None: 0.7}, "love": {"j'": 0.05, "aime": 0.9, "bien": 0.01, "jambon": 0.01, None: 0.03}, "ham": {"j'": 0, "aime": 0.01, "bien": 0, "jambon": 0.99, None: 0}, } alignment_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.2))) ) ibm_model = IBMModel([]) ibm_model.translation_table = translation_table ibm_model.alignment_table = alignment_table a_info = ibm_model.best_model2_alignment(sentence_pair) self.assertEqual(a_info.alignment[1:], (1, 3, 0, 3, 2, 4)) self.assertEqual(a_info.cepts, [[3], [1], [5], [2, 4], [6]]) def test_best_model2_alignment_handles_empty_src_sentence(self): sentence_pair = AlignedSent(TestIBMModel.__TEST_TRG_SENTENCE, []) ibm_model = IBMModel([]) a_info = 
ibm_model.best_model2_alignment(sentence_pair) self.assertEqual(a_info.alignment[1:], (0, 0, 0)) self.assertEqual(a_info.cepts, [[1, 2, 3]]) def test_best_model2_alignment_handles_empty_trg_sentence(self): sentence_pair = AlignedSent([], TestIBMModel.__TEST_SRC_SENTENCE) ibm_model = IBMModel([]) a_info = ibm_model.best_model2_alignment(sentence_pair) self.assertEqual(a_info.alignment[1:], ()) self.assertEqual(a_info.cepts, [[], [], [], [], []]) def test_neighboring_finds_neighbor_alignments(self): a_info = AlignmentInfo( (0, 3, 2), (None, "des", "œufs", "verts"), ("UNUSED", "green", "eggs"), [[], [], [2], [1]], ) ibm_model = IBMModel([]) neighbors = ibm_model.neighboring(a_info) neighbor_alignments = set() for neighbor in neighbors: neighbor_alignments.add(neighbor.alignment) expected_alignments = { (0, 0, 2), (0, 1, 2), (0, 2, 2), (0, 3, 0), (0, 3, 1), (0, 3, 3), (0, 2, 3), (0, 3, 2), } self.assertEqual(neighbor_alignments, expected_alignments) def test_neighboring_sets_neighbor_alignment_info(self): a_info = AlignmentInfo( (0, 3, 2), (None, "des", "œufs", "verts"), ("UNUSED", "green", "eggs"), [[], [], [2], [1]], ) ibm_model = IBMModel([]) neighbors = ibm_model.neighboring(a_info) for neighbor in neighbors: if neighbor.alignment == (0, 2, 2): moved_alignment = neighbor elif neighbor.alignment == (0, 3, 2): swapped_alignment = neighbor self.assertEqual(moved_alignment.cepts, [[], [], [1, 2], []]) self.assertEqual(swapped_alignment.cepts, [[], [], [2], [1]]) def test_neighboring_returns_neighbors_with_pegged_alignment(self): a_info = AlignmentInfo( (0, 3, 2), (None, "des", "œufs", "verts"), ("UNUSED", "green", "eggs"), [[], [], [2], [1]], ) ibm_model = IBMModel([]) neighbors = ibm_model.neighboring(a_info, 2) neighbor_alignments = set() for neighbor in neighbors: neighbor_alignments.add(neighbor.alignment) expected_alignments = { (0, 0, 2), (0, 1, 2), (0, 2, 2), (0, 3, 2), } self.assertEqual(neighbor_alignments, expected_alignments) def test_hillclimb(self): initial_alignment = AlignmentInfo((0, 3, 2), None, None, None) def neighboring_mock(a, j): if a.alignment == (0, 3, 2): return { AlignmentInfo((0, 2, 2), None, None, None), AlignmentInfo((0, 1, 1), None, None, None), } elif a.alignment == (0, 2, 2): return { AlignmentInfo((0, 3, 3), None, None, None), AlignmentInfo((0, 4, 4), None, None, None), } return set() def prob_t_a_given_s_mock(a): prob_values = { (0, 3, 2): 0.5, (0, 2, 2): 0.6, (0, 1, 1): 0.4, (0, 3, 3): 0.6, (0, 4, 4): 0.7, } return prob_values.get(a.alignment, 0.01) ibm_model = IBMModel([]) ibm_model.neighboring = neighboring_mock ibm_model.prob_t_a_given_s = prob_t_a_given_s_mock best_alignment = ibm_model.hillclimb(initial_alignment) self.assertEqual(best_alignment.alignment, (0, 4, 4)) def test_sample(self): sentence_pair = AlignedSent( TestIBMModel.__TEST_TRG_SENTENCE, TestIBMModel.__TEST_SRC_SENTENCE ) ibm_model = IBMModel([]) ibm_model.prob_t_a_given_s = lambda x: 0.001 samples, best_alignment = ibm_model.sample(sentence_pair) self.assertEqual(len(samples), 61)
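The base IBMModel only builds the shared data structures (vocabularies and probability tables); a minimal sketch of the vocabulary initialisation checked above:

# Minimal sketch: IBMModel builds source/target vocabularies from a corpus,
# with the NULL token (None) added on the source side.
from nltk.translate import AlignedSent, IBMModel

corpus = [
    AlignedSent(["i", "love", "ham"], ["j'", "aime", "bien", "jambon"]),
    AlignedSent(["green", "eggs"], ["des", "œufs", "verts"]),
]

model = IBMModel(corpus)
print(len(model.src_vocab), len(model.trg_vocab))   # 8 and 5 for this toy corpus
print(None in model.src_vocab)                      # True: the NULL source token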
Tests for the NIST translation evaluation metric: reads the NIST scores from the mteval-13a output file (the numbers are on the fourth-to-last line, ordered by n-gram order; the first and second items on that line are the score and system names), whitespace-tokenises the reference and hypothesis files (note that split() automatically strips, and that the corpus_nist input is a list of lists of references), computes corpus_nist for each order without smoothing, and checks that the difference from the mteval-13a NIST score is less than 0.05.
import io import unittest from nltk.data import find from nltk.translate.nist_score import corpus_nist class TestNIST(unittest.TestCase): def test_sentence_nist(self): ref_file = find("models/wmt15_eval/ref.ru") hyp_file = find("models/wmt15_eval/google.ru") mteval_output_file = find("models/wmt15_eval/mteval-13a.output") with open(mteval_output_file) as mteval_fin: mteval_nist_scores = map(float, mteval_fin.readlines()[-4].split()[1:-1]) with open(ref_file, encoding="utf8") as ref_fin: with open(hyp_file, encoding="utf8") as hyp_fin: hypotheses = list(map(lambda x: x.split(), hyp_fin)) references = list(map(lambda x: [x.split()], ref_fin)) for i, mteval_nist in zip(range(1, 10), mteval_nist_scores): nltk_nist = corpus_nist(references, hypotheses, i) assert abs(mteval_nist - nltk_nist) < 0.05
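A minimal sketch of the NIST scorer's entry points (the sentences reuse the BLEU paper example; n is the maximum n-gram order used for the information-weighted precision):

# Minimal sketch of sentence_nist / corpus_nist usage, not part of the test suite.
from nltk.translate.nist_score import corpus_nist, sentence_nist

reference = (
    "It is a guide to action that ensures that the military "
    "will forever heed Party commands"
).split()
hypothesis = (
    "It is a guide to action which ensures that the military "
    "always obeys the commands of the party"
).split()

print(sentence_nist([reference], hypothesis, n=5))

# corpus_nist takes one list of references per hypothesis, as corpus_bleu does.
print(corpus_nist([[reference]], [hypothesis], n=5))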
Natural Language Toolkit: Stack decoder tests
(C) 2001-2023 NLTK Project
Author: Tah Wei Hoon <hoon.tw@gmail.com>
URL: https://www.nltk.org/
For license information, see LICENSE.TXT

Tests for the stack decoder, in arrange/act/assert style: find_all_src_phrases ("my hovercraft", "hovercraft is", "full of", "full of eels"; there is no entry starting with "of eels"); distortion_score, including the rule that an expansion from an empty hypothesis always has zero distortion cost; compute_future_scores, including the backoff when a phrase such as "hovercraft is" is not in the phrase table (an NLTK language model should be used here once it is implemented); future_score for a sentence of 8 words where words 2, 3 and 4 are already translated (the untranslated_spans method is mocked); and valid_phrases. The _Hypothesis tests cover translation_so_far, total_translated_words, translated_positions and untranslated_spans, each also for an empty hypothesis. The _Stack tests cover push bumping off the worst hypothesis when the stack is full, push removing (or refusing to add) hypotheses that fall below the beam threshold once a greatly superior hypothesis is pushed, and best returning the best hypothesis, or None when the stack is empty.
import unittest from collections import defaultdict from math import log from nltk.translate import PhraseTable, StackDecoder from nltk.translate.stack_decoder import _Hypothesis, _Stack class TestStackDecoder(unittest.TestCase): def test_find_all_src_phrases(self): phrase_table = TestStackDecoder.create_fake_phrase_table() stack_decoder = StackDecoder(phrase_table, None) sentence = ("my", "hovercraft", "is", "full", "of", "eels") src_phrase_spans = stack_decoder.find_all_src_phrases(sentence) self.assertEqual(src_phrase_spans[0], [2]) self.assertEqual(src_phrase_spans[1], [2]) self.assertEqual(src_phrase_spans[2], [3]) self.assertEqual(src_phrase_spans[3], [5, 6]) self.assertFalse(src_phrase_spans[4]) self.assertEqual(src_phrase_spans[5], [6]) def test_distortion_score(self): stack_decoder = StackDecoder(None, None) stack_decoder.distortion_factor = 0.5 hypothesis = _Hypothesis() hypothesis.src_phrase_span = (3, 5) score = stack_decoder.distortion_score(hypothesis, (8, 10)) expected_score = log(stack_decoder.distortion_factor) * (8 - 5) self.assertEqual(score, expected_score) def test_distortion_score_of_first_expansion(self): stack_decoder = StackDecoder(None, None) stack_decoder.distortion_factor = 0.5 hypothesis = _Hypothesis() score = stack_decoder.distortion_score(hypothesis, (8, 10)) self.assertEqual(score, 0.0) def test_compute_future_costs(self): phrase_table = TestStackDecoder.create_fake_phrase_table() language_model = TestStackDecoder.create_fake_language_model() stack_decoder = StackDecoder(phrase_table, language_model) sentence = ("my", "hovercraft", "is", "full", "of", "eels") future_scores = stack_decoder.compute_future_scores(sentence) self.assertEqual( future_scores[1][2], ( phrase_table.translations_for(("hovercraft",))[0].log_prob + language_model.probability(("hovercraft",)) ), ) self.assertEqual( future_scores[0][2], ( phrase_table.translations_for(("my", "hovercraft"))[0].log_prob + language_model.probability(("my", "hovercraft")) ), ) def test_compute_future_costs_for_phrases_not_in_phrase_table(self): phrase_table = TestStackDecoder.create_fake_phrase_table() language_model = TestStackDecoder.create_fake_language_model() stack_decoder = StackDecoder(phrase_table, language_model) sentence = ("my", "hovercraft", "is", "full", "of", "eels") future_scores = stack_decoder.compute_future_scores(sentence) self.assertEqual( future_scores[1][3], future_scores[1][2] + future_scores[2][3], ) def test_future_score(self): hypothesis = _Hypothesis() hypothesis.untranslated_spans = lambda _: [(0, 2), (5, 8)] future_score_table = defaultdict(lambda: defaultdict(float)) future_score_table[0][2] = 0.4 future_score_table[5][8] = 0.5 stack_decoder = StackDecoder(None, None) future_score = stack_decoder.future_score(hypothesis, future_score_table, 8) self.assertEqual(future_score, 0.4 + 0.5) def test_valid_phrases(self): hypothesis = _Hypothesis() hypothesis.untranslated_spans = lambda _: [(0, 2), (3, 6)] all_phrases_from = [[1, 4], [2], [], [5], [5, 6, 7], [], [7]] phrase_spans = StackDecoder.valid_phrases(all_phrases_from, hypothesis) self.assertEqual(phrase_spans, [(0, 1), (1, 2), (3, 5), (4, 5), (4, 6)]) @staticmethod def create_fake_phrase_table(): phrase_table = PhraseTable() phrase_table.add(("hovercraft",), ("",), 0.8) phrase_table.add(("my", "hovercraft"), ("", ""), 0.7) phrase_table.add(("my", "cheese"), ("", ""), 0.7) phrase_table.add(("is",), ("",), 0.8) phrase_table.add(("is",), ("",), 0.5) phrase_table.add(("full", "of"), ("", ""), 0.01) phrase_table.add(("full", "of", 
"eels"), ("", "", ""), 0.5) phrase_table.add(("full", "of", "spam"), ("", ""), 0.5) phrase_table.add(("eels",), ("",), 0.5) phrase_table.add(("spam",), ("",), 0.5) return phrase_table @staticmethod def create_fake_language_model(): language_prob = defaultdict(lambda: -999.0) language_prob[("my",)] = log(0.1) language_prob[("hovercraft",)] = log(0.1) language_prob[("is",)] = log(0.1) language_prob[("full",)] = log(0.1) language_prob[("of",)] = log(0.1) language_prob[("eels",)] = log(0.1) language_prob[("my", "hovercraft")] = log(0.3) language_model = type( "", (object,), {"probability": lambda _, phrase: language_prob[phrase]} )() return language_model class TestHypothesis(unittest.TestCase): def setUp(self): root = _Hypothesis() child = _Hypothesis( raw_score=0.5, src_phrase_span=(3, 7), trg_phrase=("hello", "world"), previous=root, ) grandchild = _Hypothesis( raw_score=0.4, src_phrase_span=(1, 2), trg_phrase=("and", "goodbye"), previous=child, ) self.hypothesis_chain = grandchild def test_translation_so_far(self): translation = self.hypothesis_chain.translation_so_far() self.assertEqual(translation, ["hello", "world", "and", "goodbye"]) def test_translation_so_far_for_empty_hypothesis(self): hypothesis = _Hypothesis() translation = hypothesis.translation_so_far() self.assertEqual(translation, []) def test_total_translated_words(self): total_translated_words = self.hypothesis_chain.total_translated_words() self.assertEqual(total_translated_words, 5) def test_translated_positions(self): translated_positions = self.hypothesis_chain.translated_positions() translated_positions.sort() self.assertEqual(translated_positions, [1, 3, 4, 5, 6]) def test_untranslated_spans(self): untranslated_spans = self.hypothesis_chain.untranslated_spans(10) self.assertEqual(untranslated_spans, [(0, 1), (2, 3), (7, 10)]) def test_untranslated_spans_for_empty_hypothesis(self): hypothesis = _Hypothesis() untranslated_spans = hypothesis.untranslated_spans(10) self.assertEqual(untranslated_spans, [(0, 10)]) class TestStack(unittest.TestCase): def test_push_bumps_off_worst_hypothesis_when_stack_is_full(self): stack = _Stack(3) poor_hypothesis = _Hypothesis(0.01) stack.push(_Hypothesis(0.2)) stack.push(poor_hypothesis) stack.push(_Hypothesis(0.1)) stack.push(_Hypothesis(0.3)) self.assertFalse(poor_hypothesis in stack) def test_push_removes_hypotheses_that_fall_below_beam_threshold(self): stack = _Stack(3, 0.5) poor_hypothesis = _Hypothesis(0.01) worse_hypothesis = _Hypothesis(0.009) stack.push(poor_hypothesis) stack.push(worse_hypothesis) stack.push(_Hypothesis(0.9)) self.assertFalse(poor_hypothesis in stack) self.assertFalse(worse_hypothesis in stack) def test_push_does_not_add_hypothesis_that_falls_below_beam_threshold(self): stack = _Stack(3, 0.5) poor_hypothesis = _Hypothesis(0.01) stack.push(_Hypothesis(0.9)) stack.push(poor_hypothesis) self.assertFalse(poor_hypothesis in stack) def test_best_returns_the_best_hypothesis(self): stack = _Stack(3) best_hypothesis = _Hypothesis(0.99) stack.push(_Hypothesis(0.0)) stack.push(best_hypothesis) stack.push(_Hypothesis(0.5)) self.assertEqual(stack.best(), best_hypothesis) def test_best_returns_none_when_stack_is_empty(self): stack = _Stack(3) self.assertEqual(stack.best(), None)
Natural Language Toolkit: Texts
(C) 2001-2023 NLTK Project
Authors: Steven Bird <stevenbird1@gmail.com>, Edward Loper <edloper@gmail.com>
URL: https://www.nltk.org/
For license information, see LICENSE.TXT

This module brings together a variety of NLTK functionality for text analysis and provides simple, interactive interfaces. Functionality includes concordancing, collocation discovery, regular expression search over tokenized strings, and distributional similarity.

ContextIndex is a bidirectional index between words and their contexts in a text. The context of a word is usually defined to be the words that occur in a fixed window around it, but other definitions may be used by providing a custom context function; the default context is one left token and one right token, normalized to lowercase. It offers word_similarity_dict (f-measure overlap of context sets), similar_words, and common_contexts, which raises a ValueError naming any unknown words when fail_on_unknown is set and returns an empty FreqDist when the words have nothing in common.

ConcordanceIndex records the offset positions at which each token occurs (tokens may be mapped through a key function, e.g. for case-insensitive lookup). find_concordance builds ConcordanceLine objects for a query word or phrase, with the query centred in a fixed-width line of left and right context, and print_concordance prints them (word: the target word or phrase, a list of strings; lines: the number of lines to display, default 25; width: the width of each line in characters, default 80).

TokenSearcher makes it easier to use regular expressions to search over tokenized strings. The tokenized string is converted to a string in which tokens are marked with angle brackets, e.g. "<the><window><is><still><open>", and the regular expression passed to findall is modified to treat angle brackets as non-capturing parentheses, in addition to matching the token boundaries, and so that "." does not match the angle brackets. Examples from nltk.book: text5.findall("<.*><.*><bro>") finds phrases such as "you rule bro" and "telling you bro"; text1.findall("<a>(<.*>)<man>") finds adjectives such as "monied", "nervous", "dangerous", "white"; text9.findall("<th.*>{3,}") finds runs of words beginning with "th", such as "thread through those" and "the thought that".

Text is a wrapper around a sequence of simple (string) tokens, intended to support initial exploration of texts via the interactive console. Its methods perform a variety of analyses on the text's contexts (e.g. counting, concordancing, collocation discovery) and display the results; a program that makes use of these analyses should bypass the Text class and use the appropriate analysis function or class directly. A Text is typically initialized from a given document or corpus, e.g. Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt')). This defeats lazy loading but makes things faster; it shouldn't be necessary because the corpus view should be doing intelligent caching, but without it things run slowly, so look into whether the caching is working correctly. Item and slice access are supported. concordance(word, width=80, lines=25) prints a concordance for a word or phrase with the specified context window (matching is not case-sensitive), and concordance_list returns the lines instead. collocation_list(num, window_size=2) returns collocations derived from the text, ignoring stopwords (e.g. text4.collocation_list()[:2] gives 'United States' and 'fellow citizens'), where num is the maximum number of collocations to return and window_size is the number of tokens spanned by a collocation (default 2). collocations(num, window_size=2) prints them instead, where num is the maximum number of collocations to print and window_size is the number of tokens spanned by a collocation.
default2 type windowsize int count the number of times this word appears in the text find the index of the first occurrence of the word in the text code from nltkcontrib readability distributional similarity find other words which appear in the same contexts as the specified word list most similar words first param word the word used to seed the similarity search type word str param num the number of words to generate default20 type num int seealso contextindex similarwords print building wordcontext index words self wordcontextindex similarwordsword num find contexts where the specified words appear list most frequent common contexts first param words the words used to seed the similarity search type words str param num the number of words to generate default20 type num int seealso contextindex commoncontexts print building wordcontext index produce a plot showing the distribution of the words through the text requires pylab to be installed param words the words to be plotted type words liststr seealso nltk draw dispersionplot print random text generated using a trigram language model see also helpnltk lm param length the length of text to generate default100 type length int param textseed generation can be conditioned on preceding context type textseed liststr param randomseed a random seed or an instance of random random if provided makes the random sampling part of generation reproducible default42 type randomseed int create the model when using it the first time see documentation for freqdist plot seealso nltk prob freqdist plot seealso nltk prob freqdist printbuilding vocabulary index find instances of the regular expression in the text the text is a list of tokens and a regexp pattern to match a single token must be surrounded by angle brackets e g from nltk book import text1 text5 text9 text5 findall bro you rule bro telling you bro u twizted bro text1 findalla man monied nervous dangerous white white white pious queer good mature white cape great wise wise butterless white fiendish pale furious better certain complete dismasted younger brave brave brave brave text9 findallth 3 thread through those the thought that that the thing the thing that that that thing through these than through them that the through the thick them that they thought that the param regexp a regular expression type regexp str helper methods one left one right token both casenormalized skip over nonsentencefinal punctuation used by the contextindex that is created for similar and commoncontexts left context right context string display prototype only this approach will be slow to load a collection of texts which can be loaded with list of texts or with a corpus consisting of one or more texts and which supports counting concordancing collocation discovery etc initialize a textcollection as follows import nltk corpus from nltk text import textcollection from nltk book import text1 text2 text3 gutenberg textcollectionnltk corpus gutenberg mytexts textcollectiontext1 text2 text3 iterating over a textcollection produces all the tokens of all the texts in order the frequency of the term in text return text countterm lentext def idfself term idf values are cached for performance printautomatically generated text text generate print natural language toolkit texts c 2001 2023 nltk project steven bird stevenbird1 gmail com edward loper edloper gmail com url https www nltk org for license information see license txt this module brings together a variety of nltk functionality for text analysis and provides simple 
interactive interfaces functionality includes concordancing collocation discovery regular expression search over tokenized strings and distributional similarity a bidirectional index between words and their contexts in a text the context of a word is usually defined to be the words that occur in a fixed window around the word but other definitions may also be used by providing a custom context function one left token and one right token normalized to lowercase rtype list str return the document that this context index was created from return a dictionary mapping from words to similarity scores indicating how often these two words occur in the same context find contexts where the specified words can all appear and return a frequency distribution mapping each context to the number of times that context was used param words the words used to seed the similarity search type words str param fail_on_unknown if true then raise a value error if any of the given words do not occur at all in the index nothing in common just return an empty freqdist an index that can be used to look up the offset locations at which a given word occurs in a document construct a new concordance index param tokens the document list of tokens that this concordance index was created from this list can be used to access the context of a given word occurrence param key a function that maps each token to a normalized version that will be used as a key in the index e g if you use key lambda s s lower then the index will be case insensitive the document list of tokens that this concordance index was created from function mapping each token to an index key or none dictionary mapping words or keys to lists of offset indices initialize the index self _offsets rtype list str return the document that this concordance index was created from rtype list int return a list of the offset positions at which the given word occurs if a key function was specified for the index then given word s key will be looked up find all concordance lines given the query word provided with a list of words these will be found as a phrase approx number of words of context find the instances of the word to create the concordanceline find the context of query word create the pretty lines with the query_word in the middle the wysiwyg line of the concordance create the concordanceline print concordance lines given the query word param word the target word or phrase a list of strings type word str or list param lines the number of lines to display default 25 type lines int param width the width of each line in characters default 80 type width int param save the option to save the concordance type save bool a class that makes it easier to use regular expressions to search over tokenized strings the tokenized string is converted to a string where tokens are marked with angle brackets e g the window is still open the regular expression passed to the findall method is modified to treat angle brackets as non capturing parentheses in addition to matching the token boundaries and to have not match the angle brackets find instances of the regular expression in the text the text is a list of tokens and a regexp pattern to match a single token must be surrounded by angle brackets e g from nltk text import tokensearcher from nltk book import text1 text5 text9 text5 findall bro you rule bro telling you bro u twizted bro text1 findall a man monied nervous dangerous white white white pious queer good mature white cape great wise wise butterless white fiendish pale furious 
better certain complete dismasted younger brave brave brave brave text9 findall th 3 thread through those the thought that that the thing the thing that that that thing through these than through them that the through the thick them that they thought that the param regexp a regular expression type regexp str preprocess the regular expression perform the search sanity check postprocess the output a wrapper around a sequence of simple string tokens which is intended to support initial exploration of texts via the interactive console its methods perform a variety of analyses on the text s contexts e g counting concordancing collocation discovery and display the results if you wish to write a program which makes use of these analyses then you should bypass the text class and use the appropriate analysis function or class directly instead a text is typically initialized from a given document or corpus e g import nltk corpus from nltk text import text moby text nltk corpus gutenberg words melville moby_dick txt this defeats lazy loading but makes things faster this shouldn t be necessary because the corpus view should be doing intelligent caching but without this it s running slow look into whether the caching is working correctly create a text object param tokens the source text type tokens sequence of str support item slice access interactive console methods prints a concordance for word with the specified context window word matching is not case sensitive param word the target word or phrase a list of strings type word str or list param width the width of each line in characters default 80 type width int param lines the number of lines to display default 25 type lines int seealso concordanceindex generate a concordance for word with the specified context window word matching is not case sensitive param word the target word or phrase a list of strings type word str or list param width the width of each line in characters default 80 type width int param lines the number of lines to display default 25 type lines int seealso concordanceindex return collocations derived from the text ignoring stopwords from nltk book import text4 text4 collocation_list 2 united states fellow citizens param num the maximum number of collocations to return type num int param window_size the number of tokens spanned by a collocation default 2 type window_size int rtype list tuple str str print building collocations list print collocations derived from the text ignoring stopwords from nltk book import text4 text4 collocations doctest normalize_whitespace united states fellow citizens years ago four years federal government general government american people vice president god bless chief justice one another fellow americans old world almighty god fellow citizens chief magistrate every citizen indian tribes public debt foreign nations param num the maximum number of collocations to print type num int param window_size the number of tokens spanned by a collocation default 2 type window_size int count the number of times this word appears in the text find the index of the first occurrence of the word in the text code from nltk_contrib readability distributional similarity find other words which appear in the same contexts as the specified word list most similar words first param word the word used to seed the similarity search type word str param num the number of words to generate default 20 type num int seealso contextindex similar_words print building word context index words self _word_context_index similar_words 
word num find contexts where the specified words appear list most frequent common contexts first param words the words used to seed the similarity search type words str param num the number of words to generate default 20 type num int seealso contextindex common_contexts print building word context index produce a plot showing the distribution of the words through the text requires pylab to be installed param words the words to be plotted type words list str seealso nltk draw dispersion_plot print random text generated using a trigram language model see also help nltk lm param length the length of text to generate default 100 type length int param text_seed generation can be conditioned on preceding context type text_seed list str param random_seed a random seed or an instance of random random if provided makes the random sampling part of generation reproducible default 42 type random_seed int create the model when using it the first time see documentation for freqdist plot seealso nltk prob freqdist plot seealso nltk prob freqdist print building vocabulary index find instances of the regular expression in the text the text is a list of tokens and a regexp pattern to match a single token must be surrounded by angle brackets e g from nltk book import text1 text5 text9 text5 findall bro you rule bro telling you bro u twizted bro text1 findall a man monied nervous dangerous white white white pious queer good mature white cape great wise wise butterless white fiendish pale furious better certain complete dismasted younger brave brave brave brave text9 findall th 3 thread through those the thought that that the thing the thing that that that thing through these than through them that the through the thick them that they thought that the param regexp a regular expression type regexp str helper methods one left one right token both case normalized skip over non sentence final punctuation used by the contextindex that is created for similar and common_contexts left context right context string display prototype only this approach will be slow to load a collection of texts which can be loaded with list of texts or with a corpus consisting of one or more texts and which supports counting concordancing collocation discovery etc initialize a textcollection as follows import nltk corpus from nltk text import textcollection from nltk book import text1 text2 text3 gutenberg textcollection nltk corpus gutenberg mytexts textcollection text1 text2 text3 iterating over a textcollection produces all the tokens of all the texts in order bridge to the text corpus reader the frequency of the term in text the number of texts in the corpus divided by the number of texts that the term appears in if a term does not appear in the corpus 0 0 is returned idf values are cached for performance print automatically generated text text generate print
import re import sys from collections import Counter, defaultdict, namedtuple from functools import reduce from math import log from nltk.collocations import BigramCollocationFinder from nltk.lm import MLE from nltk.lm.preprocessing import padded_everygram_pipeline from nltk.metrics import BigramAssocMeasures, f_measure from nltk.probability import ConditionalFreqDist as CFD from nltk.probability import FreqDist from nltk.tokenize import sent_tokenize from nltk.util import LazyConcatenation, tokenwrap ConcordanceLine = namedtuple( "ConcordanceLine", ["left", "query", "right", "offset", "left_print", "right_print", "line"], ) class ContextIndex: @staticmethod def _default_context(tokens, i): left = tokens[i - 1].lower() if i != 0 else "*START*" right = tokens[i + 1].lower() if i != len(tokens) - 1 else "*END*" return (left, right) def __init__(self, tokens, context_func=None, filter=None, key=lambda x: x): self._key = key self._tokens = tokens if context_func: self._context_func = context_func else: self._context_func = self._default_context if filter: tokens = [t for t in tokens if filter(t)] self._word_to_contexts = CFD( (self._key(w), self._context_func(tokens, i)) for i, w in enumerate(tokens) ) self._context_to_words = CFD( (self._context_func(tokens, i), self._key(w)) for i, w in enumerate(tokens) ) def tokens(self): return self._tokens def word_similarity_dict(self, word): word = self._key(word) word_contexts = set(self._word_to_contexts[word]) scores = {} for w, w_contexts in self._word_to_contexts.items(): scores[w] = f_measure(word_contexts, set(w_contexts)) return scores def similar_words(self, word, n=20): scores = defaultdict(int) for c in self._word_to_contexts[self._key(word)]: for w in self._context_to_words[c]: if w != word: scores[w] += ( self._context_to_words[c][word] * self._context_to_words[c][w] ) return sorted(scores, key=scores.get, reverse=True)[:n] def common_contexts(self, words, fail_on_unknown=False): words = [self._key(w) for w in words] contexts = [set(self._word_to_contexts[w]) for w in words] empty = [words[i] for i in range(len(words)) if not contexts[i]] common = reduce(set.intersection, contexts) if empty and fail_on_unknown: raise ValueError("The following word(s) were not found:", " ".join(words)) elif not common: return FreqDist() else: fd = FreqDist( c for w in words for c in self._word_to_contexts[w] if c in common ) return fd class ConcordanceIndex: def __init__(self, tokens, key=lambda x: x): self._tokens = tokens self._key = key self._offsets = defaultdict(list) for index, word in enumerate(tokens): word = self._key(word) self._offsets[word].append(index) def tokens(self): return self._tokens def offsets(self, word): word = self._key(word) return self._offsets[word] def __repr__(self): return "<ConcordanceIndex for %d tokens (%d types)>" % ( len(self._tokens), len(self._offsets), ) def find_concordance(self, word, width=80): if isinstance(word, list): phrase = word else: phrase = [word] half_width = (width - len(" ".join(phrase)) - 2) // 2 context = width // 4 concordance_list = [] offsets = self.offsets(phrase[0]) for i, word in enumerate(phrase[1:]): word_offsets = {offset - i - 1 for offset in self.offsets(word)} offsets = sorted(word_offsets.intersection(offsets)) if offsets: for i in offsets: query_word = " ".join(self._tokens[i : i + len(phrase)]) left_context = self._tokens[max(0, i - context) : i] right_context = self._tokens[i + len(phrase) : i + context] left_print = " ".join(left_context)[-half_width:] right_print = " 
".join(right_context)[:half_width] line_print = " ".join([left_print, query_word, right_print]) concordance_line = ConcordanceLine( left_context, query_word, right_context, i, left_print, right_print, line_print, ) concordance_list.append(concordance_line) return concordance_list def print_concordance(self, word, width=80, lines=25): concordance_list = self.find_concordance(word, width=width) if not concordance_list: print("no matches") else: lines = min(lines, len(concordance_list)) print(f"Displaying {lines} of {len(concordance_list)} matches:") for i, concordance_line in enumerate(concordance_list[:lines]): print(concordance_line.line) class TokenSearcher: def __init__(self, tokens): self._raw = "".join("<" + w + ">" for w in tokens) def findall(self, regexp): regexp = re.sub(r"\s", "", regexp) regexp = re.sub(r"<", "(?:<(?:", regexp) regexp = re.sub(r">", ")>)", regexp) regexp = re.sub(r"(?<!\\)\.", "[^>]", regexp) hits = re.findall(regexp, self._raw) for h in hits: if not h.startswith("<") and h.endswith(">"): raise ValueError("Bad regexp for TokenSearcher.findall") hits = [h[1:-1].split("><") for h in hits] return hits class Text: _COPY_TOKENS = True def __init__(self, tokens, name=None): if self._COPY_TOKENS: tokens = list(tokens) self.tokens = tokens if name: self.name = name elif "]" in tokens[:20]: end = tokens[:20].index("]") self.name = " ".join(str(tok) for tok in tokens[1:end]) else: self.name = " ".join(str(tok) for tok in tokens[:8]) + "..." def __getitem__(self, i): return self.tokens[i] def __len__(self): return len(self.tokens) def concordance(self, word, width=79, lines=25): if "_concordance_index" not in self.__dict__: self._concordance_index = ConcordanceIndex( self.tokens, key=lambda s: s.lower() ) return self._concordance_index.print_concordance(word, width, lines) def concordance_list(self, word, width=79, lines=25): if "_concordance_index" not in self.__dict__: self._concordance_index = ConcordanceIndex( self.tokens, key=lambda s: s.lower() ) return self._concordance_index.find_concordance(word, width)[:lines] def collocation_list(self, num=20, window_size=2): if not ( "_collocations" in self.__dict__ and self._num == num and self._window_size == window_size ): self._num = num self._window_size = window_size from nltk.corpus import stopwords ignored_words = stopwords.words("english") finder = BigramCollocationFinder.from_words(self.tokens, window_size) finder.apply_freq_filter(2) finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words) bigram_measures = BigramAssocMeasures() self._collocations = list( finder.nbest(bigram_measures.likelihood_ratio, num) ) return self._collocations def collocations(self, num=20, window_size=2): collocation_strings = [ w1 + " " + w2 for w1, w2 in self.collocation_list(num, window_size) ] print(tokenwrap(collocation_strings, separator="; ")) def count(self, word): return self.tokens.count(word) def index(self, word): return self.tokens.index(word) def readability(self, method): raise NotImplementedError def similar(self, word, num=20): if "_word_context_index" not in self.__dict__: self._word_context_index = ContextIndex( self.tokens, filter=lambda x: x.isalpha(), key=lambda s: s.lower() ) word = word.lower() wci = self._word_context_index._word_to_contexts if word in wci.conditions(): contexts = set(wci[word]) fd = Counter( w for w in wci.conditions() for c in wci[w] if c in contexts and not w == word ) words = [w for w, _ in fd.most_common(num)] print(tokenwrap(words)) else: print("No matches") def 
common_contexts(self, words, num=20): if "_word_context_index" not in self.__dict__: self._word_context_index = ContextIndex( self.tokens, key=lambda s: s.lower() ) try: fd = self._word_context_index.common_contexts(words, True) if not fd: print("No common contexts were found") else: ranked_contexts = [w for w, _ in fd.most_common(num)] print(tokenwrap(w1 + "_" + w2 for w1, w2 in ranked_contexts)) except ValueError as e: print(e) def dispersion_plot(self, words): from nltk.draw import dispersion_plot dispersion_plot(self, words) def _train_default_ngram_lm(self, tokenized_sents, n=3): train_data, padded_sents = padded_everygram_pipeline(n, tokenized_sents) model = MLE(order=n) model.fit(train_data, padded_sents) return model def generate(self, length=100, text_seed=None, random_seed=42): self._tokenized_sents = [ sent.split(" ") for sent in sent_tokenize(" ".join(self.tokens)) ] if not hasattr(self, "_trigram_model"): print("Building ngram index...", file=sys.stderr) self._trigram_model = self._train_default_ngram_lm( self._tokenized_sents, n=3 ) generated_tokens = [] assert length > 0, "The `length` must be more than 0." while len(generated_tokens) < length: for idx, token in enumerate( self._trigram_model.generate( length, text_seed=text_seed, random_seed=random_seed ) ): if token == "<s>": continue if token == "</s>": break generated_tokens.append(token) random_seed += 1 prefix = " ".join(text_seed) + " " if text_seed else "" output_str = prefix + tokenwrap(generated_tokens[:length]) print(output_str) return output_str def plot(self, *args): return self.vocab().plot(*args) def vocab(self): if "_vocab" not in self.__dict__: self._vocab = FreqDist(self) return self._vocab def findall(self, regexp): if "_token_searcher" not in self.__dict__: self._token_searcher = TokenSearcher(self) hits = self._token_searcher.findall(regexp) hits = [" ".join(h) for h in hits] print(tokenwrap(hits, "; ")) _CONTEXT_RE = re.compile(r"\w+|[\.\!\?]") def _context(self, tokens, i): j = i - 1 while j >= 0 and not self._CONTEXT_RE.match(tokens[j]): j -= 1 left = tokens[j] if j != 0 else "*START*" j = i + 1 while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]): j += 1 right = tokens[j] if j != len(tokens) else "*END*" return (left, right) def __str__(self): return "<Text: %s>" % self.name def __repr__(self): return "<Text: %s>" % self.name class TextCollection(Text): def __init__(self, source): if hasattr(source, "words"): source = [source.words(f) for f in source.fileids()] self._texts = source Text.__init__(self, LazyConcatenation(source)) self._idf_cache = {} def tf(self, term, text): return text.count(term) / len(text) def idf(self, term): idf = self._idf_cache.get(term) if idf is None: matches = len([True for text in self._texts if term in text]) if len(self._texts) == 0: raise ValueError("IDF undefined for empty document collection") idf = log(len(self._texts) / matches) if matches else 0.0 self._idf_cache[term] = idf return idf def tf_idf(self, term, text): return self.tf(term, text) * self.idf(term) def demo(): from nltk.corpus import brown text = Text(brown.words(categories="news")) print(text) print() print("Concordance:") text.concordance("news") print() print("Distributionally similar words:") text.similar("news") print() print("Collocations:") text.collocations() print() print("Dispersion plot:") text.dispersion_plot(["news", "report", "said", "announced"]) print() print("Vocabulary plot:") text.plot(50) print() print("Indexing:") print("text[3]:", text[3]) print("text[3:5]:", text[3:5]) 
print("text.vocab()['news']:", text.vocab()["news"]) if __name__ == "__main__": demo() __all__ = [ "ContextIndex", "ConcordanceIndex", "TokenSearcher", "Text", "TextCollection", ]
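A short usage sketch for the classes above. It assumes the NLTK gutenberg and stopwords data packages are installed (nltk.download('gutenberg'), nltk.download('stopwords')); the query words "monstrous" and "whale" are illustrative only.
# Sketch: exploring a corpus with Text and scoring terms with TextCollection.
from nltk.corpus import gutenberg
from nltk.text import Text, TextCollection

moby = Text(gutenberg.words("melville-moby_dick.txt"))
moby.concordance("monstrous", width=79, lines=5)   # print a few concordance lines
moby.similar("whale", num=10)                      # distributionally similar words
moby.collocations(num=10)                          # needs the stopwords corpus

texts = [moby] + [
    Text(gutenberg.words(f)) for f in ("austen-emma.txt", "austen-sense.txt")
]
corpus = TextCollection(texts)
print(corpus.tf_idf("whale", texts[0]))            # tf-idf of a term in one text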
natural language toolkit tokenizers c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com minor additions contributors matthewmc clouds56 url https www nltk org for license information see license txt import re from nltk data import load from nltk tokenize casual import tweettokenizer casualtokenize from nltk tokenize destructive import nltkwordtokenizer from nltk tokenize legalityprinciple import legalitysyllabletokenizer from nltk tokenize mwe import mwetokenizer from nltk tokenize punkt import punktsentencetokenizer from nltk tokenize regexp import blanklinetokenizer regexptokenizer whitespacetokenizer wordpuncttokenizer blanklinetokenize regexptokenize wordpuncttokenize from nltk tokenize repp import repptokenizer from nltk tokenize sexpr import sexprtokenizer sexprtokenize from nltk tokenize simple import linetokenizer spacetokenizer tabtokenizer linetokenize from nltk tokenize sonoritysequencing import syllabletokenizer from nltk tokenize stanfordsegmenter import stanfordsegmenter from nltk tokenize texttiling import texttilingtokenizer from nltk tokenize toktok import toktoktokenizer from nltk tokenize treebank import treebankworddetokenizer treebankwordtokenizer from nltk tokenize util import regexpspantokenize stringspantokenize standard sentence tokenizer def senttokenizetext languageenglish tokenizer loadftokenizerspunktlanguage pickle return tokenizer tokenizetext standard word tokenizer treebankwordtokenizer nltkwordtokenizer def wordtokenizetext languageenglish preservelinefalse sentences text if preserveline else senttokenizetext language return token for sent in sentences for token in treebankwordtokenizer tokenizesent natural language toolkit tokenizers c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com minor additions contributors matthewmc clouds56 url https www nltk org for license information see license txt nltk tokenizer package tokenizers divide strings into lists of substrings for example tokenizers can be used to find the words and punctuation in a string from nltk tokenize import word_tokenize s good muffins cost 3 88 nin new york please buy me two of them n nthanks word_tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks this particular tokenizer requires the punkt sentence tokenization models to be installed nltk also provides a simpler regular expression based tokenizer which splits text on whitespace and punctuation from nltk tokenize import wordpunct_tokenize wordpunct_tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks we can also operate at the level of sentences using the sentence tokenizer directly as follows from nltk tokenize import sent_tokenize word_tokenize sent_tokenize s good muffins cost 3 88 nin new york please buy me ntwo of them thanks word_tokenize t for t in sent_tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks caution when tokenizing a unicode string make sure you are not using an encoded version of the string it may be necessary to decode it first e g with s decode utf8 nltk tokenizers can produce token spans represented as tuples of integers having the same semantics as string slices to support efficient comparison of tokenizers these methods are implemented as generators from nltk tokenize import whitespacetokenizer list whitespacetokenizer span_tokenize s doctest normalize_whitespace 0 4 5 12 13 17 
18 23 24 26 27 30 31 36 38 44 45 48 49 51 52 55 56 58 59 64 66 73 there are numerous ways to tokenize text if you need more control over tokenization see the other methods provided in this package for further information please see chapter 3 of the nltk book standard sentence tokenizer return a sentence tokenized copy of text using nltk s recommended sentence tokenizer currently class punktsentencetokenizer for the specified language param text text to split into sentences param language the model name in the punkt corpus standard word tokenizer return a tokenized copy of text using nltk s recommended word tokenizer currently an improved class treebankwordtokenizer along with class punktsentencetokenizer for the specified language param text text to split into words type text str param language the model name in the punkt corpus type language str param preserve_line a flag to decide whether to sentence tokenize the text or not type preserve_line bool
import re from nltk.data import load from nltk.tokenize.casual import TweetTokenizer, casual_tokenize from nltk.tokenize.destructive import NLTKWordTokenizer from nltk.tokenize.legality_principle import LegalitySyllableTokenizer from nltk.tokenize.mwe import MWETokenizer from nltk.tokenize.punkt import PunktSentenceTokenizer from nltk.tokenize.regexp import ( BlanklineTokenizer, RegexpTokenizer, WhitespaceTokenizer, WordPunctTokenizer, blankline_tokenize, regexp_tokenize, wordpunct_tokenize, ) from nltk.tokenize.repp import ReppTokenizer from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize from nltk.tokenize.simple import ( LineTokenizer, SpaceTokenizer, TabTokenizer, line_tokenize, ) from nltk.tokenize.sonority_sequencing import SyllableTokenizer from nltk.tokenize.stanford_segmenter import StanfordSegmenter from nltk.tokenize.texttiling import TextTilingTokenizer from nltk.tokenize.toktok import ToktokTokenizer from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize def sent_tokenize(text, language="english"): tokenizer = load(f"tokenizers/punkt/{language}.pickle") return tokenizer.tokenize(text) _treebank_word_tokenizer = NLTKWordTokenizer() def word_tokenize(text, language="english", preserve_line=False): sentences = [text] if preserve_line else sent_tokenize(text, language) return [ token for sent in sentences for token in _treebank_word_tokenizer.tokenize(sent) ]
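A brief usage sketch matching the package docstring above; it assumes the punkt sentence-tokenizer models are installed (nltk.download('punkt')).
# Sketch: the two standard entry points plus the regex-based fallback.
from nltk.tokenize import sent_tokenize, word_tokenize, wordpunct_tokenize

s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
print(word_tokenize(s))                              # sentence-split, then Treebank-style tokens
print([word_tokenize(t) for t in sent_tokenize(s)])  # tokens grouped per sentence
print(wordpunct_tokenize(s))                         # no punkt models needed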
natural language toolkit tokenizer interface c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt tokenizer interface a processing interface for tokenizing a string subclasses must define tokenize or tokenizesents or both return a tokenized copy of s rtype liststr identify the tokens using integer offsets starti endi where sstarti endi is the corresponding token rtype iteratortupleint int apply self tokenize to each element of strings i e return self tokenizes for s in strings rtype listliststr apply self spantokenize to each element of strings i e return self spantokenizes for s in strings yield listtupleint int a tokenizer that divides a string into substrings by splitting on the specified string defined in subclasses natural language toolkit tokenizer interface c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt tokenizer interface a processing interface for tokenizing a string subclasses must define tokenize or tokenize_sents or both return a tokenized copy of s rtype list str identify the tokens using integer offsets start_i end_i where s start_i end_i is the corresponding token rtype iterator tuple int int apply self tokenize to each element of strings i e return self tokenize s for s in strings rtype list list str apply self span_tokenize to each element of strings i e return self span_tokenize s for s in strings yield list tuple int int a tokenizer that divides a string into substrings by splitting on the specified string defined in subclasses
from abc import ABC, abstractmethod from typing import Iterator, List, Tuple from nltk.internals import overridden from nltk.tokenize.util import string_span_tokenize class TokenizerI(ABC): @abstractmethod def tokenize(self, s: str) -> List[str]: if overridden(self.tokenize_sents): return self.tokenize_sents([s])[0] def span_tokenize(self, s: str) -> Iterator[Tuple[int, int]]: raise NotImplementedError() def tokenize_sents(self, strings: List[str]) -> List[List[str]]: return [self.tokenize(s) for s in strings] def span_tokenize_sents( self, strings: List[str] ) -> Iterator[List[Tuple[int, int]]]: for s in strings: yield list(self.span_tokenize(s)) class StringTokenizer(TokenizerI): @property @abstractmethod def _string(self): raise NotImplementedError def tokenize(self, s): return s.split(self._string) def span_tokenize(self, s): yield from string_span_tokenize(s, self._string)
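An illustrative subclass (hypothetical, not part of NLTK) showing the contract above: implementing tokenize alone is enough, since tokenize_sents and span_tokenize_sents are inherited from TokenizerI.
# Sketch: a toy comma tokenizer built on the TokenizerI interface.
from typing import List

from nltk.tokenize.api import TokenizerI


class CommaTokenizer(TokenizerI):  # hypothetical example class
    def tokenize(self, s: str) -> List[str]:
        return [tok.strip() for tok in s.split(",") if tok.strip()]


tok = CommaTokenizer()
print(tok.tokenize("alpha, beta, gamma"))       # ['alpha', 'beta', 'gamma']
print(tok.tokenize_sents(["a, b", "c, d, e"]))  # [['a', 'b'], ['c', 'd', 'e']]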
natural language toolkit tokenizers c 20012023 nltk project christopher hench chris l henchgmail com alex estes url https www nltk org for license information see license txt the legality principle is a language agnostic principle maintaining that syllable onsets and codas the beginning and ends of syllables not including the vowel are only legal if they are found as word onsets or codas in the language the english word admit must then be syllabified as admit since dm is not found wordinitially in the english language bartlett et al this principle was first proposed in daniel kahn s 1976 dissertation syllablebased generalizations in english phonology kahn further argues that there is a strong tendency to syllabify in such a way that initial clusters are of maximal length consistent with the general constraints on wordinitial consonant clusters consequently in addition to being legal onsets the longest legal onset is preferable onset maximization the default implementation assumes an english vowel set but the vowels attribute can be set to ipa or any other alphabet s vowel set for the usecase both a valid set of vowels as well as a text corpus of words in the language are necessary to determine legal onsets and subsequently syllabify words the legality principle with onset maximization is a universal syllabification algorithm but that does not mean it performs equally across languages bartlett et al 2009 is a good benchmark for english accuracy if utilizing ipa pg 311 references otto jespersen 1904 lehrbuch der phonetik leipzig teubner chapter 13 silbe pp 185203 theo vennemann on the theory of syllabic phonology 1972 p 11 daniel kahn syllablebased generalizations in english phonology phd diss mit 1976 elisabeth selkirk 1984 on the major class features and syllable theory in aronoff oehrle eds language sound structure studies in phonology cambridge mit press pp 107136 jeremy goslin and ulrich frauenfelder 2001 a comparison of theoretical and human syllabification language and speech 44 409436 susan bartlett et al 2009 on the syllabification of phonemes in hltnaacl pp 308316 christopher hench 2017 resonances in middle high german new methodologies in prosody uc berkeley syllabifies words based on the legality principle and onset maximization from nltk tokenize import legalitysyllabletokenizer from nltk import wordtokenize from nltk corpus import words text this is a wonderful sentence textwords wordtokenizetext lp legalitysyllabletokenizerwords words lp tokenizeword for word in textwords this is a won der ful sen ten ce param tokenizedsourcetext list of valid tokens in the language type tokenizedsourcetext liststr param vowels valid vowels in language or ipa representation type vowels str param legalfrequencythreshold lowest frequency of all onsets to be considered a legal onset type legalfrequencythreshold float gathers all onsets and then return only those above the frequency threshold param words list of words in a language type words liststr return set of legal onsets rtype setstr returns consonant cluster of word i e all characters until the first vowel param word single word or token type word str return string of characters of onset rtype str apply the legality principle in combination with onset maximization to return a list of syllables param token single word or token type token str return syllablelist single word or token broken up into syllables rtype liststr natural language toolkit tokenizers c 2001 2023 nltk project christopher hench chris l hench gmail com alex estes url https 
www nltk org for license information see license txt the legality principle is a language agnostic principle maintaining that syllable onsets and codas the beginning and ends of syllables not including the vowel are only legal if they are found as word onsets or codas in the language the english word admit must then be syllabified as ad mit since dm is not found word initially in the english language bartlett et al this principle was first proposed in daniel kahn s 1976 dissertation syllable based generalizations in english phonology kahn further argues that there is a strong tendency to syllabify in such a way that initial clusters are of maximal length consistent with the general constraints on word initial consonant clusters consequently in addition to being legal onsets the longest legal onset is preferable onset maximization the default implementation assumes an english vowel set but the vowels attribute can be set to ipa or any other alphabet s vowel set for the use case both a valid set of vowels as well as a text corpus of words in the language are necessary to determine legal onsets and subsequently syllabify words the legality principle with onset maximization is a universal syllabification algorithm but that does not mean it performs equally across languages bartlett et al 2009 is a good benchmark for english accuracy if utilizing ipa pg 311 references otto jespersen 1904 lehrbuch der phonetik leipzig teubner chapter 13 silbe pp 185 203 theo vennemann on the theory of syllabic phonology 1972 p 11 daniel kahn syllable based generalizations in english phonology phd diss mit 1976 elisabeth selkirk 1984 on the major class features and syllable theory in aronoff oehrle eds language sound structure studies in phonology cambridge mit press pp 107 136 jeremy goslin and ulrich frauenfelder 2001 a comparison of theoretical and human syllabification language and speech 44 409 436 susan bartlett et al 2009 on the syllabification of phonemes in hlt naacl pp 308 316 christopher hench 2017 resonances in middle high german new methodologies in prosody uc berkeley syllabifies words based on the legality principle and onset maximization from nltk tokenize import legalitysyllabletokenizer from nltk import word_tokenize from nltk corpus import words text this is a wonderful sentence text_words word_tokenize text lp legalitysyllabletokenizer words words lp tokenize word for word in text_words this is a won der ful sen ten ce param tokenized_source_text list of valid tokens in the language type tokenized_source_text list str param vowels valid vowels in language or ipa representation type vowels str param legal_frequency_threshold lowest frequency of all onsets to be considered a legal onset type legal_frequency_threshold float gathers all onsets and then return only those above the frequency threshold param words list of words in a language type words list str return set of legal onsets rtype set str returns consonant cluster of word i e all characters until the first vowel param word single word or token type word str return string of characters of onset rtype str apply the legality principle in combination with onset maximization to return a list of syllables param token single word or token type token str return syllable_list single word or token broken up into syllables rtype list str
from collections import Counter from nltk.tokenize.api import TokenizerI class LegalitySyllableTokenizer(TokenizerI): def __init__( self, tokenized_source_text, vowels="aeiouy", legal_frequency_threshold=0.001 ): self.legal_frequency_threshold = legal_frequency_threshold self.vowels = vowels self.legal_onsets = self.find_legal_onsets(tokenized_source_text) def find_legal_onsets(self, words): onsets = [self.onset(word) for word in words] legal_onsets = [ k for k, v in Counter(onsets).items() if (v / len(onsets)) > self.legal_frequency_threshold ] return set(legal_onsets) def onset(self, word): onset = "" for c in word.lower(): if c in self.vowels: return onset else: onset += c return onset def tokenize(self, token): syllables = [] syllable, current_onset = "", "" vowel, onset = False, False for char in token[::-1]: char_lower = char.lower() if not vowel: syllable += char vowel = bool(char_lower in self.vowels) else: if char_lower + current_onset[::-1] in self.legal_onsets: syllable += char current_onset += char_lower onset = True elif char_lower in self.vowels and not onset: syllable += char current_onset += char_lower else: syllables.append(syllable) syllable = char current_onset = "" vowel = bool(char_lower in self.vowels) syllables.append(syllable) syllables_ordered = [syllable[::-1] for syllable in syllables][::-1] return syllables_ordered
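A usage sketch taken from the class docstring above; it assumes the NLTK words corpus and punkt models are installed.
# Sketch: syllabify tokens using legal onsets learned from the words corpus.
from nltk import word_tokenize
from nltk.corpus import words
from nltk.tokenize import LegalitySyllableTokenizer

lp = LegalitySyllableTokenizer(words.words())
text_words = word_tokenize("This is a wonderful sentence")
print([lp.tokenize(word) for word in text_words])
# per the docstring: [['This'], ['is'], ['a'], ['won', 'der', 'ful'], ['sen', 'ten', 'ce']]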
multiword expression tokenizer c 20012023 nltk project rob malouf rmaloufmail sdsu edu url https www nltk org for license information see license txt multiword expression tokenizer a mwetokenizer takes a string which has already been divided into tokens and retokenizes it merging multiword expressions into single tokens using a lexicon of mwes from nltk tokenize import mwetokenizer tokenizer mwetokenizer a little a little bit a lot tokenizer addmwe in spite of tokenizer tokenize testing testing testing one two three split testing testing testing one two three tokenizer tokenize this is a test in spite split this is a test in spite tokenizer tokenize in a little or a little bit or a lot in spite of split in alittle or alittlebit or alot inspiteof a tokenizer that processes tokenized text and merges multiword expressions into single tokens initialize the multiword tokenizer with a list of expressions and a separator type mwes listliststr param mwes a sequence of multiword expressions to be merged where each mwe is a sequence of strings type separator str param separator string that should be inserted between words in a multiword expression token default is add a multiword expression to the lexicon stored as a word trie we use util trie to represent the trie its form is a dict of dicts the key true marks the end of a valid mwe param mwe the multiword expression we re adding into the word trie type mwe tuplestr or liststr example tokenizer mwetokenizer tokenizer addmwe a b tokenizer addmwe a b c tokenizer addmwe a x expected a x true none b true none c true none tokenizer mwes expected true param text a list containing tokenized text type text liststr return a list of the tokenized text with multiwords merged together rtype liststr example tokenizer mwetokenizer hors d oeuvre separator tokenizer tokenizean hors d oeuvre tonight sir split an horsd oeuvre tonight sir possible mwe match success no match so backtrack multi word expression tokenizer c 2001 2023 nltk project rob malouf rmalouf mail sdsu edu url https www nltk org for license information see license txt multi word expression tokenizer a mwetokenizer takes a string which has already been divided into tokens and retokenizes it merging multi word expressions into single tokens using a lexicon of mwes from nltk tokenize import mwetokenizer tokenizer mwetokenizer a little a little bit a lot tokenizer add_mwe in spite of tokenizer tokenize testing testing testing one two three split testing testing testing one two three tokenizer tokenize this is a test in spite split this is a test in spite tokenizer tokenize in a little or a little bit or a lot in spite of split in a_little or a_little_bit or a_lot in_spite_of a tokenizer that processes tokenized text and merges multi word expressions into single tokens initialize the multi word tokenizer with a list of expressions and a separator type mwes list list str param mwes a sequence of multi word expressions to be merged where each mwe is a sequence of strings type separator str param separator string that should be inserted between words in a multi word expression token default is _ add a multi word expression to the lexicon stored as a word trie we use util trie to represent the trie its form is a dict of dicts the key true marks the end of a valid mwe param mwe the multi word expression we re adding into the word trie type mwe tuple str or list str example tokenizer mwetokenizer tokenizer add_mwe a b tokenizer add_mwe a b c tokenizer add_mwe a x expected a x true none b true none c true none 
tokenizer _mwes expected true param text a list containing tokenized text type text list str return a list of the tokenized text with multi words merged together rtype list str example tokenizer mwetokenizer hors d oeuvre separator tokenizer tokenize an hors d oeuvre tonight sir split an hors d oeuvre tonight sir possible mwe match and len trie text j 0 success no match so backtrack
from nltk.tokenize.api import TokenizerI from nltk.util import Trie class MWETokenizer(TokenizerI): def __init__(self, mwes=None, separator="_"): if not mwes: mwes = [] self._mwes = Trie(mwes) self._separator = separator def add_mwe(self, mwe): self._mwes.insert(mwe) def tokenize(self, text): i = 0 n = len(text) result = [] while i < n: if text[i] in self._mwes: j = i trie = self._mwes last_match = -1 while j < n and text[j] in trie: trie = trie[text[j]] j = j + 1 if Trie.LEAF in trie: last_match = j else: if last_match > -1: j = last_match if Trie.LEAF in trie or last_match > -1: result.append(self._separator.join(text[i:j])) i = j else: result.append(text[i]) i += 1 else: result.append(text[i]) i += 1 return result
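A usage sketch reproducing the class docstring above; the separator defaults to "_" and can be overridden.
# Sketch: merge multi-word expressions after ordinary tokenization.
from nltk.tokenize import MWETokenizer

tokenizer = MWETokenizer([("a", "little"), ("a", "little", "bit"), ("a", "lot")])
tokenizer.add_mwe(("in", "spite", "of"))
print(tokenizer.tokenize("In a little or a little bit or a lot in spite of".split()))
# ['In', 'a_little', 'or', 'a_little_bit', 'or', 'a_lot', 'in_spite_of']

# A custom separator keeps the merged pieces joined differently.
plus_tok = MWETokenizer([("hors", "d'oeuvre")], separator="+")
print(plus_tok.tokenize("An hors d'oeuvre tonight, sir?".split()))
# ['An', "hors+d'oeuvre", 'tonight,', 'sir?']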
natural language toolkit python port of the mtevalv14 pl tokenizer c 20012015 nltk project liling tan ported from ftp jaguar ncsl nist govmtresourcesmtevalv14 pl contributors ozan caglayan wiktor stribizew url https www nltk org for license information see license txt this is a nltk port of the tokenizer used in the nist bleu evaluation script https github commosessmtmosesdecoderblobmasterscriptsgenericmtevalv14 pll926 which was also ported into python in https github comliumlstnmtpyblobmasternmtpymetricsmtevalbleu pyl162 this nist tokenizer is sentencebased instead of the original paragraphbased tokenization from mteval14 pl the sentencebased tokenization is consistent with the other tokenizers available in nltk from nltk tokenize nist import nisttokenizer nist nisttokenizer s good muffins cost 3 88 in new york expectedlower u good u muffins u cost u u 3 88 u in u new u york u expectedcased u good u muffins u cost u u 3 88 u in u new u york u nist tokenizes lowercasefalse expectedcased true nist tokenizes lowercasetrue expectedlower lowercased true the internationaltokenize is the preferred function when tokenizing noneuropean text e g from nltk tokenize nist import nisttokenizer nist nisttokenizer input strings albb u alibaba group holding limited chinese us a chinese ecommerce company amz u amazon com inc mzn is an american electronic commerce rkt u rakuten inc rakuten kabushikigaisha is a japanese electronic commerce and internet company based in tokyo expected tokens expectedalbb u alibaba u group u holding u limited u u chinese u u u963fu91ccu5df4u5df4u96c6u56e2u63a7u80a1 u u6709u9650u516cu53f8 u expectedamz u amazon u u com u u inc u u u u u02c8xe6 u m expectedrkt u rakuten u u inc u u u u697du5929u682au5f0fu4f1au793e u rakuten u kabushiki u u gaisha nist internationaltokenizealbb 10 expectedalbb true nist internationaltokenizeamz 10 expectedamz true nist internationaltokenizerkt 10 expectedrkt true doctest for patching issue 1926 sent u this is a foou2604sentence expectedsent u this u is u a u foo u u2604 u sentence u nist internationaltokenizesent expectedsent true strip skipped tags strip endofline hyphenation and join lines tokenize punctuation tokenize period and comma unless preceded by a digit tokenize period and comma unless followed by a digit tokenize dash when preceded by a digit perluniprops characters used in nist tokenizer python regexes needs to escape some special symbols see see https stackoverflow comq45670950610569 note in the original perl implementation pz and pzl were used to i strip trailing and heading spaces and ii dedeuplicate spaces in python this would do joinstr strip split thus the next two lines were commented out lineseparator str joinperluniprops chars lineseparator i e pzl separator str joinperluniprops chars separator i e pz pads nonascii strings with space tokenize any punctuation unless followed and preceded by a digit tokenize symbols performs the language independent string substituitions it s a strange order of regexes it ll be better to unescape after stripeolhyphen but let s keep it close to the original nist implementation regexp substitution self stripskip text regexp subsubstitution text text xmlunescapetext regexp substitution self stripeolhyphen text regexp subsubstitution text return text def tokenizeself text lowercasefalse westernlangtrue returnstrfalse text strtext language independent regex text self langindependentsubtext language dependent regex if westernlang pad string with whitespace text text if lowercase text text lower for 
regexp substitution in self langdependentregexes text regexp subsubstitution text remove contiguous whitespaces text jointext split finally strips heading and trailing spaces and converts output string into unicode text strtext strip return text if returnstr else text split def internationaltokenize self text lowercasefalse splitnonasciitrue returnstrfalse text strtext different from the normal tokenize stripeolhyphen is applied first before unescaping regexp substitution self stripskip text regexp subsubstitution text regexp substitution self stripeolhyphen text regexp subsubstitution text text xmlunescapetext if lowercase text text lower for regexp substitution in self internationalregexes text regexp subsubstitution text make sure that there s only one space only between words strip leading and trailing spaces text jointext strip split return text if returnstr else text split natural language toolkit python port of the mteval v14 pl tokenizer c 2001 2015 nltk project liling tan ported from ftp jaguar ncsl nist gov mt resources mteval v14 pl contributors ozan caglayan wiktor stribizew url https www nltk org for license information see license txt this is a nltk port of the tokenizer used in the nist bleu evaluation script https github com moses smt mosesdecoder blob master scripts generic mteval v14 pl l926 which was also ported into python in https github com lium lst nmtpy blob master nmtpy metrics mtevalbleu py l162 this nist tokenizer is sentence based instead of the original paragraph based tokenization from mteval 14 pl the sentence based tokenization is consistent with the other tokenizers available in nltk from nltk tokenize nist import nisttokenizer nist nisttokenizer s good muffins cost 3 88 in new york expected_lower u good u muffins u cost u u 3 88 u in u new u york u expected_cased u good u muffins u cost u u 3 88 u in u new u york u nist tokenize s lowercase false expected_cased true nist tokenize s lowercase true expected_lower lowercased true the international_tokenize is the preferred function when tokenizing non european text e g from nltk tokenize nist import nisttokenizer nist nisttokenizer input strings albb u alibaba group holding limited chinese 阿里巴巴集团控股 有限公司 us a chinese e commerce company amz u amazon com inc ˈæməzɒn is an american electronic commerce rkt u rakuten inc 楽天株式会社 rakuten kabushiki gaisha is a japanese electronic commerce and internet company based in tokyo expected tokens expected_albb u alibaba u group u holding u limited u u chinese u u u963f u91cc u5df4 u5df4 u96c6 u56e2 u63a7 u80a1 u u6709 u9650 u516c u53f8 u expected_amz u amazon u u com u u inc u u u u u02c8 xe6 u m expected_rkt u rakuten u u inc u u u u697d u5929 u682a u5f0f u4f1a u793e u rakuten u kabushiki u u gaisha nist international_tokenize albb 10 expected_albb true nist international_tokenize amz 10 expected_amz true nist international_tokenize rkt 10 expected_rkt true doctest for patching issue 1926 sent u this is a foo u2604sentence expected_sent u this u is u a u foo u u2604 u sentence u nist international_tokenize sent expected_sent true strip skipped tags strip end of line hyphenation and join lines tokenize punctuation tokenize period and comma unless preceded by a digit tokenize period and comma unless followed by a digit tokenize dash when preceded by a digit perluniprops characters used in nist tokenizer i e p n i e p p i e p s python regexes needs to escape some special symbols see see https stackoverflow com q 45670950 610569 note in the original perl implementation p z and 
p zl were used to i strip trailing and heading spaces and ii de deuplicate spaces in python this would do join str strip split thus the next two lines were commented out line_separator str join perluniprops chars line_separator i e p zl separator str join perluniprops chars separator i e p z pads non ascii strings with space tokenize any punctuation unless followed and preceded by a digit tokenize symbols performs the language independent string substituitions it s a strange order of regexes it ll be better to unescape after strip_eol_hyphen but let s keep it close to the original nist implementation language independent regex language dependent regex pad string with whitespace remove contiguous whitespaces finally strips heading and trailing spaces and converts output string into unicode different from the normal tokenize strip_eol_hyphen is applied first before unescaping make sure that there s only one space only between words strip leading and trailing spaces
import io import re from nltk.corpus import perluniprops from nltk.tokenize.api import TokenizerI from nltk.tokenize.util import xml_unescape class NISTTokenizer(TokenizerI): STRIP_SKIP = re.compile("<skipped>"), "" STRIP_EOL_HYPHEN = re.compile("\u2028"), " " PUNCT = re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), " \\1 " PERIOD_COMMA_PRECEED = re.compile(r"([^0-9])([\.,])"), "\\1 \\2 " PERIOD_COMMA_FOLLOW = re.compile(r"([\.,])([^0-9])"), " \\1 \\2" DASH_PRECEED_DIGIT = re.compile("([0-9])(-)"), "\\1 \\2 " LANG_DEPENDENT_REGEXES = [ PUNCT, PERIOD_COMMA_PRECEED, PERIOD_COMMA_FOLLOW, DASH_PRECEED_DIGIT, ] pup_number = str("".join(set(perluniprops.chars("Number")))) pup_punct = str("".join(set(perluniprops.chars("Punctuation")))) pup_symbol = str("".join(set(perluniprops.chars("Symbol")))) number_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_number) punct_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_punct) symbol_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_symbol) NONASCII = re.compile("([\x00-\x7f]+)"), r" \1 " PUNCT_1 = ( re.compile(f"([{number_regex}])([{punct_regex}])"), "\\1 \\2 ", ) PUNCT_2 = ( re.compile(f"([{punct_regex}])([{number_regex}])"), " \\1 \\2", ) SYMBOLS = re.compile(f"([{symbol_regex}])"), " \\1 " INTERNATIONAL_REGEXES = [NONASCII, PUNCT_1, PUNCT_2, SYMBOLS] def lang_independent_sub(self, text): regexp, substitution = self.STRIP_SKIP text = regexp.sub(substitution, text) text = xml_unescape(text) regexp, substitution = self.STRIP_EOL_HYPHEN text = regexp.sub(substitution, text) return text def tokenize(self, text, lowercase=False, western_lang=True, return_str=False): text = str(text) text = self.lang_independent_sub(text) if western_lang: text = " " + text + " " if lowercase: text = text.lower() for regexp, substitution in self.LANG_DEPENDENT_REGEXES: text = regexp.sub(substitution, text) text = " ".join(text.split()) text = str(text.strip()) return text if return_str else text.split() def international_tokenize( self, text, lowercase=False, split_non_ascii=True, return_str=False ): text = str(text) regexp, substitution = self.STRIP_SKIP text = regexp.sub(substitution, text) regexp, substitution = self.STRIP_EOL_HYPHEN text = regexp.sub(substitution, text) text = xml_unescape(text) if lowercase: text = text.lower() for regexp, substitution in self.INTERNATIONAL_REGEXES: text = regexp.sub(substitution, text) text = " ".join(text.strip().split()) return text if return_str else text.split()
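As a quick check of the NISTTokenizer defined above, the sketch below mirrors the module's doctest. It assumes the perluniprops corpus has been downloaded (e.g. nltk.download('perluniprops')), since the class builds its international regexes from it at import time; the sample sentences are otherwise arbitrary.

from nltk.tokenize.nist import NISTTokenizer

nist = NISTTokenizer()

s = "Good muffins cost $3.88\nin New York."
# Western-language path: punctuation is padded with spaces, but "3.88" stays
# intact because the period sits between digits.
print(nist.tokenize(s, lowercase=False))
# ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.']
print(nist.tokenize(s, lowercase=True))
# ['good', 'muffins', 'cost', '$', '3.88', 'in', 'new', 'york', '.']

# The international path additionally pads non-ASCII runs and splits Unicode
# punctuation and symbols; output is printed here rather than asserted.
mixed = "Rakuten, Inc. (楽天株式会社) is a Japanese electronic commerce company."
print(nist.international_tokenize(mixed, lowercase=True)[:8])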
natural language toolkit tokenizers c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com trevor cohn tacohncsse unimelb edu au url https www nltk org for license information see license txt import re from nltk tokenize api import tokenizeri from nltk tokenize util import regexpspantokenize class regexptokenizertokenizeri r a tokenizer that splits a string using a regular expression which matches either the tokens or the separators between tokens tokenizer regexptokenizerr wd s type pattern str param pattern the pattern used to build this tokenizer this pattern must not contain capturing parentheses use noncapturing parentheses e g instead type gaps bool param gaps true if this tokenizer s pattern should be used to find separators between tokens false if this tokenizer s pattern should be used to find the tokens themselves type discardempty bool param discardempty true if any empty tokens generated by the tokenizer should be discarded empty tokens can only be generated if gaps true type flags int param flags the regexp flags used to compile this tokenizer s pattern by default the following flags are used re unicode re multiline re dotall if they gave us a regexp object extract the pattern if our regexp matches gaps use re split if our regexp matches tokens use re findall def initself regexptokenizer initself rs gapstrue class blanklinetokenizerregexptokenizer def initself regexptokenizer initself rsnsns gapstrue class wordpuncttokenizerregexptokenizer r tokenize a text into a sequence of alphabetic and nonalphabetic characters using the regexp wws from nltk tokenize import wordpuncttokenizer s good muffins cost 3 88nin new york please buy mentwo of them nnthanks wordpuncttokenizer tokenizes doctest normalizewhitespace good muffins cost 3 88 in new york please buy me two of them thanks tokenization functions return a tokenized copy of text see class regexptokenizer for descriptions of the arguments natural language toolkit tokenizers c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com trevor cohn tacohn csse unimelb edu au url https www nltk org for license information see license txt regular expression tokenizers a regexptokenizer splits a string into substrings using a regular expression for example the following tokenizer forms tokens out of alphabetic sequences money expressions and any other non whitespace sequences from nltk tokenize import regexptokenizer s good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks tokenizer regexptokenizer r w d s tokenizer tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks a regexptokenizer can use its regexp to match delimiters instead tokenizer regexptokenizer r s gaps true tokenizer tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks note that empty tokens are not returned when the delimiter appears at the start or end of the string the material between the tokens is discarded for example the following tokenizer selects just the capitalized words capword_tokenizer regexptokenizer r a z w capword_tokenizer tokenize s good new york please thanks this module contains several subclasses of regexptokenizer that use pre defined regular expressions from nltk tokenize import blanklinetokenizer uses s n s n s blanklinetokenizer tokenize s doctest normalize_whitespace good muffins cost 3 88 nin new york please buy me ntwo of them thanks all of the regular 
expression tokenizers are also available as functions from nltk tokenize import regexp_tokenize wordpunct_tokenize blankline_tokenize regexp_tokenize s pattern r w d s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks wordpunct_tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks blankline_tokenize s good muffins cost 3 88 nin new york please buy me ntwo of them thanks caution the function regexp_tokenize takes the text as its first argument and the regular expression pattern as its second argument this differs from the conventions used by python s re functions where the pattern is always the first argument this is for consistency with the other nltk tokenizers a tokenizer that splits a string using a regular expression which matches either the tokens or the separators between tokens tokenizer regexptokenizer r w d s type pattern str param pattern the pattern used to build this tokenizer this pattern must not contain capturing parentheses use non capturing parentheses e g instead type gaps bool param gaps true if this tokenizer s pattern should be used to find separators between tokens false if this tokenizer s pattern should be used to find the tokens themselves type discard_empty bool param discard_empty true if any empty tokens generated by the tokenizer should be discarded empty tokens can only be generated if _gaps true type flags int param flags the regexp flags used to compile this tokenizer s pattern by default the following flags are used re unicode re multiline re dotall if they gave us a regexp object extract the pattern if our regexp matches gaps use re split if our regexp matches tokens use re findall tokenize a string on whitespace space tab newline in general users should use the string split method instead from nltk tokenize import whitespacetokenizer s good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks whitespacetokenizer tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks tokenize a string treating any sequence of blank lines as a delimiter blank lines are defined as lines containing no characters except for space or tab characters tokenize a text into a sequence of alphabetic and non alphabetic characters using the regexp w w s from nltk tokenize import wordpuncttokenizer s good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks wordpuncttokenizer tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks tokenization functions return a tokenized copy of text see class regexptokenizer for descriptions of the arguments
import re

from nltk.tokenize.api import TokenizerI
from nltk.tokenize.util import regexp_span_tokenize


class RegexpTokenizer(TokenizerI):
    def __init__(
        self,
        pattern,
        gaps=False,
        discard_empty=True,
        flags=re.UNICODE | re.MULTILINE | re.DOTALL,
    ):
        # If they gave us a regexp object, extract the pattern.
        pattern = getattr(pattern, "pattern", pattern)

        self._pattern = pattern
        self._gaps = gaps
        self._discard_empty = discard_empty
        self._flags = flags
        self._regexp = None

    def _check_regexp(self):
        if self._regexp is None:
            self._regexp = re.compile(self._pattern, self._flags)

    def tokenize(self, text):
        self._check_regexp()
        # If our regexp matches gaps, use re.split:
        if self._gaps:
            if self._discard_empty:
                return [tok for tok in self._regexp.split(text) if tok]
            else:
                return self._regexp.split(text)
        # If our regexp matches tokens, use re.findall:
        else:
            return self._regexp.findall(text)

    def span_tokenize(self, text):
        self._check_regexp()
        if self._gaps:
            for left, right in regexp_span_tokenize(text, self._regexp):
                if not (self._discard_empty and left == right):
                    yield left, right
        else:
            for m in re.finditer(self._regexp, text):
                yield m.span()

    def __repr__(self):
        return "{}(pattern={!r}, gaps={!r}, discard_empty={!r}, flags={!r})".format(
            self.__class__.__name__,
            self._pattern,
            self._gaps,
            self._discard_empty,
            self._flags,
        )


class WhitespaceTokenizer(RegexpTokenizer):
    def __init__(self):
        RegexpTokenizer.__init__(self, r"\s+", gaps=True)


class BlanklineTokenizer(RegexpTokenizer):
    def __init__(self):
        RegexpTokenizer.__init__(self, r"\s*\n\s*\n\s*", gaps=True)


class WordPunctTokenizer(RegexpTokenizer):
    def __init__(self):
        RegexpTokenizer.__init__(self, r"\w+|[^\w\s]+")


def regexp_tokenize(
    text,
    pattern,
    gaps=False,
    discard_empty=True,
    flags=re.UNICODE | re.MULTILINE | re.DOTALL,
):
    tokenizer = RegexpTokenizer(pattern, gaps, discard_empty, flags)
    return tokenizer.tokenize(text)


blankline_tokenize = BlanklineTokenizer().tokenize
wordpunct_tokenize = WordPunctTokenizer().tokenize
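A short usage sketch for the regexp tokenizers above; the sample text and patterns follow the module's doctests, so the commented outputs are the documented ones.

from nltk.tokenize import RegexpTokenizer, regexp_tokenize, wordpunct_tokenize

s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."

# Match the tokens themselves: words, money expressions, other non-space runs.
tokenizer = RegexpTokenizer(r"\w+|\$[\d\.]+|\S+")
print(tokenizer.tokenize(s)[:5])          # ['Good', 'muffins', 'cost', '$3.88', 'in']

# Match the separators instead: split on whitespace, dropping empty tokens.
gap_tokenizer = RegexpTokenizer(r"\s+", gaps=True)
print(gap_tokenizer.tokenize(s)[:3])      # ['Good', 'muffins', 'cost']

# span_tokenize yields (start, end) offsets into the original string.
print(list(tokenizer.span_tokenize(s))[:3])   # [(0, 4), (5, 12), (13, 17)]

# Function forms: note that regexp_tokenize takes the text first, the pattern second.
print(regexp_tokenize(s, pattern=r"\w+|\$[\d\.]+|\S+")[:5])
print(wordpunct_tokenize(s)[:6])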
natural language toolkit interface to the repp tokenizer c 20012015 nltk project s rebecca dridan and stephan oepen contributors liling tan url https www nltk org for license information see license txt a class for word tokenization using the repp parser described in rebecca dridan and stephan oepen 2012 tokenization returning to a long solved problem a survey contrastive experiment recommendations and toolkit in acl http anthology aclweb orgpp12p122 pdfpage406 sents tokenization is widely regarded as a solved problem due to the high accuracy that rulebased tokenizers achieve but rulebased tokenizers are hard to maintain and their rules language specific we evaluated our method on three languages and obtained error rates of 0 27 english 0 35 dutch and 0 76 italian for our best models tokenizer repptokenizer homealvasrepp doctest skip for sent in sents doctest skip tokenizer tokenizesent doctest skip u tokenization u is u widely u regarded u as u a u solved u problem u due u to u the u high u accuracy u that u rulebased u tokenizers u achieve u u but u rulebased u tokenizers u are u hard u to u maintain u and u their u rules u language u specific u u we u evaluated u our u method u on u three u languages u and u obtained u error u rates u of u 0 27 u u u english u u u 0 35 u u u dutch u u and u 0 76 u u u italian u u for u our u best u models u for sent in tokenizer tokenizesentssents doctest skip printsent doctest skip u tokenization u is u widely u regarded u as u a u solved u problem u due u to u the u high u accuracy u that u rulebased u tokenizers u achieve u u but u rulebased u tokenizers u are u hard u to u maintain u and u their u rules u language u specific u u we u evaluated u our u method u on u three u languages u and u obtained u error u rates u of u 0 27 u u u english u u u 0 35 u u u dutch u u and u 0 76 u u u italian u u for u our u best u models u for sent in tokenizer tokenizesentssents keeptokenpositionstrue doctest skip printsent doctest skip u tokenization 0 12 u is 13 15 u widely 16 22 u regarded 23 31 u as 32 34 u a 35 36 u solved 37 43 u problem 44 51 u due 52 55 u to 56 58 u the 59 62 u high 63 67 u accuracy 68 76 u that 77 81 u rulebased 82 91 u tokenizers 92 102 u achieve 103 110 u 110 111 u but 0 3 u rulebased 4 14 u tokenizers 15 25 u are 26 29 u hard 30 34 u to 35 37 u maintain 38 46 u and 47 50 u their 51 56 u rules 57 62 u language 63 71 u specific 72 80 u 80 81 u we 0 2 u evaluated 3 12 u our 13 16 u method 17 23 u on 24 26 u three 27 32 u languages 33 42 u and 43 46 u obtained 47 55 u error 56 61 u rates 62 67 u of 68 70 u 0 27 71 75 u 75 76 u 77 78 u english 78 85 u 85 86 u 86 87 u 0 35 88 92 u 92 93 u 94 95 u dutch 95 100 u 100 101 u and 102 105 u 0 76 106 110 u 110 111 u 112 113 u italian 113 120 u 120 121 u for 122 125 u our 126 129 u best 130 134 u models 135 141 u 141 142 set a directory to store the temporary files set an encoding for the input strings use repp to tokenize a single sentence param sentence a single sentence string type sentence str return a tuple of tokens rtype tuplestr tokenize multiple sentences using repp param sentences a list of sentence strings type sentences liststr return a list of tuples of tokens rtype itertuplestr write sentences to temporary input file generate command to run repp decode the stdout and strips the ending newline removes token position information this module generates the repp command to be used at the terminal param inputfilename path to the input file type inputfilename str this module parses the 
trituple format that repp outputs using the format triple option and returns an generator with tuple of string tokens param reppoutput type reppoutput type return an iterable of the tokenized sentences as tuples of strings rtype itertuple a module to find repp tokenizer binary and its repp set config file checks for the repp binary and ergrepp set config file natural language toolkit interface to the repp tokenizer c 2001 2015 nltk project s rebecca dridan and stephan oepen contributors liling tan url https www nltk org for license information see license txt a class for word tokenization using the repp parser described in rebecca dridan and stephan oepen 2012 tokenization returning to a long solved problem a survey contrastive experiment recommendations and toolkit in acl http anthology aclweb org p p12 p12 2 pdf page 406 sents tokenization is widely regarded as a solved problem due to the high accuracy that rulebased tokenizers achieve but rule based tokenizers are hard to maintain and their rules language specific we evaluated our method on three languages and obtained error rates of 0 27 english 0 35 dutch and 0 76 italian for our best models tokenizer repptokenizer home alvas repp doctest skip for sent in sents doctest skip tokenizer tokenize sent doctest skip u tokenization u is u widely u regarded u as u a u solved u problem u due u to u the u high u accuracy u that u rulebased u tokenizers u achieve u u but u rule based u tokenizers u are u hard u to u maintain u and u their u rules u language u specific u u we u evaluated u our u method u on u three u languages u and u obtained u error u rates u of u 0 27 u u u english u u u 0 35 u u u dutch u u and u 0 76 u u u italian u u for u our u best u models u for sent in tokenizer tokenize_sents sents doctest skip print sent doctest skip u tokenization u is u widely u regarded u as u a u solved u problem u due u to u the u high u accuracy u that u rulebased u tokenizers u achieve u u but u rule based u tokenizers u are u hard u to u maintain u and u their u rules u language u specific u u we u evaluated u our u method u on u three u languages u and u obtained u error u rates u of u 0 27 u u u english u u u 0 35 u u u dutch u u and u 0 76 u u u italian u u for u our u best u models u for sent in tokenizer tokenize_sents sents keep_token_positions true doctest skip print sent doctest skip u tokenization 0 12 u is 13 15 u widely 16 22 u regarded 23 31 u as 32 34 u a 35 36 u solved 37 43 u problem 44 51 u due 52 55 u to 56 58 u the 59 62 u high 63 67 u accuracy 68 76 u that 77 81 u rulebased 82 91 u tokenizers 92 102 u achieve 103 110 u 110 111 u but 0 3 u rule based 4 14 u tokenizers 15 25 u are 26 29 u hard 30 34 u to 35 37 u maintain 38 46 u and 47 50 u their 51 56 u rules 57 62 u language 63 71 u specific 72 80 u 80 81 u we 0 2 u evaluated 3 12 u our 13 16 u method 17 23 u on 24 26 u three 27 32 u languages 33 42 u and 43 46 u obtained 47 55 u error 56 61 u rates 62 67 u of 68 70 u 0 27 71 75 u 75 76 u 77 78 u english 78 85 u 85 86 u 86 87 u 0 35 88 92 u 92 93 u 94 95 u dutch 95 100 u 100 101 u and 102 105 u 0 76 106 110 u 110 111 u 112 113 u italian 113 120 u 120 121 u for 122 125 u our 126 129 u best 130 134 u models 135 141 u 141 142 set a directory to store the temporary files set an encoding for the input strings use repp to tokenize a single sentence param sentence a single sentence string type sentence str return a tuple of tokens rtype tuple str tokenize multiple sentences using repp param sentences a list of sentence strings type 
sentences list str return a list of tuples of tokens rtype iter tuple str write sentences to temporary input file generate command to run repp decode the stdout and strips the ending newline removes token position information this module generates the repp command to be used at the terminal param inputfilename path to the input file type inputfilename str this module parses the tri tuple format that repp outputs using the format triple option and returns an generator with tuple of string tokens param repp_output type repp_output type return an iterable of the tokenized sentences as tuples of strings rtype iter tuple a module to find repp tokenizer binary and its repp set config file if a full path is given try to find path to repp directory in environment variables checks for the repp binary and erg repp set config file
import os import re import subprocess import sys import tempfile from nltk.data import ZipFilePathPointer from nltk.internals import find_dir from nltk.tokenize.api import TokenizerI class ReppTokenizer(TokenizerI): def __init__(self, repp_dir, encoding="utf8"): self.repp_dir = self.find_repptokenizer(repp_dir) self.working_dir = tempfile.gettempdir() self.encoding = encoding def tokenize(self, sentence): return next(self.tokenize_sents([sentence])) def tokenize_sents(self, sentences, keep_token_positions=False): with tempfile.NamedTemporaryFile( prefix="repp_input.", dir=self.working_dir, mode="w", delete=False ) as input_file: for sent in sentences: input_file.write(str(sent) + "\n") input_file.close() cmd = self.generate_repp_command(input_file.name) repp_output = self._execute(cmd).decode(self.encoding).strip() for tokenized_sent in self.parse_repp_outputs(repp_output): if not keep_token_positions: tokenized_sent, starts, ends = zip(*tokenized_sent) yield tokenized_sent def generate_repp_command(self, inputfilename): cmd = [self.repp_dir + "/src/repp"] cmd += ["-c", self.repp_dir + "/erg/repp.set"] cmd += ["--format", "triple"] cmd += [inputfilename] return cmd @staticmethod def _execute(cmd): p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() return stdout @staticmethod def parse_repp_outputs(repp_output): line_regex = re.compile(r"^\((\d+), (\d+), (.+)\)$", re.MULTILINE) for section in repp_output.split("\n\n"): words_with_positions = [ (token, int(start), int(end)) for start, end, token in line_regex.findall(section) ] words = tuple(t[2] for t in words_with_positions) yield words_with_positions def find_repptokenizer(self, repp_dirname): if os.path.exists(repp_dirname): _repp_dir = repp_dirname else: _repp_dir = find_dir(repp_dirname, env_vars=("REPP_TOKENIZER",)) assert os.path.exists(_repp_dir + "/src/repp") assert os.path.exists(_repp_dir + "/erg/repp.set") return _repp_dir
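The repp binary itself is an external dependency, so the runnable part of this sketch only exercises the pure-Python parsing step. The sample string below is hand-written to imitate the "--format triple" output that parse_repp_outputs above expects (one "(start, end, token)" line per token, blank lines between sentences); treat it as an illustration, not real repp output.

from nltk.tokenize.repp import ReppTokenizer

sample_output = (
    "(0, 12, Tokenization)\n"
    "(13, 15, is)\n"
    "(16, 22, widely)\n"
    "\n"
    "(0, 3, But)\n"
    "(4, 14, rule-based)\n"
)

# parse_repp_outputs is a staticmethod, so no repp installation is needed here.
for sent in ReppTokenizer.parse_repp_outputs(sample_output):
    print(sent)
# [('Tokenization', 0, 12), ('is', 13, 15), ('widely', 16, 22)]
# [('But', 0, 3), ('rule-based', 4, 14)]

# With a local repp installation (or REPP_TOKENIZER set), the full pipeline would be:
#   tokenizer = ReppTokenizer('/path/to/repp')   # hypothetical path
#   tokens = tokenizer.tokenize('Tokenization is widely regarded as a solved problem.')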
natural language toolkit tokenizers c 20012023 nltk project yoav goldberg yoavgcs bgu ac il steven bird stevenbird1gmail com minor edits url https www nltk org for license information see license txt sexpression tokenizer sexprtokenizer is used to find parenthesized expressions in a string in particular it divides a string into a sequence of substrings that are either parenthesized expressions including any nested parenthesized expressions or other whitespaceseparated tokens from nltk tokenize import sexprtokenizer sexprtokenizer tokenize a b c d e f g a b c d e f g by default sexprtokenizer will raise a valueerror exception if used to tokenize an expression with nonmatching parentheses sexprtokenizer tokenize c d e f g traceback most recent call last valueerror unmatched close paren at char 1 the strict argument can be set to false to allow for nonmatching parentheses any unmatched close parentheses will be listed as their own sexpression and the last partial sexpr with unmatched open parentheses will be listed as its own sexpr sexprtokenizerstrictfalse tokenize c d e f g c d e f g the characters used for open and close parentheses may be customized using the parens argument to the sexprtokenizer constructor sexprtokenizerparens tokenize a b c d e f g a b c d e f g the sexpression tokenizer is also available as a function from nltk tokenize import sexprtokenize sexprtokenize a b c d e f g a b c d e f g a tokenizer that divides strings into sexpressions an sexpresion can be either a parenthesized expression including any nested parenthesized expressions or a sequence of nonwhitespace nonparenthesis characters for example the string a b c d e f consists of four sexpressions a b c d e and f by default the characters and are treated as open and close parentheses but alternative strings may be specified param parens a twoelement sequence specifying the open and close parentheses that should be used to find sexprs this will typically be either a twocharacter string or a list of two strings type parens str or list param strict if true then raise an exception when tokenizing an illformed sexpr return a list of sexpressions extracted from text for example sexprtokenizer tokenize a b c d e f g a b c d e f g all parentheses are assumed to mark sexpressions no special processing is done to exclude parentheses that occur inside strings or following backslash characters if the given expression contains nonmatching parentheses then the behavior of the tokenizer depends on the strict parameter to the constructor if strict is true then raise a valueerror if strict is false then any unmatched close parentheses will be listed as their own sexpression and the last partial sexpression with unmatched open parentheses will be listed as its own sexpression sexprtokenizerstrictfalse tokenize c d e f g c d e f g param text the string to be tokenized type text str or iterstr rtype iterstr natural language toolkit tokenizers c 2001 2023 nltk project yoav goldberg yoavg cs bgu ac il steven bird stevenbird1 gmail com minor edits url https www nltk org for license information see license txt s expression tokenizer sexprtokenizer is used to find parenthesized expressions in a string in particular it divides a string into a sequence of substrings that are either parenthesized expressions including any nested parenthesized expressions or other whitespace separated tokens from nltk tokenize import sexprtokenizer sexprtokenizer tokenize a b c d e f g a b c d e f g by default sexprtokenizer will raise a valueerror exception 
if used to tokenize an expression with non matching parentheses sexprtokenizer tokenize c d e f g traceback most recent call last valueerror un matched close paren at char 1 the strict argument can be set to false to allow for non matching parentheses any unmatched close parentheses will be listed as their own s expression and the last partial sexpr with unmatched open parentheses will be listed as its own sexpr sexprtokenizer strict false tokenize c d e f g c d e f g the characters used for open and close parentheses may be customized using the parens argument to the sexprtokenizer constructor sexprtokenizer parens tokenize a b c d e f g a b c d e f g the s expression tokenizer is also available as a function from nltk tokenize import sexpr_tokenize sexpr_tokenize a b c d e f g a b c d e f g a tokenizer that divides strings into s expressions an s expresion can be either a parenthesized expression including any nested parenthesized expressions or a sequence of non whitespace non parenthesis characters for example the string a b c d e f consists of four s expressions a b c d e and f by default the characters and are treated as open and close parentheses but alternative strings may be specified param parens a two element sequence specifying the open and close parentheses that should be used to find sexprs this will typically be either a two character string or a list of two strings type parens str or list param strict if true then raise an exception when tokenizing an ill formed sexpr return a list of s expressions extracted from text for example sexprtokenizer tokenize a b c d e f g a b c d e f g all parentheses are assumed to mark s expressions no special processing is done to exclude parentheses that occur inside strings or following backslash characters if the given expression contains non matching parentheses then the behavior of the tokenizer depends on the strict parameter to the constructor if strict is true then raise a valueerror if strict is false then any unmatched close parentheses will be listed as their own s expression and the last partial s expression with unmatched open parentheses will be listed as its own s expression sexprtokenizer strict false tokenize c d e f g c d e f g param text the string to be tokenized type text str or iter str rtype iter str
import re from nltk.tokenize.api import TokenizerI class SExprTokenizer(TokenizerI): def __init__(self, parens="()", strict=True): if len(parens) != 2: raise ValueError("parens must contain exactly two strings") self._strict = strict self._open_paren = parens[0] self._close_paren = parens[1] self._paren_regexp = re.compile( f"{re.escape(parens[0])}|{re.escape(parens[1])}" ) def tokenize(self, text): result = [] pos = 0 depth = 0 for m in self._paren_regexp.finditer(text): paren = m.group() if depth == 0: result += text[pos : m.start()].split() pos = m.start() if paren == self._open_paren: depth += 1 if paren == self._close_paren: if self._strict and depth == 0: raise ValueError("Un-matched close paren at char %d" % m.start()) depth = max(0, depth - 1) if depth == 0: result.append(text[pos : m.end()]) pos = m.end() if self._strict and depth > 0: raise ValueError("Un-matched open paren at char %d" % pos) if pos < len(text): result.append(text[pos:]) return result sexpr_tokenize = SExprTokenizer().tokenize
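Usage sketch for the SExprTokenizer above; the inputs and outputs are taken from the class doctests.

from nltk.tokenize import SExprTokenizer, sexpr_tokenize

print(SExprTokenizer().tokenize("(a b (c d)) e f (g)"))
# ['(a b (c d))', 'e', 'f', '(g)']

# Alternative parentheses and non-strict handling of unmatched parens.
print(SExprTokenizer(parens="{}").tokenize("{a b {c d}} e f {g}"))
# ['{a b {c d}}', 'e', 'f', '{g}']
print(SExprTokenizer(strict=False).tokenize("c) d) e (f (g"))
# ['c', ')', 'd', ')', 'e', '(f (g']

# Function form.
print(sexpr_tokenize("(a b (c d)) e f (g)"))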
natural language toolkit simple tokenizers c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt from nltk tokenize api import stringtokenizer tokenizeri from nltk tokenize util import regexpspantokenize stringspantokenize class spacetokenizerstringtokenizer rtokenize a string using the space character as a delimiter which is the same as s split from nltk tokenize import spacetokenizer s good muffins cost 3 88nin new york please buy mentwo of them nnthanks spacetokenizer tokenizes doctest normalizewhitespace good muffins cost 3 88nin new york please buy mentwo of them nnthanks string t class chartokenizerstringtokenizer string none def tokenizeself s return lists def spantokenizeself s yield from enumeraterange1 lens 1 class linetokenizertokenizeri rtokenize a string into its lines optionally discarding blank lines this is similar to s split n from nltk tokenize import linetokenizer s good muffins cost 3 88nin new york please buy mentwo of them nnthanks linetokenizerblanklines keep tokenizes doctest normalizewhitespace good muffins cost 3 88 in new york please buy me two of them thanks same as l for l in s split n if l strip linetokenizerblanklines discard tokenizes doctest normalizewhitespace good muffins cost 3 88 in new york please buy me two of them thanks param blanklines indicates how blank lines should be handled valid values are discard strip blank lines out of the token list before returning it a line is considered blank if it contains only whitespace characters keep leave all blank lines in the token list discardeof if the string ends with a newline then do not generate a corresponding token after that newline if requested strip off blank lines discardeof not implemented tokenization functions xxx it is stated in module docs that there is no function versions natural language toolkit simple tokenizers c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt simple tokenizers these tokenizers divide strings into substrings using the string split method when tokenizing using a particular delimiter string use the string split method directly as this is more efficient the simple tokenizers are not available as separate functions instead you should just use the string split method directly s good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks s split doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks s split doctest normalize_whitespace good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks s split n doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks the simple tokenizers are mainly useful because they follow the standard tokenizeri interface and so can be used with any code that expects a tokenizer for example these tokenizers can be used to specify the tokenization conventions when building a corpusreader tokenize a string using the space character as a delimiter which is the same as s split from nltk tokenize import spacetokenizer s good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks spacetokenizer tokenize s doctest normalize_whitespace good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks tokenize a string use the tab character as a delimiter the same as s split t from nltk tokenize import tabtokenizer tabtokenizer tokenize 
a tb c n t d a b c n d tokenize a string into individual characters if this functionality is ever required directly use for char in string tokenize a string into its lines optionally discarding blank lines this is similar to s split n from nltk tokenize import linetokenizer s good muffins cost 3 88 nin new york please buy me ntwo of them n nthanks linetokenizer blanklines keep tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks same as l for l in s split n if l strip linetokenizer blanklines discard tokenize s doctest normalize_whitespace good muffins cost 3 88 in new york please buy me two of them thanks param blanklines indicates how blank lines should be handled valid values are discard strip blank lines out of the token list before returning it a line is considered blank if it contains only whitespace characters keep leave all blank lines in the token list discard eof if the string ends with a newline then do not generate a corresponding token after that newline if requested strip off blank lines discard eof not implemented tokenization functions xxx it is stated in module docs that there is no function versions
from nltk.tokenize.api import StringTokenizer, TokenizerI
from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize


class SpaceTokenizer(StringTokenizer):
    _string = " "


class TabTokenizer(StringTokenizer):
    _string = "\t"


class CharTokenizer(StringTokenizer):
    _string = None

    def tokenize(self, s):
        return list(s)

    def span_tokenize(self, s):
        yield from enumerate(range(1, len(s) + 1))


class LineTokenizer(TokenizerI):
    def __init__(self, blanklines="discard"):
        valid_blanklines = ("discard", "keep", "discard-eof")
        if blanklines not in valid_blanklines:
            raise ValueError(
                "Blank lines must be one of: %s" % " ".join(valid_blanklines)
            )

        self._blanklines = blanklines

    def tokenize(self, s):
        lines = s.splitlines()
        # If requested, strip off blank lines.
        if self._blanklines == "discard":
            lines = [l for l in lines if l.rstrip()]
        elif self._blanklines == "discard-eof":
            if lines and not lines[-1].strip():
                lines.pop()
        return lines

    # discard-eof not implemented
    def span_tokenize(self, s):
        if self._blanklines == "keep":
            yield from string_span_tokenize(s, r"\n")
        else:
            yield from regexp_span_tokenize(s, r"\n(\s+\n)*")


def line_tokenize(text, blanklines="discard"):
    return LineTokenizer(blanklines).tokenize(text)
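A brief usage sketch for the simple tokenizers above, following the module doctests.

from nltk.tokenize import LineTokenizer, SpaceTokenizer, TabTokenizer

s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."

# SpaceTokenizer behaves like s.split(' '); span_tokenize returns character offsets.
print(SpaceTokenizer().tokenize(s)[:4])
print(list(SpaceTokenizer().span_tokenize(s))[:3])

# TabTokenizer splits on tab characters, like s.split('\t').
print(TabTokenizer().tokenize("a\tb c\n\t d"))   # ['a', 'b c\n', ' d']

# LineTokenizer splits on newlines, optionally keeping or discarding blank lines.
print(LineTokenizer(blanklines="keep").tokenize(s))
print(LineTokenizer(blanklines="discard").tokenize(s))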
natural language toolkit tokenizers c 20012023 nltk project christopher hench chris l henchgmail com alex estes url https www nltk org for license information see license txt the sonority sequencing principle ssp is a language agnostic algorithm proposed by otto jesperson in 1904 the sonorous quality of a phoneme is judged by the openness of the lips syllable breaks occur before troughs in sonority for more on the ssp see selkirk 1984 the default implementation uses the english alphabet but the sonorityhiearchy can be modified to ipa or any other alphabet for the usecase the ssp is a universal syllabification algorithm but that does not mean it performs equally across languages bartlett et al 2009 is a good benchmark for english accuracy if utilizing ipa pg 311 importantly if a custom hierarchy is supplied and vowels span across more than one level they should be given separately to the vowels class attribute references otto jespersen 1904 lehrbuch der phonetik leipzig teubner chapter 13 silbe pp 185203 elisabeth selkirk 1984 on the major class features and syllable theory in aronoff oehrle eds language sound structure studies in phonology cambridge mit press pp 107136 susan bartlett et al 2009 on the syllabification of phonemes in hltnaacl pp 308316 syllabifies words based on the sonority sequencing principle ssp from nltk tokenize import syllabletokenizer from nltk import wordtokenize ssp syllabletokenizer ssp tokenize justification jus ti fi ca tion text this is a foobarlike sentence ssp tokenizetoken for token in wordtokenizetext this is a foo bar li ke sen ten ce param lang language parameter default is english en type lang str param sonorityhierarchy sonority hierarchy according to the sonority sequencing principle type sonorityhierarchy liststr sonority hierarchy should be provided in descending order if vowels are spread across multiple levels they should be passed assigned self vowels var together otherwise should be placed in first index of hierarchy assigns each phoneme its value from the sonority hierarchy note sentencetext has to be tokenized first param token single word or token type token str return list of tuples first element is characterphoneme and second is the soronity value rtype listtuplestr int ensures each syllable has at least one vowel if the following syllable doesn t have vowel add it to the current one param syllablelist single word or token broken up into syllables type syllablelist liststr return single word or token broken up into syllables with added syllables if necessary rtype liststr apply the ssp to return a list of syllables note sentencetext has to be tokenized first param token single word or token type token str return syllablelist single word or token broken up into syllables rtype liststr assign values from hierarchy if only one vowel return word sonority of previous focal and following phoneme focal phoneme these cases trigger syllable break no syllable break natural language toolkit tokenizers c 2001 2023 nltk project christopher hench chris l hench gmail com alex estes url https www nltk org for license information see license txt the sonority sequencing principle ssp is a language agnostic algorithm proposed by otto jesperson in 1904 the sonorous quality of a phoneme is judged by the openness of the lips syllable breaks occur before troughs in sonority for more on the ssp see selkirk 1984 the default implementation uses the english alphabet but the sonority_hiearchy can be modified to ipa or any other alphabet for the use case the ssp is a 
universal syllabification algorithm but that does not mean it performs equally across languages bartlett et al 2009 is a good benchmark for english accuracy if utilizing ipa pg 311 importantly if a custom hierarchy is supplied and vowels span across more than one level they should be given separately to the vowels class attribute references otto jespersen 1904 lehrbuch der phonetik leipzig teubner chapter 13 silbe pp 185 203 elisabeth selkirk 1984 on the major class features and syllable theory in aronoff oehrle eds language sound structure studies in phonology cambridge mit press pp 107 136 susan bartlett et al 2009 on the syllabification of phonemes in hlt naacl pp 308 316 syllabifies words based on the sonority sequencing principle ssp from nltk tokenize import syllabletokenizer from nltk import word_tokenize ssp syllabletokenizer ssp tokenize justification jus ti fi ca tion text this is a foobar like sentence ssp tokenize token for token in word_tokenize text this is a foo bar li ke sen ten ce param lang language parameter default is english en type lang str param sonority_hierarchy sonority hierarchy according to the sonority sequencing principle type sonority_hierarchy list str sonority hierarchy should be provided in descending order if vowels are spread across multiple levels they should be passed assigned self vowels var together otherwise should be placed in first index of hierarchy vowels nasals fricatives stops assigns each phoneme its value from the sonority hierarchy note sentence text has to be tokenized first param token single word or token type token str return list of tuples first element is character phoneme and second is the soronity value rtype list tuple str int if it s a punctuation or numbers assign 1 ensures each syllable has at least one vowel if the following syllable doesn t have vowel add it to the current one param syllable_list single word or token broken up into syllables type syllable_list list str return single word or token broken up into syllables with added syllables if necessary rtype list str apply the ssp to return a list of syllables note sentence text has to be tokenized first param token single word or token type token str return syllable_list single word or token broken up into syllables rtype list str assign values from hierarchy if only one vowel return word start syllable with first phoneme sonority of previous focal and following phoneme focal phoneme these cases trigger syllable break if it s a punctuation just break no syllable break append last phoneme
import re import warnings from string import punctuation from nltk.tokenize.api import TokenizerI from nltk.util import ngrams class SyllableTokenizer(TokenizerI): def __init__(self, lang="en", sonority_hierarchy=False): if not sonority_hierarchy and lang == "en": sonority_hierarchy = [ "aeiouy", "lmnrw", "zvsf", "bcdgtkpqxhj", ] self.vowels = sonority_hierarchy[0] self.phoneme_map = {} for i, level in enumerate(sonority_hierarchy): for c in level: sonority_level = len(sonority_hierarchy) - i self.phoneme_map[c] = sonority_level self.phoneme_map[c.upper()] = sonority_level def assign_values(self, token): syllables_values = [] for c in token: try: syllables_values.append((c, self.phoneme_map[c])) except KeyError: if c not in "0123456789" and c not in punctuation: warnings.warn( "Character not defined in sonority_hierarchy," " assigning as vowel: '{}'".format(c) ) syllables_values.append((c, max(self.phoneme_map.values()))) if c not in self.vowels: self.vowels += c else: syllables_values.append((c, -1)) return syllables_values def validate_syllables(self, syllable_list): valid_syllables = [] front = "" vowel_pattern = re.compile("|".join(self.vowels)) for i, syllable in enumerate(syllable_list): if syllable in punctuation: valid_syllables.append(syllable) continue if not vowel_pattern.search(syllable): if len(valid_syllables) == 0: front += syllable else: valid_syllables = valid_syllables[:-1] + [ valid_syllables[-1] + syllable ] else: if len(valid_syllables) == 0: valid_syllables.append(front + syllable) else: valid_syllables.append(syllable) return valid_syllables def tokenize(self, token): syllables_values = self.assign_values(token) if sum(token.count(x) for x in self.vowels) <= 1: return [token] syllable_list = [] syllable = syllables_values[0][0] for trigram in ngrams(syllables_values, n=3): phonemes, values = zip(*trigram) prev_value, focal_value, next_value = values focal_phoneme = phonemes[1] if focal_value == -1: syllable_list.append(syllable) syllable_list.append(focal_phoneme) syllable = "" elif prev_value >= focal_value == next_value: syllable += focal_phoneme syllable_list.append(syllable) syllable = "" elif prev_value > focal_value < next_value: syllable_list.append(syllable) syllable = "" syllable += focal_phoneme else: syllable += focal_phoneme syllable += syllables_values[-1][0] syllable_list.append(syllable) return self.validate_syllables(syllable_list)
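Usage sketch for the SyllableTokenizer above, mirroring its doctest; the word_tokenize call additionally assumes the punkt sentence-tokenizer data has been downloaded.

from nltk import word_tokenize
from nltk.tokenize import SyllableTokenizer

ssp = SyllableTokenizer()
print(ssp.tokenize("justification"))
# ['jus', 'ti', 'fi', 'ca', 'tion']

text = "This is a foobar-like sentence."
print([ssp.tokenize(token) for token in word_tokenize(text)])
# [['This'], ['is'], ['a'], ['foo', 'bar', '-', 'li', 'ke'], ['sen', 'ten', 'ce'], ['.']]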
natural language toolkit interface to the stanford tokenizer c 20012023 nltk project steven xu xxustudent unimelb edu au url https www nltk org for license information see license txt jar stanfordpostagger jar def init self pathtojarnone encodingutf8 optionsnone verbosefalse javaoptionsmx1000m raise deprecation warning warnings warn str nthe stanfordtokenizer will be deprecated in version 3 2 5 n please use 03391mnltk parse corenlp corenlpparser0330m instead deprecationwarning stacklevel2 self stanfordjar findjar self jar pathtojar envvarsstanfordpostagger searchpath urlstanfordurl verboseverbose self encoding encoding self javaoptions javaoptions options if options is none else options self optionscmd joinfkeyval for key val in options items staticmethod def parsetokenizedoutputs return s splitlines def tokenizeself s cmd edu stanford nlp process ptbtokenizer return self parsetokenizedoutputself executecmd s def executeself cmd input verbosefalse encoding self encoding cmd extendcharset encoding optionscmd self optionscmd if optionscmd cmd extendoptions self optionscmd defaultoptions joinjavaoptions configure java configjavaoptionsself javaoptions verboseverbose windows is incompatible with namedtemporaryfile without passing in deletefalse with tempfile namedtemporaryfilemodewb deletefalse as inputfile write the actual sentences to the temporary input file if isinstanceinput str and encoding input input encodeencoding inputfile writeinput inputfile flush cmd appendinputfile name run the tagger and get the output stdout stderr java cmd classpathself stanfordjar stdoutpipe stderrpipe stdout stdout decodeencoding os unlinkinputfile name return java configurations to their default values configjavaoptionsdefaultoptions verbosefalse return stdout natural language toolkit interface to the stanford tokenizer c 2001 2023 nltk project steven xu xxu student unimelb edu au url https www nltk org for license information see license txt interface to the stanford tokenizer from nltk tokenize stanford import stanfordtokenizer s good muffins cost 3 88 nin new york please buy me ntwo of them nthanks stanfordtokenizer tokenize s doctest skip good muffins cost 3 88 in new york please buy me two of them thanks s the colour of the wall is blue stanfordtokenizer options americanize true tokenize s doctest skip the color of the wall is blue raise deprecation warning use stanford tokenizer s ptbtokenizer to tokenize multiple sentences configure java windows is incompatible with namedtemporaryfile without passing in delete false write the actual sentences to the temporary input file run the tagger and get the output return java configurations to their default values
import json
import os
import tempfile
import warnings
from subprocess import PIPE

from nltk.internals import _java_options, config_java, find_jar, java
from nltk.parse.corenlp import CoreNLPParser
from nltk.tokenize.api import TokenizerI

_stanford_url = "https://nlp.stanford.edu/software/tokenizer.shtml"


class StanfordTokenizer(TokenizerI):
    _JAR = "stanford-postagger.jar"

    def __init__(
        self,
        path_to_jar=None,
        encoding="utf8",
        options=None,
        verbose=False,
        java_options="-mx1000m",
    ):
        # Raise deprecation warning.
        warnings.warn(
            str(
                "\nThe StanfordTokenizer will "
                "be deprecated in version 3.2.5.\n"
                "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.'"
            ),
            DeprecationWarning,
            stacklevel=2,
        )

        self._stanford_jar = find_jar(
            self._JAR,
            path_to_jar,
            env_vars=("STANFORD_POSTAGGER",),
            searchpath=(),
            url=_stanford_url,
            verbose=verbose,
        )

        self._encoding = encoding
        self.java_options = java_options

        options = {} if options is None else options
        self._options_cmd = ",".join(f"{key}={val}" for key, val in options.items())

    @staticmethod
    def _parse_tokenized_output(s):
        return s.splitlines()

    def tokenize(self, s):
        # Use Stanford's PTBTokenizer to tokenize multiple sentences.
        cmd = ["edu.stanford.nlp.process.PTBTokenizer"]
        return self._parse_tokenized_output(self._execute(cmd, s))

    def _execute(self, cmd, input_, verbose=False):
        encoding = self._encoding
        cmd.extend(["-charset", encoding])
        _options_cmd = self._options_cmd
        if _options_cmd:
            cmd.extend(["-options", self._options_cmd])

        default_options = " ".join(_java_options)

        # Configure java.
        config_java(options=self.java_options, verbose=verbose)

        # Windows is incompatible with NamedTemporaryFile without passing in delete=False.
        with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file:
            # Write the actual sentences to the temporary input file.
            if isinstance(input_, str) and encoding:
                input_ = input_.encode(encoding)
            input_file.write(input_)
            input_file.flush()

            cmd.append(input_file.name)

            # Run the tagger and get the output.
            stdout, stderr = java(
                cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE
            )
            stdout = stdout.decode(encoding)

        os.unlink(input_file.name)

        # Return java configurations to their default values.
        config_java(options=default_options, verbose=False)

        return stdout
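The StanfordTokenizer above is deprecated and needs stanford-postagger.jar on disk (STANFORD_POSTAGGER). Its own deprecation message points to the CoreNLP interface instead; the sketch below shows that route, assuming a CoreNLP server is already running locally on port 9000, which is an external setup step not covered here.

from nltk.parse.corenlp import CoreNLPParser

# tokenize() sends the text to the running server and yields PTB-style tokens.
parser = CoreNLPParser(url="http://localhost:9000")
print(list(parser.tokenize("Good muffins cost $3.88 in New York.")))

# Legacy route (not run here): requires the Stanford POS tagger distribution.
#   st = StanfordTokenizer(path_to_jar="/path/to/stanford-postagger.jar")  # hypothetical path
#   st.tokenize("The colour of the wall is blue.")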
usrbinenv python natural language toolkit interface to the stanford segmenter for chinese and arabic c 20012023 nltk project 52nlp 52nlpcngmail com casper lehmannstrm casperlehmanngmail com alex constantin alexkeyworder ch url https www nltk org for license information see license txt interface to the stanford segmenter if stanfordsegmenter version is older than 20161031 then pathtoslf4j should be provieded for example seg stanfordsegmenterpathtoslf4j yourpathslf4japi jar from nltk tokenize stanfordsegmenter import stanfordsegmenter seg stanfordsegmenter doctest skip seg defaultconfig zh doctest skip sent u printseg segmentsent doctest skip u8fd9 u662f u65afu5766u798f u4e2du6587 u5206u8bcdu5668 u6d4bu8bd5 blankline seg defaultconfig ar doctest skip sent u printseg segmentsent split doctest skip u0647u0630u0627 u0647u0648 u062au0635u0646u064au0641 u0633u062au0627u0646u0641u0648u0631u062f u0627u0644u0639u0631u0628u064a u0644 u0627u0644u0643u0644u0645u0627u062a blankline raise deprecation warning this is passed to java as the cp option the old version of segmenter needs slf4j the new version of stanfordsegmenter20161031 doesn t need slf4j attempt to initialize stanford word segmenter for the specified language using the stanfordsegmenter and stanfordmodels environment variables init for chinesespecific files cmd self javaclass loadclassifier self model keepallwhitespaces self keepwhitespaces textfile inputfilepath if self sihancorporadict is not none cmd extend serdictionary self dict sighancorporadict self sihancorporadict sighanpostprocessing self sihanpostprocessing stdout self executecmd return stdout def segmentself tokens return self segmentsentstokens def segmentsentsself sentences create a temporary input file write the actural sentences to the temporary input file delete the temporary file configure java return java configurations to their default values usr bin env python natural language toolkit interface to the stanford segmenter for chinese and arabic c 2001 2023 nltk project 52nlp 52nlpcn gmail com casper lehmann strøm casperlehmann gmail com alex constantin alex keyworder ch url https www nltk org for license information see license txt interface to the stanford segmenter if stanford segmenter version is older than 2016 10 31 then path_to_slf4j should be provieded for example seg stanfordsegmenter path_to_slf4j your_path slf4j api jar from nltk tokenize stanford_segmenter import stanfordsegmenter seg stanfordsegmenter doctest skip seg default_config zh doctest skip sent u 这是斯坦福中文分词器测试 print seg segment sent doctest skip u8fd9 u662f u65af u5766 u798f u4e2d u6587 u5206 u8bcd u5668 u6d4b u8bd5 blankline seg default_config ar doctest skip sent u هذا هو تصنيف ستانفورد العربي للكلمات print seg segment sent split doctest skip u0647 u0630 u0627 u0647 u0648 u062a u0635 u0646 u064a u0641 u0633 u062a u0627 u0646 u0641 u0648 u0631 u062f u0627 u0644 u0639 u0631 u0628 u064a u0644 u0627 u0644 u0643 u0644 u0645 u0627 u062a blankline raise deprecation warning this is passed to java as the cp option the old version of segmenter needs slf4j the new version of stanford segmenter 2016 10 31 doesn t need slf4j attempt to initialize stanford word segmenter for the specified language using the stanford_segmenter and stanford_models environment variables init for chinese specific files create a temporary input file write the actural sentences to the temporary input file delete the temporary file configure java return java configurations to their default values
import json import os import tempfile import warnings from subprocess import PIPE from nltk.internals import ( _java_options, config_java, find_dir, find_file, find_jar, java, ) from nltk.tokenize.api import TokenizerI _stanford_url = "https://nlp.stanford.edu/software" class StanfordSegmenter(TokenizerI): _JAR = "stanford-segmenter.jar" def __init__( self, path_to_jar=None, path_to_slf4j=None, java_class=None, path_to_model=None, path_to_dict=None, path_to_sihan_corpora_dict=None, sihan_post_processing="false", keep_whitespaces="false", encoding="UTF-8", options=None, verbose=False, java_options="-mx2g", ): warnings.simplefilter("always", DeprecationWarning) warnings.warn( str( "\nThe StanfordTokenizer will " "be deprecated in version 3.2.5.\n" "Please use \033[91mnltk.parse.corenlp.CoreNLPTokenizer\033[0m instead.'" ), DeprecationWarning, stacklevel=2, ) warnings.simplefilter("ignore", DeprecationWarning) stanford_segmenter = find_jar( self._JAR, path_to_jar, env_vars=("STANFORD_SEGMENTER",), searchpath=(), url=_stanford_url, verbose=verbose, ) if path_to_slf4j is not None: slf4j = find_jar( "slf4j-api.jar", path_to_slf4j, env_vars=("SLF4J", "STANFORD_SEGMENTER"), searchpath=(), url=_stanford_url, verbose=verbose, ) else: slf4j = None self._stanford_jar = os.pathsep.join( _ for _ in [stanford_segmenter, slf4j] if _ is not None ) self._java_class = java_class self._model = path_to_model self._sihan_corpora_dict = path_to_sihan_corpora_dict self._sihan_post_processing = sihan_post_processing self._keep_whitespaces = keep_whitespaces self._dict = path_to_dict self._encoding = encoding self.java_options = java_options options = {} if options is None else options self._options_cmd = ",".join( f"{key}={json.dumps(val)}" for key, val in options.items() ) def default_config(self, lang): search_path = () if os.environ.get("STANFORD_SEGMENTER"): search_path = {os.path.join(os.environ.get("STANFORD_SEGMENTER"), "data")} self._dict = None self._sihan_corpora_dict = None self._sihan_post_processing = "false" if lang == "ar": self._java_class = ( "edu.stanford.nlp.international.arabic.process.ArabicSegmenter" ) model = "arabic-segmenter-atb+bn+arztrain.ser.gz" elif lang == "zh": self._java_class = "edu.stanford.nlp.ie.crf.CRFClassifier" model = "pku.gz" self._sihan_post_processing = "true" path_to_dict = "dict-chris6.ser.gz" try: self._dict = find_file( path_to_dict, searchpath=search_path, url=_stanford_url, verbose=False, env_vars=("STANFORD_MODELS",), ) except LookupError as e: raise LookupError( "Could not find '%s' (tried using env. " "variables STANFORD_MODELS and <STANFORD_SEGMENTER>/data/)" % path_to_dict ) from e sihan_dir = "./data/" try: path_to_sihan_dir = find_dir( sihan_dir, url=_stanford_url, verbose=False, env_vars=("STANFORD_SEGMENTER",), ) self._sihan_corpora_dict = os.path.join(path_to_sihan_dir, sihan_dir) except LookupError as e: raise LookupError( "Could not find '%s' (tried using the " "STANFORD_SEGMENTER environment variable)" % sihan_dir ) from e else: raise LookupError(f"Unsupported language {lang}") try: self._model = find_file( model, searchpath=search_path, url=_stanford_url, verbose=False, env_vars=("STANFORD_MODELS", "STANFORD_SEGMENTER"), ) except LookupError as e: raise LookupError( "Could not find '%s' (tried using env. 
" "variables STANFORD_MODELS and <STANFORD_SEGMENTER>/data/)" % model ) from e def tokenize(self, s): super().tokenize(s) def segment_file(self, input_file_path): cmd = [ self._java_class, "-loadClassifier", self._model, "-keepAllWhitespaces", self._keep_whitespaces, "-textFile", input_file_path, ] if self._sihan_corpora_dict is not None: cmd.extend( [ "-serDictionary", self._dict, "-sighanCorporaDict", self._sihan_corpora_dict, "-sighanPostProcessing", self._sihan_post_processing, ] ) stdout = self._execute(cmd) return stdout def segment(self, tokens): return self.segment_sents([tokens]) def segment_sents(self, sentences): encoding = self._encoding _input_fh, self._input_file_path = tempfile.mkstemp(text=True) _input_fh = os.fdopen(_input_fh, "wb") _input = "\n".join(" ".join(x) for x in sentences) if isinstance(_input, str) and encoding: _input = _input.encode(encoding) _input_fh.write(_input) _input_fh.close() cmd = [ self._java_class, "-loadClassifier", self._model, "-keepAllWhitespaces", self._keep_whitespaces, "-textFile", self._input_file_path, ] if self._sihan_corpora_dict is not None: cmd.extend( [ "-serDictionary", self._dict, "-sighanCorporaDict", self._sihan_corpora_dict, "-sighanPostProcessing", self._sihan_post_processing, ] ) stdout = self._execute(cmd) os.unlink(self._input_file_path) return stdout def _execute(self, cmd, verbose=False): encoding = self._encoding cmd.extend(["-inputEncoding", encoding]) _options_cmd = self._options_cmd if _options_cmd: cmd.extend(["-options", self._options_cmd]) default_options = " ".join(_java_options) config_java(options=self.java_options, verbose=verbose) stdout, _stderr = java( cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE ) stdout = stdout.decode(encoding) config_java(options=default_options, verbose=False) return stdout
natural language toolkit texttiling c 20012023 nltk project george boutsioukis url https www nltk org for license information see license txt tokenize a document into topical sections using the texttiling algorithm this algorithm detects subtopic shifts based on the analysis of lexical cooccurrence patterns the process starts by tokenizing the text into pseudosentences of a fixed size w then depending on the method used similarity scores are assigned at sentence gaps the algorithm proceeds by detecting the peak differences between these scores and marking them as boundaries the boundaries are normalized to the closest paragraph break and the segmented text is returned param w pseudosentence size type w int param k size in sentences of the block used in the block comparison method type k int param similaritymethod the method used for determining similarity scores blockcomparison default or vocabularyintroduction type similaritymethod constant param stopwords a list of stopwords that are filtered out defaults to nltk s stopwords corpus type stopwords liststr param smoothingmethod the method used for smoothing the score plot defaultsmoothing default type smoothingmethod constant param smoothingwidth the width of the window used by the smoothing method type smoothingwidth int param smoothingrounds the number of smoothing passes type smoothingrounds int param cutoffpolicy the policy used to determine the number of boundaries hc default or lc type cutoffpolicy constant from nltk corpus import brown tt texttilingtokenizerdemomodetrue text brown raw 4000 s ss d b tt tokenizetext b 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 return a tokenized copy of text where each token represents a separate topic lowercasetext text lower paragraphbreaks self markparagraphbreakstext textlength lenlowercasetext tokenization step starts here remove punctuation nopuncttext join c for c in lowercasetext if re matchraz nt c nopunctparbreaks self markparagraphbreaksnopuncttext tokseqs self dividetotokensequencesnopuncttext the morphological stemming step mentioned in the texttile paper is not implemented a comment in the original c implementation states that it offers no benefit to the process it might be interesting to test the existing stemmers though words stemwordswords filter stopwords for ts in tokseqs ts wrdindexlist wi for wi in ts wrdindexlist if wi0 not in self stopwords tokentable self createtokentabletokseqs nopunctparbreaks end of the tokenization step lexical score determination if self similaritymethod blockcomparison gapscores self blockcomparisontokseqs tokentable elif self similaritymethod vocabularyintroduction raise notimplementederrorvocabulary introduction not implemented else raise valueerror fsimilarity method self similaritymethod not recognized if self smoothingmethod defaultsmoothing smoothscores self smoothscoresgapscores else raise valueerrorfsmoothing method self smoothingmethod not recognized end of lexical score determination boundary identification depthscores self depthscoressmoothscores segmentboundaries self identifyboundariesdepthscores normalizedboundaries self normalizeboundaries text segmentboundaries paragraphbreaks end of boundary identification segmentedtext prevb 0 for b in normalizedboundaries if b 0 continue segmentedtext appendtextprevb b prevb b if prevb textlength append any text that may be remaining segmentedtext appendtextprevb if not segmentedtext segmentedtext text if self demomode return gapscores smoothscores depthscores segmentboundaries return segmentedtext def 
blockcomparisonself tokseqs tokentable adjust window size for boundary conditions identifies indented text or line breaks as the beginning of paragraphs minparagraph 100 pattern re compile trfvn trfvn trfv matches pattern finditertext lastbreak 0 pbreaks 0 for pb in matches if pb start lastbreak minparagraph continue else pbreaks appendpb start lastbreak pb start return pbreaks def dividetotokensequencesself text divides the text into pseudosentences of fixed size w self w wrdindexlist matches re finditerrw text for match in matches wrdindexlist appendmatch group match start return tokensequencei w wrdindexlisti i w for i in range0 lenwrdindexlist w def createtokentableself tokensequences parbreaks creates a table of tokentablefields tokentable currentpar 0 currenttokseq 0 pbiter parbreaks iter currentparbreak nextpbiter if currentparbreak 0 try currentparbreak nextpbiter skip break at 0 except stopiteration as e raise valueerror no paragraph breaks were foundtext too short perhaps from e for ts in tokensequences for word index in ts wrdindexlist try while index currentparbreak currentparbreak nextpbiter currentpar 1 except stopiteration hit bottom pass if word in tokentable tokentableword totalcount 1 if tokentableword lastpar currentpar tokentableword lastpar currentpar tokentableword parcount 1 if tokentableword lasttokseq currenttokseq tokentableword lasttokseq currenttokseq tokentableword tsoccurences appendcurrenttokseq 1 else tokentableword tsoccurences11 1 else new word tokentableword tokentablefield firstposindex tsoccurencescurrenttokseq 1 totalcount1 parcount1 lastparcurrentpar lasttokseqcurrenttokseq currenttokseq 1 return tokentable def identifyboundariesself depthscores calculates the depth of each gap i e the average difference between the left and right peaks and the gap s score depthscores 0 for x in scores clip boundaries this holds on the rule of thumbmy thumb that a section shouldn t be smaller than at least 2 pseudosentences for small texts and around 5 for larger ones clip minmaxlenscores 10 2 5 index clip for gapscore in scoresclip clip lpeak gapscore for score in scoresindex 1 if score lpeak lpeak score else break rpeak gapscore for score in scoresindex if score rpeak rpeak score else break depthscoresindex lpeak rpeak 2 gapscore index 1 return depthscores def normalizeboundariesself text boundaries paragraphbreaks find closest paragraph break a field in the token table holding parameters for each token used later in the process def init self firstpos tsoccurences totalcount1 parcount1 lastpar0 lasttokseqnone self dict updatelocals del self dictself class tokensequence a token list with its original length and its index def initself index wrdindexlist originallengthnone originallength originallength or lenwrdindexlist self dict updatelocals del self dictself pasted from the scipy cookbook https www scipy orgcookbooksignalsmooth def smoothx windowlen11 windowflat if x ndim 1 raise valueerrorsmooth only accepts 1 dimension arrays if x size windowlen raise valueerrorinput vector needs to be bigger than window size if windowlen 3 return x if window not in flat hanning hamming bartlett blackman raise valueerror window is on of flat hanning hamming bartlett blackman s numpy r2 x0 xwindowlen 1 1 x 2 x1 x1 windowlen 1 printlens if window flat moving average w numpy oneswindowlen d else w evalnumpy window windowlen y numpy convolvew w sum s modesame return ywindowlen 1 windowlen 1 def demotextnone from matplotlib import pylab from nltk corpus import brown tt 
texttilingtokenizerdemomodetrue if text is none text brown raw 10000 s ss d b tt tokenizetext pylab xlabelsentence gap index pylab ylabelgap scores pylab plotrangelens s labelgap scores pylab plotrangelenss ss labelsmoothed gap scores pylab plotrangelend d labeldepth scores pylab stemrangelenb b pylab legend pylab show natural language toolkit texttiling c 2001 2023 nltk project george boutsioukis url https www nltk org for license information see license txt tokenize a document into topical sections using the texttiling algorithm this algorithm detects subtopic shifts based on the analysis of lexical co occurrence patterns the process starts by tokenizing the text into pseudosentences of a fixed size w then depending on the method used similarity scores are assigned at sentence gaps the algorithm proceeds by detecting the peak differences between these scores and marking them as boundaries the boundaries are normalized to the closest paragraph break and the segmented text is returned param w pseudosentence size type w int param k size in sentences of the block used in the block comparison method type k int param similarity_method the method used for determining similarity scores block_comparison default or vocabulary_introduction type similarity_method constant param stopwords a list of stopwords that are filtered out defaults to nltk s stopwords corpus type stopwords list str param smoothing_method the method used for smoothing the score plot default_smoothing default type smoothing_method constant param smoothing_width the width of the window used by the smoothing method type smoothing_width int param smoothing_rounds the number of smoothing passes type smoothing_rounds int param cutoff_policy the policy used to determine the number of boundaries hc default or lc type cutoff_policy constant from nltk corpus import brown tt texttilingtokenizer demo_mode true text brown raw 4000 s ss d b tt tokenize text b 0 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 return a tokenized copy of text where each token represents a separate topic tokenization step starts here remove punctuation the morphological stemming step mentioned in the texttile paper is not implemented a comment in the original c implementation states that it offers no benefit to the process it might be interesting to test the existing stemmers though words _stem_words words filter stopwords end of the tokenization step lexical score determination end of lexical score determination boundary identification end of boundary identification append any text that may be remaining implements the block comparison method adjust window size for boundary conditions score 0 0 identifies indented text or line breaks as the beginning of paragraphs skip break at 0 hit bottom new word identifies boundaries at the peaks of similarity score differences undo if there is a boundary close already calculates the depth of each gap i e the average difference between the left and right peaks and the gap s score clip boundaries this holds on the rule of thumb my thumb that a section shouldn t be smaller than at least 2 pseudosentences for small texts and around 5 for larger ones normalize the boundaries identified to the original text s paragraph breaks find closest paragraph break avoid duplicates a field in the token table holding parameters for each token used later in the process pasted from the scipy cookbook https www scipy org cookbook signalsmooth smooth the data using a window with requested size this method is based on the convolution of a scaled window 
with the signal the signal is prepared by introducing reflected copies of the signal with the window size in both ends so that transient parts are minimized in the beginning and end part of the output signal param x the input signal param window_len the dimension of the smoothing window should be an odd integer param window the type of window from flat hanning hamming bartlett blackman flat window will produce a moving average smoothing return the smoothed signal example t linspace 2 2 0 1 x sin t randn len t 0 1 y smooth x see also numpy hanning numpy hamming numpy bartlett numpy blackman numpy convolve scipy signal lfilter todo the window parameter could be the window itself if an array instead of a string print len s moving average
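As a small aside before the implementation, the depth-score step described above can be illustrated on a toy list of gap scores. The helper below is hypothetical and simplified (the real method also clips a few positions at each end), but it follows the same walk-left/walk-right peak search.

def toy_depth_scores(scores):
    # For each gap, climb to the nearest peak on the left and on the right,
    # then measure how far the gap dips below the two peaks combined.
    depths = [0.0] * len(scores)
    for i, gap in enumerate(scores):
        lpeak = gap
        for s in scores[i::-1]:
            if s >= lpeak:
                lpeak = s
            else:
                break
        rpeak = gap
        for s in scores[i:]:
            if s >= rpeak:
                rpeak = s
            else:
                break
        depths[i] = lpeak + rpeak - 2 * gap
    return depths

# The deepest "valley" (index 2 here) is the preferred boundary candidate.
print(toy_depth_scores([0.2, 0.5, 0.1, 0.6, 0.4]))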
import math import re try: import numpy except ImportError: pass from nltk.tokenize.api import TokenizerI BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1 LC, HC = 0, 1 DEFAULT_SMOOTHING = [0] class TextTilingTokenizer(TokenizerI): def __init__( self, w=20, k=10, similarity_method=BLOCK_COMPARISON, stopwords=None, smoothing_method=DEFAULT_SMOOTHING, smoothing_width=2, smoothing_rounds=1, cutoff_policy=HC, demo_mode=False, ): if stopwords is None: from nltk.corpus import stopwords stopwords = stopwords.words("english") self.__dict__.update(locals()) del self.__dict__["self"] def tokenize(self, text): lowercase_text = text.lower() paragraph_breaks = self._mark_paragraph_breaks(text) text_length = len(lowercase_text) nopunct_text = "".join( c for c in lowercase_text if re.match(r"[a-z\-' \n\t]", c) ) nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text) tokseqs = self._divide_to_tokensequences(nopunct_text) for ts in tokseqs: ts.wrdindex_list = [ wi for wi in ts.wrdindex_list if wi[0] not in self.stopwords ] token_table = self._create_token_table(tokseqs, nopunct_par_breaks) if self.similarity_method == BLOCK_COMPARISON: gap_scores = self._block_comparison(tokseqs, token_table) elif self.similarity_method == VOCABULARY_INTRODUCTION: raise NotImplementedError("Vocabulary introduction not implemented") else: raise ValueError( f"Similarity method {self.similarity_method} not recognized" ) if self.smoothing_method == DEFAULT_SMOOTHING: smooth_scores = self._smooth_scores(gap_scores) else: raise ValueError(f"Smoothing method {self.smoothing_method} not recognized") depth_scores = self._depth_scores(smooth_scores) segment_boundaries = self._identify_boundaries(depth_scores) normalized_boundaries = self._normalize_boundaries( text, segment_boundaries, paragraph_breaks ) segmented_text = [] prevb = 0 for b in normalized_boundaries: if b == 0: continue segmented_text.append(text[prevb:b]) prevb = b if prevb < text_length: segmented_text.append(text[prevb:]) if not segmented_text: segmented_text = [text] if self.demo_mode: return gap_scores, smooth_scores, depth_scores, segment_boundaries return segmented_text def _block_comparison(self, tokseqs, token_table): def blk_frq(tok, block): ts_occs = filter(lambda o: o[0] in block, token_table[tok].ts_occurences) freq = sum(tsocc[1] for tsocc in ts_occs) return freq gap_scores = [] numgaps = len(tokseqs) - 1 for curr_gap in range(numgaps): score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0 score = 0.0 if curr_gap < self.k - 1: window_size = curr_gap + 1 elif curr_gap > numgaps - self.k: window_size = numgaps - curr_gap else: window_size = self.k b1 = [ts.index for ts in tokseqs[curr_gap - window_size + 1 : curr_gap + 1]] b2 = [ts.index for ts in tokseqs[curr_gap + 1 : curr_gap + window_size + 1]] for t in token_table: score_dividend += blk_frq(t, b1) * blk_frq(t, b2) score_divisor_b1 += blk_frq(t, b1) ** 2 score_divisor_b2 += blk_frq(t, b2) ** 2 try: score = score_dividend / math.sqrt(score_divisor_b1 * score_divisor_b2) except ZeroDivisionError: pass gap_scores.append(score) return gap_scores def _smooth_scores(self, gap_scores): "Wraps the smooth function from the SciPy Cookbook" return list( smooth(numpy.array(gap_scores[:]), window_len=self.smoothing_width + 1) ) def _mark_paragraph_breaks(self, text): MIN_PARAGRAPH = 100 pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*") matches = pattern.finditer(text) last_break = 0 pbreaks = [0] for pb in matches: if pb.start() - last_break < MIN_PARAGRAPH: continue else: 
pbreaks.append(pb.start()) last_break = pb.start() return pbreaks def _divide_to_tokensequences(self, text): "Divides the text into pseudosentences of fixed size" w = self.w wrdindex_list = [] matches = re.finditer(r"\w+", text) for match in matches: wrdindex_list.append((match.group(), match.start())) return [ TokenSequence(i / w, wrdindex_list[i : i + w]) for i in range(0, len(wrdindex_list), w) ] def _create_token_table(self, token_sequences, par_breaks): "Creates a table of TokenTableFields" token_table = {} current_par = 0 current_tok_seq = 0 pb_iter = par_breaks.__iter__() current_par_break = next(pb_iter) if current_par_break == 0: try: current_par_break = next(pb_iter) except StopIteration as e: raise ValueError( "No paragraph breaks were found(text too short perhaps?)" ) from e for ts in token_sequences: for word, index in ts.wrdindex_list: try: while index > current_par_break: current_par_break = next(pb_iter) current_par += 1 except StopIteration: pass if word in token_table: token_table[word].total_count += 1 if token_table[word].last_par != current_par: token_table[word].last_par = current_par token_table[word].par_count += 1 if token_table[word].last_tok_seq != current_tok_seq: token_table[word].last_tok_seq = current_tok_seq token_table[word].ts_occurences.append([current_tok_seq, 1]) else: token_table[word].ts_occurences[-1][1] += 1 else: token_table[word] = TokenTableField( first_pos=index, ts_occurences=[[current_tok_seq, 1]], total_count=1, par_count=1, last_par=current_par, last_tok_seq=current_tok_seq, ) current_tok_seq += 1 return token_table def _identify_boundaries(self, depth_scores): boundaries = [0 for x in depth_scores] avg = sum(depth_scores) / len(depth_scores) stdev = numpy.std(depth_scores) if self.cutoff_policy == LC: cutoff = avg - stdev else: cutoff = avg - stdev / 2.0 depth_tuples = sorted(zip(depth_scores, range(len(depth_scores)))) depth_tuples.reverse() hp = list(filter(lambda x: x[0] > cutoff, depth_tuples)) for dt in hp: boundaries[dt[1]] = 1 for dt2 in hp: if ( dt[1] != dt2[1] and abs(dt2[1] - dt[1]) < 4 and boundaries[dt2[1]] == 1 ): boundaries[dt[1]] = 0 return boundaries def _depth_scores(self, scores): depth_scores = [0 for x in scores] clip = min(max(len(scores) // 10, 2), 5) index = clip for gapscore in scores[clip:-clip]: lpeak = gapscore for score in scores[index::-1]: if score >= lpeak: lpeak = score else: break rpeak = gapscore for score in scores[index:]: if score >= rpeak: rpeak = score else: break depth_scores[index] = lpeak + rpeak - 2 * gapscore index += 1 return depth_scores def _normalize_boundaries(self, text, boundaries, paragraph_breaks): norm_boundaries = [] char_count, word_count, gaps_seen = 0, 0, 0 seen_word = False for char in text: char_count += 1 if char in " \t\n" and seen_word: seen_word = False word_count += 1 if char not in " \t\n" and not seen_word: seen_word = True if gaps_seen < len(boundaries) and word_count > ( max(gaps_seen * self.w, self.w) ): if boundaries[gaps_seen] == 1: best_fit = len(text) for br in paragraph_breaks: if best_fit > abs(br - char_count): best_fit = abs(br - char_count) bestbr = br else: break if bestbr not in norm_boundaries: norm_boundaries.append(bestbr) gaps_seen += 1 return norm_boundaries class TokenTableField: def __init__( self, first_pos, ts_occurences, total_count=1, par_count=1, last_par=0, last_tok_seq=None, ): self.__dict__.update(locals()) del self.__dict__["self"] class TokenSequence: "A token list with its original length and its index" def __init__(self, index, wrdindex_list, 
original_length=None): original_length = original_length or len(wrdindex_list) self.__dict__.update(locals()) del self.__dict__["self"]

def smooth(x, window_len=11, window="flat"):
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
        )
    # Pad the signal with reflected copies at both ends so the convolution has
    # minimal transients at the boundaries.
    s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == "flat":  # moving average
        w = numpy.ones(window_len, "d")
    else:
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode="same")
    return y[window_len - 1 : -window_len + 1]


def demo(text=None):
    from matplotlib import pylab
    from nltk.corpus import brown

    tt = TextTilingTokenizer(demo_mode=True)
    if text is None:
        text = brown.raw()[:10000]
    s, ss, d, b = tt.tokenize(text)
    pylab.xlabel("Sentence Gap index")
    pylab.ylabel("Gap Scores")
    pylab.plot(range(len(s)), s, label="Gap Scores")
    pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
    pylab.plot(range(len(d)), d, label="Depth scores")
    pylab.stem(range(len(b)), b)
    pylab.legend()
    pylab.show()
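A short usage sketch for the tokenizer as a whole, kept separate from the plotting demo above. It assumes numpy and the Brown and stopwords corpora are installed; the 4000-character slice mirrors the example in the class description, and any multi-paragraph string would do.

from nltk.corpus import brown
from nltk.tokenize import TextTilingTokenizer

tt = TextTilingTokenizer(w=20, k=10)        # the defaults, shown explicitly
segments = tt.tokenize(brown.raw()[:4000])  # returns a list of topical chunks
print(len(segments), "segments")
print(segments[0][:200])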
natural language toolkit toolbox reader c 20012023 nltk project greg aumann gregaumannsil org url https www nltk org for license information see license txt module for reading writing and manipulating toolbox databases and settings files class for reading and processing standard format marker files and strings open a standard format marker file for sequential reading param sfmfile name of the standard format marker input file type sfmfile str open a standard format marker string for sequential reading param s string to parse as a standard format marker input file type s str return an iterator that returns the next field in a marker value tuple linebreaks and trailing white space are preserved except for the final newline in each field rtype itertuplestr str discard a bom in the first line need to get first line outside the loop for correct handling of the first marker if it spans multiple lines pep 479 prevent runtimeerror when stopiteration is raised inside generator no more data is available terminate the generator return an iterator that returns the next field in a marker value tuple where marker and value are unicode strings if an encoding was specified in the fields method otherwise they are nonunicode strings param strip strip trailing whitespace from the last line of each field type strip bool param unwrap convert newlines in a field to spaces type unwrap bool param encoding name of an encoding to use if it is specified then the fields method returns unicode strings rather than non unicode strings type encoding str or none param errors error handling scheme for codec same as the decode builtin string method type errors str param unicodefields set of marker names whose values are utf8 encoded ignored if encoding is none if the whole file is utf8 encoded set encoding utf8 and leave unicodefields with its default value of none type unicodefields sequence rtype itertuplestr str close a previously opened standard format marker file or string self file close try del self linenum except attributeerror pass class toolboxdatastandardformat def parseself grammarnone kwargs if grammar return self chunkparsegrammargrammar kwargs else return self recordparsekwargs def recordparseself keynone kwargs r returns an element tree structure corresponding to a toolbox data file with all markers at the same level thus the following toolbox database sh v3 0 400 rotokas dictionary datestamphasfourdigityear lx kaa ps v a ge gag gp nek i pas lx kaa ps v b ge strangle gp pasim nek after parsing will end up with the same structure ignoring the extra whitespace as the following xml fragment after being parsed by elementtree toolboxdata header shv3 0 400 rotokas dictionarysh datestamphasfourdigityear header record lxkaalx psv aps gegagge gpnek i pasgp record record lxkaalx psv bps gestranglege gppasim nekgp record toolboxdata param key name of key marker at the start of each record if set to none the default value the first marker that doesn t begin with an underscore is assumed to be the key type key str param kwargs keyword arguments passed to standardformat fields type kwargs dict rtype elementtree elementinterface return contents of toolbox data divided into header and records returns an element tree structure corresponding to a toolbox data file parsed according to the chunk grammar type grammar str param grammar contains the chunking rules used to parse the database see chunk regexp for documentation type rootlabel str param rootlabel the node value that should be used for the top node of the chunk 
structure type trace int param trace the level of tracing that should be used when parsing a text 0 will generate no tracing output 1 will generate normal tracing output and 2 or higher will generate verbose tracing output type kwargs dict param kwargs keyword arguments passed to toolbox standardformat fields rtype elementtree elementinterface return a string with a standard format representation of the toolbox data in tree tree can be a toolbox database or a single record param tree flat representation of toolbox data whole database or single record type tree elementtree elementinterface param encoding name of an encoding to use type encoding str param errors error handling scheme for codec same as the encode builtin string method type errors str param unicodefields type unicodefields dictstr or setstr rtype str this class is the base class for settings files def initself super init def parseself encodingnone errorsstrict kwargs builder treebuilder for mkr value in self fieldsencodingencoding errorserrors kwargs check whether the first char of the field marker indicates a block start or end block mkr0 if block in mkr mkr1 else block none build tree on the basis of block char if block builder startmkr builder datavalue elif block builder endmkr else builder startmkr builder datavalue builder endmkr return builder close def tosettingsstringtree encodingnone errorsstrict unicodefieldsnone write xml to file l list tosettingsstring tree getroot l encodingencoding errorserrors unicodefieldsunicodefields return joinl def tosettingsstringnode l kwargs write xml to file tag node tag text node text if lennode 0 if text l appendftag textn else l appendsn tag else if text l appendftag textn else l appendsn tag for n in node tosettingsstringn l kwargs l appendsn tag return def removeblankselem out list for child in elem removeblankschild if child text or lenchild 0 out appendchild elem out def adddefaultfieldselem defaultfields for field in defaultfields getelem tag if elem findfield is none subelementelem field for child in elem adddefaultfieldschild defaultfields def sortfieldselem fieldorders orderdicts dict for field order in fieldorders items orderdictsfield orderkey dict for i subfield in enumerateorder orderkeysubfield i sortfieldselem orderdicts def sortfieldselem ordersdicts add blank lines before all elements and subelements specified in blankbefore param elem toolbox data in an elementtree structure type elem elementtree elementinterface param blankbefore elements and subelements to add blank lines before type blankbefore dicttuple zippath find corporatoolbox zip lexicon toolboxdatazipfilepathpointerzippath toolboxrotokas dic parse settings openzipfilepathpointerzippath entry toolboxmdfmdfalth typ natural language toolkit toolbox reader c 2001 2023 nltk project greg aumann greg_aumann sil org url https www nltk org for license information see license txt module for reading writing and manipulating toolbox databases and settings files class for reading and processing standard format marker files and strings open a standard format marker file for sequential reading param sfm_file name of the standard format marker input file type sfm_file str open a standard format marker string for sequential reading param s string to parse as a standard format marker input file type s str return an iterator that returns the next field in a marker value tuple linebreaks and trailing white space are preserved except for the final newline in each field rtype iter tuple str str discard a bom in the first line 
need to get first line outside the loop for correct handling of the first marker if it spans multiple lines pep 479 prevent runtimeerror when stopiteration is raised inside generator no more data is available terminate the generator return an iterator that returns the next field in a marker value tuple where marker and value are unicode strings if an encoding was specified in the fields method otherwise they are non unicode strings param strip strip trailing whitespace from the last line of each field type strip bool param unwrap convert newlines in a field to spaces type unwrap bool param encoding name of an encoding to use if it is specified then the fields method returns unicode strings rather than non unicode strings type encoding str or none param errors error handling scheme for codec same as the decode builtin string method type errors str param unicode_fields set of marker names whose values are utf 8 encoded ignored if encoding is none if the whole file is utf 8 encoded set encoding utf8 and leave unicode_fields with its default value of none type unicode_fields sequence rtype iter tuple str str close a previously opened standard format marker file or string returns an element tree structure corresponding to a toolbox data file with all markers at the same level thus the following toolbox database _sh v3 0 400 rotokas dictionary _datestamphasfourdigityear lx kaa ps v a ge gag gp nek i pas lx kaa ps v b ge strangle gp pasim nek after parsing will end up with the same structure ignoring the extra whitespace as the following xml fragment after being parsed by elementtree toolbox_data header _sh v3 0 400 rotokas dictionary _sh _datestamphasfourdigityear header record lx kaa lx ps v a ps ge gag ge gp nek i pas gp record record lx kaa lx ps v b ps ge strangle ge gp pasim nek gp record toolbox_data param key name of key marker at the start of each record if set to none the default value the first marker that doesn t begin with an underscore is assumed to be the key type key str param kwargs keyword arguments passed to standardformat fields type kwargs dict rtype elementtree _elementinterface return contents of toolbox data divided into header and records returns an element tree structure corresponding to a toolbox data file parsed according to the chunk grammar type grammar str param grammar contains the chunking rules used to parse the database see chunk regexp for documentation type root_label str param root_label the node value that should be used for the top node of the chunk structure type trace int param trace the level of tracing that should be used when parsing a text 0 will generate no tracing output 1 will generate normal tracing output and 2 or higher will generate verbose tracing output type kwargs dict param kwargs keyword arguments passed to toolbox standardformat fields rtype elementtree _elementinterface return a string with a standard format representation of the toolbox data in tree tree can be a toolbox database or a single record param tree flat representation of toolbox data whole database or single record type tree elementtree _elementinterface param encoding name of an encoding to use type encoding str param errors error handling scheme for codec same as the encode builtin string method type errors str param unicode_fields type unicode_fields dict str or set str rtype str this class is the base class for settings files return the contents of toolbox settings file with a nested structure param encoding encoding used by settings file type encoding str param errors 
error handling scheme for codec same as decode builtin method type errors str param kwargs keyword arguments passed to standardformat fields type kwargs dict rtype elementtree _elementinterface check whether the first char of the field marker indicates a block start or end build tree on the basis of block char write xml to file write xml to file remove all elements and subelements with no text and no child elements param elem toolbox data in an elementtree structure type elem elementtree _elementinterface add blank elements and subelements specified in default_fields param elem toolbox data in an elementtree structure type elem elementtree _elementinterface param default_fields fields to add to each type of element and subelement type default_fields dict tuple sort the elements and subelements in order specified in field_orders param elem toolbox data in an elementtree structure type elem elementtree _elementinterface param field_orders order of fields for each type of element and subelement type field_orders dict tuple sort the children of elem add blank lines before all elements and subelements specified in blank_before param elem toolbox data in an elementtree structure type elem elementtree _elementinterface param blank_before elements and subelements to add blank lines before type blank_before dict tuple zip_path find corpora toolbox zip lexicon toolboxdata zipfilepathpointer zip_path toolbox rotokas dic parse settings open zipfilepathpointer zip_path entry toolbox mdf mdf_alth typ
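A small self-contained illustration of the record parse described above, using an in-memory SFM string instead of a file (open_string is provided by the StandardFormat base class). The record contents are the same toy dictionary entries used in the description.

from nltk.toolbox import ToolboxData

sfm = (
    "\\_sh v3.0  400  Rotokas Dictionary\n"
    "\\lx kaa\n\\ps V.A\n\\ge gag\n\\gp nek i pas\n"
    "\\lx kaa\n\\ps V.B\n\\ge strangle\n\\gp pasim nek\n"
)

db = ToolboxData()
db.open_string(sfm)
tree = db.parse()  # "lx" is auto-detected as the record key
for record in tree.findall("record"):
    print([(field.tag, field.text) for field in record])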
import codecs import re from io import StringIO from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder from nltk.data import PathPointer, find class StandardFormat: def __init__(self, filename=None, encoding=None): self._encoding = encoding if filename is not None: self.open(filename) def open(self, sfm_file): if isinstance(sfm_file, PathPointer): self._file = sfm_file.open(self._encoding) else: self._file = codecs.open(sfm_file, "r", self._encoding) def open_string(self, s): self._file = StringIO(s) def raw_fields(self): join_string = "\n" line_regexp = r"^%s(?:\\(\S+)\s*)?(.*)$" first_line_pat = re.compile(line_regexp % "(?:\xef\xbb\xbf)?") line_pat = re.compile(line_regexp % "") file_iter = iter(self._file) try: line = next(file_iter) except StopIteration: return mobj = re.match(first_line_pat, line) mkr, line_value = mobj.groups() value_lines = [line_value] self.line_num = 0 for line in file_iter: self.line_num += 1 mobj = re.match(line_pat, line) line_mkr, line_value = mobj.groups() if line_mkr: yield (mkr, join_string.join(value_lines)) mkr = line_mkr value_lines = [line_value] else: value_lines.append(line_value) self.line_num += 1 yield (mkr, join_string.join(value_lines)) def fields( self, strip=True, unwrap=True, encoding=None, errors="strict", unicode_fields=None, ): if encoding is None and unicode_fields is not None: raise ValueError("unicode_fields is set but not encoding.") unwrap_pat = re.compile(r"\n+") for mkr, val in self.raw_fields(): if unwrap: val = unwrap_pat.sub(" ", val) if strip: val = val.rstrip() yield (mkr, val) def close(self): self._file.close() try: del self.line_num except AttributeError: pass class ToolboxData(StandardFormat): def parse(self, grammar=None, **kwargs): if grammar: return self._chunk_parse(grammar=grammar, **kwargs) else: return self._record_parse(**kwargs) def _record_parse(self, key=None, **kwargs): r builder = TreeBuilder() builder.start("toolbox_data", {}) builder.start("header", {}) in_records = False for mkr, value in self.fields(**kwargs): if key is None and not in_records and mkr[0] != "_": key = mkr if mkr == key: if in_records: builder.end("record") else: builder.end("header") in_records = True builder.start("record", {}) builder.start(mkr, {}) builder.data(value) builder.end(mkr) if in_records: builder.end("record") else: builder.end("header") builder.end("toolbox_data") return builder.close() def _tree2etree(self, parent): from nltk.tree import Tree root = Element(parent.label()) for child in parent: if isinstance(child, Tree): root.append(self._tree2etree(child)) else: text, tag = child e = SubElement(root, tag) e.text = text return root def _chunk_parse(self, grammar=None, root_label="record", trace=0, **kwargs): from nltk import chunk from nltk.tree import Tree cp = chunk.RegexpParser(grammar, root_label=root_label, trace=trace) db = self.parse(**kwargs) tb_etree = Element("toolbox_data") header = db.find("header") tb_etree.append(header) for record in db.findall("record"): parsed = cp.parse([(elem.text, elem.tag) for elem in record]) tb_etree.append(self._tree2etree(parsed)) return tb_etree _is_value = re.compile(r"\S") def to_sfm_string(tree, encoding=None, errors="strict", unicode_fields=None): if tree.tag == "record": root = Element("toolbox_data") root.append(tree) tree = root if tree.tag != "toolbox_data": raise ValueError("not a toolbox_data element structure") if encoding is None and unicode_fields is not None: raise ValueError( "if encoding is not specified then neither should unicode_fields" ) 
l = [] for rec in tree: l.append("\n") for field in rec: mkr = field.tag value = field.text if encoding is not None: if unicode_fields is not None and mkr in unicode_fields: cur_encoding = "utf8" else: cur_encoding = encoding if re.search(_is_value, value): l.append((f"\\{mkr} {value}\n").encode(cur_encoding, errors)) else: l.append((f"\\{mkr}{value}\n").encode(cur_encoding, errors)) else: if re.search(_is_value, value): l.append(f"\\{mkr} {value}\n") else: l.append(f"\\{mkr}{value}\n") return "".join(l[1:]) class ToolboxSettings(StandardFormat): def __init__(self): super().__init__() def parse(self, encoding=None, errors="strict", **kwargs): builder = TreeBuilder() for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs): block = mkr[0] if block in ("+", "-"): mkr = mkr[1:] else: block = None if block == "+": builder.start(mkr, {}) builder.data(value) elif block == "-": builder.end(mkr) else: builder.start(mkr, {}) builder.data(value) builder.end(mkr) return builder.close() def to_settings_string(tree, encoding=None, errors="strict", unicode_fields=None): l = list() _to_settings_string( tree.getroot(), l, encoding=encoding, errors=errors, unicode_fields=unicode_fields, ) return "".join(l) def _to_settings_string(node, l, **kwargs): tag = node.tag text = node.text if len(node) == 0: if text: l.append(f"\\{tag} {text}\n") else: l.append("\\%s\n" % tag) else: if text: l.append(f"\\+{tag} {text}\n") else: l.append("\\+%s\n" % tag) for n in node: _to_settings_string(n, l, **kwargs) l.append("\\-%s\n" % tag) return def remove_blanks(elem): out = list() for child in elem: remove_blanks(child) if child.text or len(child) > 0: out.append(child) elem[:] = out def add_default_fields(elem, default_fields): for field in default_fields.get(elem.tag, []): if elem.find(field) is None: SubElement(elem, field) for child in elem: add_default_fields(child, default_fields) def sort_fields(elem, field_orders): order_dicts = dict() for field, order in field_orders.items(): order_dicts[field] = order_key = dict() for i, subfield in enumerate(order): order_key[subfield] = i _sort_fields(elem, order_dicts) def _sort_fields(elem, orders_dicts): try: order = orders_dicts[elem.tag] except KeyError: pass else: tmp = sorted( ((order.get(child.tag, 1e9), i), child) for i, child in enumerate(elem) ) elem[:] = [child for key, child in tmp] for child in elem: if len(child): _sort_fields(child, orders_dicts) def add_blank_lines(tree, blanks_before, blanks_between): try: before = blanks_before[tree.tag] between = blanks_between[tree.tag] except KeyError: for elem in tree: if len(elem): add_blank_lines(elem, blanks_before, blanks_between) else: last_elem = None for elem in tree: tag = elem.tag if last_elem is not None and last_elem.tag != tag: if tag in before and last_elem is not None: e = last_elem.getiterator()[-1] e.text = (e.text or "") + "\n" else: if tag in between: e = last_elem.getiterator()[-1] e.text = (e.text or "") + "\n" if len(elem): add_blank_lines(elem, blanks_before, blanks_between) last_elem = elem def demo(): from itertools import islice file_path = find("corpora/toolbox/rotokas.dic") lexicon = ToolboxData(file_path).parse() print("first field in fourth record:") print(lexicon[3][0].tag) print(lexicon[3][0].text) print("\nfields in sequential order:") for field in islice(lexicon.find("record"), 10): print(field.tag, field.text) print("\nlx fields:") for field in islice(lexicon.findall("record/lx"), 10): print(field.text) settings = ToolboxSettings() file_path = 
find("corpora/toolbox/MDF/MDF_AltH.typ") settings.open(file_path) tree = settings.parse(unwrap=False, encoding="cp1252") print(tree.find("expset/expMDF/rtfPageSetup/paperSize").text) settings_tree = ElementTree(tree) print(to_settings_string(settings_tree).encode("utf8")) if __name__ == "__main__": demo()
natural language toolkit machine translation c 20012023 nltk project steven bird stevenbird1gmail com tah wei hoon hoon twgmail com url https www nltk org for license information see license txt experimental features for machine translation these interfaces are prone to change isort skipfile natural language toolkit machine translation c 2001 2023 nltk project steven bird stevenbird1 gmail com tah wei hoon hoon tw gmail com url https www nltk org for license information see license txt experimental features for machine translation these interfaces are prone to change isort skip_file
from nltk.translate.api import AlignedSent, Alignment, PhraseTable from nltk.translate.ibm_model import IBMModel from nltk.translate.ibm1 import IBMModel1 from nltk.translate.ibm2 import IBMModel2 from nltk.translate.ibm3 import IBMModel3 from nltk.translate.ibm4 import IBMModel4 from nltk.translate.ibm5 import IBMModel5 from nltk.translate.bleu_score import sentence_bleu as bleu from nltk.translate.ribes_score import sentence_ribes as ribes from nltk.translate.meteor_score import meteor_score as meteor from nltk.translate.metrics import alignment_error_rate from nltk.translate.stack_decoder import StackDecoder from nltk.translate.nist_score import sentence_nist as nist from nltk.translate.chrf_score import sentence_chrf as chrf from nltk.translate.gale_church import trace from nltk.translate.gdfa import grow_diag_final_and from nltk.translate.gleu_score import sentence_gleu as gleu from nltk.translate.phrase_based import extract
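The package-level aliases above make the metrics importable in one line; a quick check with a toy sentence pair (the expected value is just the geometric mean of the two n-gram precisions, so no smoothing is needed here).

from nltk.translate import bleu  # alias for sentence_bleu

reference = "the cat is on the mat".split()
hypothesis = "the cat sat on the mat".split()

# 1-gram precision 5/6, 2-gram precision 3/5; equal weights give
# sqrt(5/6 * 3/5) ~= 0.707, with no brevity penalty (equal lengths).
print(bleu([reference], hypothesis, weights=(0.5, 0.5)))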
natural language toolkit api for alignment and translation objects c 20012023 nltk project will zhang wilzzhagmail com guan gui gguistudent unimelb edu au steven bird stevenbird1gmail com tah wei hoon hoon twgmail com url https www nltk org for license information see license txt return an aligned sentence object which encapsulates two sentences along with an alignment between them typically used in machine translation to represent a sentence and its translation from nltk translate import alignedsent alignment algnsent alignedsent klein ist das haus the house is small alignment fromstring 03 12 20 31 algnsent words klein ist das haus algnsent mots the house is small algnsent alignment alignment0 3 1 2 2 0 3 1 from nltk corpus import comtrans printcomtrans alignedsents54 alignedsent weshalb also sollten so why should eu arm printcomtrans alignedsents54 alignment 00 01 10 22 34 35 47 58 63 79 89 910 911 1012 116 126 1313 param words words in the target language sentence type words liststr param mots words in the source language sentence type mots liststr param alignment wordlevel alignments between words and mots each alignment is represented as a 2tuple wordsindex motsindex type alignment alignment return a string representation for this alignedsent rtype str dot representation of the aligned sentence declare node alignment connect the source words connect the target words put it in the same rank ipython magic show svg representation of this alignedsent return a humanreadable string representation for this alignedsent rtype str return the aligned sentence pair reversing the directionality rtype alignedsent a storage class for representing alignment between two sequences s1 s2 in general an alignment is a set of tuples of the form i j representing an alignment between the ith element of s1 and the jth element of s2 tuples are extensible they might contain additional data such as a boolean to indicate sure vs possible alignments from nltk translate import alignment a alignment0 0 0 1 1 2 2 2 a invert alignment0 0 1 0 2 1 2 2 printa invert 00 10 21 22 a0 0 1 0 0 a invert2 2 1 2 2 b alignment0 0 0 1 b issubseta true c alignment fromstring 00 01 b c true read a gizaformatted string and return an alignment object alignment fromstring 00 21 92 213 104 75 alignment0 0 2 1 7 5 9 2 10 4 21 3 type s str param s the positional alignments in giza format rtype alignment return an alignment object corresponding to the string representation s look up the alignments that map from a given index or slice return an alignment object being the inverted mapping work out the range of the mapping from the given positions if no positions are specified compute the range of the entire mapping produce a gizaformatted string representing the alignment produce a gizaformatted string representing the alignment build a list self index such that self indexi is a list of the alignments originating from word i check whether the alignments are legal param numwords the number of source language words type numwords int param nummots the number of target language words type nummots int param alignment alignment to be checked type alignment alignment raise indexerror if alignment falls outside the sentence inmemory store of translations for a given phrase and the log probability of the those translations get the translations for a source language phrase param srcphrase source language phrase of interest type srcphrase tuplestr return a list of target language phrases that are translations of srcphrase ordered in decreasing order 
of likelihood each list element is a tuple of the target phrase and its log probability rtype listphrasetableentry type srcphrase tuplestr type trgphrase tuplestr param logprob log probability that given srcphrase trgphrase is its translation type logprob float natural language toolkit api for alignment and translation objects c 2001 2023 nltk project will zhang wilzzha gmail com guan gui ggui student unimelb edu au steven bird stevenbird1 gmail com tah wei hoon hoon tw gmail com url https www nltk org for license information see license txt return an aligned sentence object which encapsulates two sentences along with an alignment between them typically used in machine translation to represent a sentence and its translation from nltk translate import alignedsent alignment algnsent alignedsent klein ist das haus the house is small alignment fromstring 0 3 1 2 2 0 3 1 algnsent words klein ist das haus algnsent mots the house is small algnsent alignment alignment 0 3 1 2 2 0 3 1 from nltk corpus import comtrans print comtrans aligned_sents 54 alignedsent weshalb also sollten so why should eu arm print comtrans aligned_sents 54 alignment 0 0 0 1 1 0 2 2 3 4 3 5 4 7 5 8 6 3 7 9 8 9 9 10 9 11 10 12 11 6 12 6 13 13 param words words in the target language sentence type words list str param mots words in the source language sentence type mots list str param alignment word level alignments between words and mots each alignment is represented as a 2 tuple words_index mots_index type alignment alignment return a string representation for this alignedsent rtype str dot representation of the aligned sentence declare node alignment connect the source words connect the target words put it in the same rank ipython magic show svg representation of this alignedsent return a human readable string representation for this alignedsent rtype str return the aligned sentence pair reversing the directionality rtype alignedsent a storage class for representing alignment between two sequences s1 s2 in general an alignment is a set of tuples of the form i j representing an alignment between the i th element of s1 and the j th element of s2 tuples are extensible they might contain additional data such as a boolean to indicate sure vs possible alignments from nltk translate import alignment a alignment 0 0 0 1 1 2 2 2 a invert alignment 0 0 1 0 2 1 2 2 print a invert 0 0 1 0 2 1 2 2 a 0 0 1 0 0 a invert 2 2 1 2 2 b alignment 0 0 0 1 b issubset a true c alignment fromstring 0 0 0 1 b c true read a giza formatted string and return an alignment object alignment fromstring 0 0 2 1 9 2 21 3 10 4 7 5 alignment 0 0 2 1 7 5 9 2 10 4 21 3 type s str param s the positional alignments in giza format rtype alignment return an alignment object corresponding to the string representation s look up the alignments that map from a given index or slice return an alignment object being the inverted mapping work out the range of the mapping from the given positions if no positions are specified compute the range of the entire mapping produce a giza formatted string representing the alignment produce a giza formatted string representing the alignment build a list self _index such that self _index i is a list of the alignments originating from word i check whether the alignments are legal param num_words the number of source language words type num_words int param num_mots the number of target language words type num_mots int param alignment alignment to be checked type alignment alignment raise indexerror if alignment falls outside the 
sentence in memory store of translations for a given phrase and the log probability of the those translations get the translations for a source language phrase param src_phrase source language phrase of interest type src_phrase tuple str return a list of target language phrases that are translations of src_phrase ordered in decreasing order of likelihood each list element is a tuple of the target phrase and its log probability rtype list phrasetableentry type src_phrase tuple str type trg_phrase tuple str param log_prob log probability that given src_phrase trg_phrase is its translation type log_prob float
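A short sketch of the Alignment lookups described above that are not covered by the doctests, namely the GIZA-format round trip via str() and the range() query; the index pairs are invented.

from nltk.translate import Alignment

algn = Alignment.fromstring("0-0 1-1 2-1 3-2")
print(str(algn))           # back to GIZA format: 0-0 1-1 2-1 3-2
print(algn[2])             # pairs whose source index is 2 -> [(2, 1)]
print(algn.range([1, 2]))  # target positions aligned to source words 1 and 2
print(str(algn.invert()))  # 0-0 1-1 1-2 2-3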
import subprocess from collections import namedtuple class AlignedSent: def __init__(self, words, mots, alignment=None): self._words = words self._mots = mots if alignment is None: self.alignment = Alignment([]) else: assert type(alignment) is Alignment self.alignment = alignment @property def words(self): return self._words @property def mots(self): return self._mots def _get_alignment(self): return self._alignment def _set_alignment(self, alignment): _check_alignment(len(self.words), len(self.mots), alignment) self._alignment = alignment alignment = property(_get_alignment, _set_alignment) def __repr__(self): words = "[%s]" % (", ".join("'%s'" % w for w in self._words)) mots = "[%s]" % (", ".join("'%s'" % w for w in self._mots)) return f"AlignedSent({words}, {mots}, {self._alignment!r})" def _to_dot(self): s = "graph align {\n" s += "node[shape=plaintext]\n" for w in self._words: s += f'"{w}_source" [label="{w}"] \n' for w in self._mots: s += f'"{w}_target" [label="{w}"] \n' for u, v in self._alignment: s += f'"{self._words[u]}_source" -- "{self._mots[v]}_target" \n' for i in range(len(self._words) - 1): s += '"{}_source" -- "{}_source" [style=invis]\n'.format( self._words[i], self._words[i + 1], ) for i in range(len(self._mots) - 1): s += '"{}_target" -- "{}_target" [style=invis]\n'.format( self._mots[i], self._mots[i + 1], ) s += "{rank = same; %s}\n" % (" ".join('"%s_source"' % w for w in self._words)) s += "{rank = same; %s}\n" % (" ".join('"%s_target"' % w for w in self._mots)) s += "}" return s def _repr_svg_(self): dot_string = self._to_dot().encode("utf8") output_format = "svg" try: process = subprocess.Popen( ["dot", "-T%s" % output_format], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) except OSError as e: raise Exception("Cannot find the dot binary from Graphviz package") from e out, err = process.communicate(dot_string) return out.decode("utf8") def __str__(self): source = " ".join(self._words)[:20] + "..." target = " ".join(self._mots)[:20] + "..." 
return f"<AlignedSent: '{source}' -> '{target}'>" def invert(self): return AlignedSent(self._mots, self._words, self._alignment.invert()) class Alignment(frozenset): def __new__(cls, pairs): self = frozenset.__new__(cls, pairs) self._len = max(p[0] for p in self) if self != frozenset([]) else 0 self._index = None return self @classmethod def fromstring(cls, s): return Alignment([_giza2pair(a) for a in s.split()]) def __getitem__(self, key): if not self._index: self._build_index() return self._index.__getitem__(key) def invert(self): return Alignment(((p[1], p[0]) + p[2:]) for p in self) def range(self, positions=None): image = set() if not self._index: self._build_index() if not positions: positions = list(range(len(self._index))) for p in positions: image.update(f for _, f in self._index[p]) return sorted(image) def __repr__(self): return "Alignment(%r)" % sorted(self) def __str__(self): return " ".join("%d-%d" % p[:2] for p in sorted(self)) def _build_index(self): self._index = [[] for _ in range(self._len + 1)] for p in self: self._index[p[0]].append(p) def _giza2pair(pair_string): i, j = pair_string.split("-") return int(i), int(j) def _naacl2pair(pair_string): i, j, p = pair_string.split("-") return int(i), int(j) def _check_alignment(num_words, num_mots, alignment): assert type(alignment) is Alignment if not all(0 <= pair[0] < num_words for pair in alignment): raise IndexError("Alignment is outside boundary of words") if not all(pair[1] is None or 0 <= pair[1] < num_mots for pair in alignment): raise IndexError("Alignment is outside boundary of mots") PhraseTableEntry = namedtuple("PhraseTableEntry", ["trg_phrase", "log_prob"]) class PhraseTable: def __init__(self): self.src_phrases = dict() def translations_for(self, src_phrase): return self.src_phrases[src_phrase] def add(self, src_phrase, trg_phrase, log_prob): entry = PhraseTableEntry(trg_phrase=trg_phrase, log_prob=log_prob) if src_phrase not in self.src_phrases: self.src_phrases[src_phrase] = [] self.src_phrases[src_phrase].append(entry) self.src_phrases[src_phrase].sort(key=lambda e: e.log_prob, reverse=True) def __contains__(self, src_phrase): return src_phrase in self.src_phrases
natural language toolkit bleu score c 20012023 nltk project s chin yee lee hengfeng li ruxin hou calvin tanujaya lim contributors bjrn mattsson dmitrijs milajevs liling tan url https www nltk org for license information see license txt bleu score implementation import math import sys import warnings from collections import counter from fractions import fraction from nltk util import ngrams def sentencebleu references hypothesis weights0 25 0 25 0 25 0 25 smoothingfunctionnone autoreweighfalse return corpusbleu references hypothesis weights smoothingfunction autoreweigh def corpusbleu listofreferences hypotheses weights0 25 0 25 0 25 0 25 smoothingfunctionnone autoreweighfalse before proceeding to compute bleu perform sanity checks pnumerators counter key ngram order and value no of ngram matches pdenominators counter key ngram order and value no of ngram in ref hyplengths reflengths 0 0 assert lenlistofreferences lenhypotheses the number of hypotheses and their references should be the same try weights00 except weights weights maxweightlength maxlenweight for weight in weights iterate through each hypothesis and their corresponding references for references hypothesis in ziplistofreferences hypotheses for each order of ngram calculate the numerator and denominator for the corpuslevel modified precision for i in range1 maxweightlength 1 pi modifiedprecisionreferences hypothesis i pnumeratorsi pi numerator pdenominatorsi pi denominator calculate the hypothesis length and the closest reference length adds them to the corpuslevel hypothesis and reference counts hyplen lenhypothesis hyplengths hyplen reflengths closestreflengthreferences hyplen calculate corpuslevel brevity penalty bp brevitypenaltyreflengths hyplengths collects the various precision values for the different ngram orders pn fractionpnumeratorsi pdenominatorsi normalizefalse for i in range1 maxweightlength 1 returns 0 if there s no matching ngrams we only need to check for pnumerators1 0 since if there s no unigrams there won t be any higher order ngrams if pnumerators1 0 return 0 if lenweights 1 else 0 lenweights if there s no smoothing set use method0 from smoothinfunction class if not smoothingfunction smoothingfunction smoothingfunction method0 smoothen the modified precision note smoothingfunction may convert values into floats it tries to retain the fraction object as much as the smoothing method allows pn smoothingfunction pn referencesreferences hypothesishypothesis hyplenhyplengths bleuscores for weight in weights uniformly reweighting based on maximum hypothesis lengths if largest order of ngrams 4 and weights is set at default if autoreweigh if hyplengths 4 and weight 0 25 0 25 0 25 0 25 weight 1 hyplengths hyplengths s wi math logpi for wi pi in zipweight pn if pi 0 s bp math expmath fsums bleuscores appends return bleuscores0 if lenweights 1 else bleuscores def modifiedprecisionreferences hypothesis n extracts all ngrams in hypothesis set an empty counter if hypothesis is empty counts counterngramshypothesis n if lenhypothesis n else counter extract a union of references counts maxcounts reduceor counterngramsref n for ref in references maxcounts for reference in references referencecounts counterngramsreference n if lenreference n else counter for ngram in counts maxcountsngram maxmaxcounts getngram 0 referencecountsngram assigns the intersection between hypothesis and references counts clippedcounts ngram mincount maxcountsngram for ngram count in counts items numerator sumclippedcounts values ensures that 
denominator is minimum 1 to avoid zerodivisionerror usually this happens when the ngram order is lenreference denominator max1 sumcounts values return fractionnumerator denominator normalizefalse def closestreflengthreferences hyplen reflens lenreference for reference in references closestreflen min reflens keylambda reflen absreflen hyplen reflen return closestreflen def brevitypenaltyclosestreflen hyplen if hyplen closestreflen return 1 if hypothesis is empty brevity penalty 0 should result in bleu 0 0 elif hyplen 0 return 0 else return math exp1 closestreflen hyplen class smoothingfunction def initself epsilon0 1 alpha5 k5 self epsilon epsilon self alpha alpha self k k def method0self pn args kwargs pnnew for i pi in enumeratepn if pi numerator 0 pnnew appendpi else msg str nthe hypothesis contains 0 counts of gram overlaps n therefore the bleu score evaluates to 0 independently ofn how many ngram overlaps of lower order it contains n consider using lower ngram order or use smoothingfunction formati 1 warnings warnmsg when numerator0 where denonminator0 or 0 the result for the precision score should be equal to 0 or undefined due to bleu geometric mean computation in logarithm space we we need to take the return sys floatinfo min such that math logsys floatinfo min returns a 0 precision score pnnew appendsys floatinfo min return pnnew def method1self pn args kwargs return pi numerator self epsilon pi denominator if pi numerator 0 else pi for pi in pn def method2self pn args kwargs return fractionpni numerator 1 pni denominator 1 normalizefalse if i 0 else pn0 for i in rangelenpn def method3self pn args kwargs incvnt 1 from the mtevalv13a pl it s referred to as k for i pi in enumeratepn if pi numerator 0 pni 1 2incvnt pi denominator incvnt 1 return pn def method4self pn references hypothesis hyplennone args kwargs incvnt 1 hyplen hyplen if hyplen else lenhypothesis for i pi in enumeratepn if pi numerator 0 and hyplen 1 incvnt i 1 self k math log hyplen note that this k is different from the k from nist pni incvnt pi denominator numerator 1 2incvnt self k math loghyplen pni numerator pi denominator incvnt 1 return pn def method5self pn references hypothesis hyplennone args kwargs hyplen hyplen if hyplen else lenhypothesis m requires an precision value for an addition ngram order pnplus1 pn modifiedprecisionreferences hypothesis 5 m1 pn0 1 for i pi in enumeratepn pni mi 1 pi pnplus1i 1 3 mi pni return pn def method6self pn references hypothesis hyplennone args kwargs hyplen hyplen if hyplen else lenhypothesis this smoothing only works when p1 and p2 is nonzero raise an error with an appropriate message when the input is too short to use this smoothing technique assert pn2 this smoothing method requires nonzero precision for bigrams for i pi in enumeratepn if i in 0 1 skips the first 2 orders of ngrams continue else pi0 0 if pni 2 0 else pni 1 2 pni 2 no of ngrams in translation that matches the reference m pi numerator no of ngrams in translation l sum1 for in ngramshypothesis i 1 calculates the interpolated precision pni m self alpha pi0 l self alpha return pn def method7self pn references hypothesis hyplennone args kwargs hyplen hyplen if hyplen else lenhypothesis pn self method4pn references hypothesis hyplen pn self method5pn references hypothesis hyplen return pn natural language toolkit bleu score c 2001 2023 nltk project s chin yee lee hengfeng li ruxin hou calvin tanujaya lim contributors björn mattsson dmitrijs milajevs liling tan url https www nltk org for license information see 
license txt bleu score implementation calculate bleu score bilingual evaluation understudy from papineni kishore salim roukos todd ward and wei jing zhu 2002 bleu a method for automatic evaluation of machine translation in proceedings of acl https www aclweb org anthology p02 1040 pdf hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party hypothesis2 it is to insure the troops forever hearing the activity guidebook that party direct reference1 it is a guide to action that ensures that the military will forever heed party commands reference2 it is the guiding principle which guarantees the military forces always being under the command of the party reference3 it is the practical guide for the army always to heed the directions of the party sentence_bleu reference1 reference2 reference3 hypothesis1 doctest ellipsis 0 5045 if there is no ngrams overlap for any order of n grams bleu returns the value 0 this is because the precision for the order of n grams without overlap is 0 and the geometric mean in the final bleu score computation multiplies the 0 with the precision of other n grams this results in 0 independently of the precision of the other n gram orders the following example has zero 3 gram and 4 gram overlaps round sentence_bleu reference1 reference2 reference3 hypothesis2 4 doctest ellipsis 0 0 to avoid this harsh behaviour when no ngram overlaps are found a smoothing function can be used chencherry smoothingfunction sentence_bleu reference1 reference2 reference3 hypothesis2 smoothing_function chencherry method1 doctest ellipsis 0 0370 the default bleu calculates a score for up to 4 grams using uniform weights this is called bleu 4 to evaluate your translations with higher lower order ngrams use customized weights e g when accounting for up to 5 grams with uniform weights this is called bleu 5 use weights 1 5 1 5 1 5 1 5 1 5 sentence_bleu reference1 reference2 reference3 hypothesis1 weights doctest ellipsis 0 3920 multiple bleu scores can be computed at once by supplying a list of weights e g for computing bleu 2 bleu 3 and bleu 4 in one computation use weights 1 2 1 2 1 3 1 3 1 3 1 4 1 4 1 4 1 4 sentence_bleu reference1 reference2 reference3 hypothesis1 weights doctest ellipsis 0 7453 0 6240 0 5045 param references reference sentences type references list list str param hypothesis a hypothesis sentence type hypothesis list str param weights weights for unigrams bigrams trigrams and so on one or a list of weights type weights tuple float list tuple float param smoothing_function type smoothing_function smoothingfunction param auto_reweigh option to re normalize the weights uniformly type auto_reweigh bool return the sentence level bleu score returns a list if multiple weights were supplied rtype float list float calculate a single corpus level bleu score aka system level bleu for all the hypotheses and their respective references instead of averaging the sentence level bleu scores i e macro average precision the original bleu metric papineni et al 2002 accounts for the micro average precision i e summing the numerators and denominators for each hypothesis reference s pairs before the division hyp1 it is a guide to action which ensures that the military always obeys the commands of the party ref1a it is a guide to action that ensures that the military will forever heed party commands ref1b it is the guiding principle which guarantees the military forces always being under the command of the party ref1c it is the practical guide for 
the army always to heed the directions of the party hyp2 he read the book because he was interested in world history ref2a he was interested in world history because he read the book list_of_references ref1a ref1b ref1c ref2a hypotheses hyp1 hyp2 corpus_bleu list_of_references hypotheses doctest ellipsis 0 5920 the example below show that corpus_bleu is different from averaging sentence_bleu for hypotheses score1 sentence_bleu ref1a ref1b ref1c hyp1 score2 sentence_bleu ref2a hyp2 score1 score2 2 doctest ellipsis 0 6223 custom weights may be supplied to fine tune the bleu score further a tuple of float weights for unigrams bigrams trigrams and so on can be given weights 0 1 0 3 0 5 0 1 corpus_bleu list_of_references hypotheses weights weights doctest ellipsis 0 5818 this particular weight gave extra value to trigrams furthermore multiple weights can be given resulting in multiple bleu scores weights 0 5 0 5 0 333 0 333 0 334 0 25 0 25 0 25 0 25 0 2 0 2 0 2 0 2 0 2 corpus_bleu list_of_references hypotheses weights weights doctest ellipsis 0 8242 0 7067 0 5920 0 4719 param list_of_references a corpus of lists of reference sentences w r t hypotheses type list_of_references list list list str param hypotheses a list of hypothesis sentences type hypotheses list list str param weights weights for unigrams bigrams trigrams and so on one or a list of weights type weights tuple float list tuple float param smoothing_function type smoothing_function smoothingfunction param auto_reweigh option to re normalize the weights uniformly type auto_reweigh bool return the corpus level bleu score rtype float before proceeding to compute bleu perform sanity checks key ngram order and value no of ngram matches key ngram order and value no of ngram in ref iterate through each hypothesis and their corresponding references for each order of ngram calculate the numerator and denominator for the corpus level modified precision calculate the hypothesis length and the closest reference length adds them to the corpus level hypothesis and reference counts calculate corpus level brevity penalty collects the various precision values for the different ngram orders returns 0 if there s no matching n grams we only need to check for p_numerators 1 0 since if there s no unigrams there won t be any higher order ngrams if there s no smoothing set use method0 from smoothinfunction class smoothen the modified precision note smoothing_function may convert values into floats it tries to retain the fraction object as much as the smoothing method allows uniformly re weighting based on maximum hypothesis lengths if largest order of n grams 4 and weights is set at default calculate modified ngram precision the normal precision method may lead to some wrong translations with high precision e g the translation in which a word of reference repeats several times has very high precision this function only returns the fraction object that contains the numerator and denominator necessary to calculate the corpus level precision to calculate the modified precision for a single pair of hypothesis and references cast the fraction object into a float the famous the the the example shows that you can get bleu precision by duplicating high frequency words reference1 the cat is on the mat split reference2 there is a cat on the mat split hypothesis1 the the the the the the the split references reference1 reference2 float modified_precision references hypothesis1 n 1 doctest ellipsis 0 2857 in the modified n gram precision a reference word will be 
considered exhausted after a matching hypothesis word is identified e g reference1 it is a guide to action that ensures that the military will forever heed party commands reference2 it is the guiding principle which guarantees the military forces always being under the command of the party reference3 it is the practical guide for the army always to heed the directions of the party hypothesis of the split references reference1 reference2 reference3 float modified_precision references hypothesis n 1 1 0 float modified_precision references hypothesis n 2 1 0 an example of a normal machine translation hypothesis hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party hypothesis2 it is to insure the troops forever hearing the activity guidebook that party direct reference1 it is a guide to action that ensures that the military will forever heed party commands reference2 it is the guiding principle which guarantees the military forces always being under the command of the party reference3 it is the practical guide for the army always to heed the directions of the party references reference1 reference2 reference3 float modified_precision references hypothesis1 n 1 doctest ellipsis 0 9444 float modified_precision references hypothesis2 n 1 doctest ellipsis 0 5714 float modified_precision references hypothesis1 n 2 doctest ellipsis 0 5882352941176471 float modified_precision references hypothesis2 n 2 doctest ellipsis 0 07692 param references a list of reference translations type references list list str param hypothesis a hypothesis translation type hypothesis list str param n the ngram order type n int return bleu s modified precision for the nth order ngram rtype fraction extracts all ngrams in hypothesis set an empty counter if hypothesis is empty extract a union of references counts max_counts reduce or_ counter ngrams ref n for ref in references assigns the intersection between hypothesis and references counts ensures that denominator is minimum 1 to avoid zerodivisionerror usually this happens when the ngram order is len reference this function finds the reference that is the closest length to the hypothesis the closest reference length is referred to as r variable from the brevity penalty formula in papineni et al 2002 param references a list of reference translations type references list list str param hyp_len the length of the hypothesis type hyp_len int return the length of the reference that s closest to the hypothesis rtype int calculate brevity penalty as the modified n gram precision still has the problem from the short length sentence brevity penalty is used to modify the overall bleu score according to length an example from the paper there are three references with length 12 15 and 17 and a concise hypothesis of the length 12 the brevity penalty is 1 reference1 list aaaaaaaaaaaa i e a 12 reference2 list aaaaaaaaaaaaaaa i e a 15 reference3 list aaaaaaaaaaaaaaaaa i e a 17 hypothesis list aaaaaaaaaaaa i e a 12 references reference1 reference2 reference3 hyp_len len hypothesis closest_ref_len closest_ref_length references hyp_len brevity_penalty closest_ref_len hyp_len 1 0 in case a hypothesis translation is shorter than the references penalty is applied references a 28 a 28 hypothesis a 12 hyp_len len hypothesis closest_ref_len closest_ref_length references hyp_len brevity_penalty closest_ref_len hyp_len 0 2635971381157267 the length of the closest reference is used to compute the penalty if the length of a hypothesis is 12 and the 
reference lengths are 13 and 2 the penalty is applied because the hypothesis length 12 is less then the closest reference length 13 references a 13 a 2 hypothesis a 12 hyp_len len hypothesis closest_ref_len closest_ref_length references hyp_len brevity_penalty closest_ref_len hyp_len doctest ellipsis 0 9200 the brevity penalty doesn t depend on reference order more importantly when two reference sentences are at the same distance the shortest reference sentence length is used references a 13 a 11 hypothesis a 12 hyp_len len hypothesis closest_ref_len closest_ref_length references hyp_len bp1 brevity_penalty closest_ref_len hyp_len hyp_len len hypothesis closest_ref_len closest_ref_length reversed references hyp_len bp2 brevity_penalty closest_ref_len hyp_len bp1 bp2 1 true a test example from mteval v13a pl starting from the line 705 references a 11 a 8 hypothesis a 7 hyp_len len hypothesis closest_ref_len closest_ref_length references hyp_len brevity_penalty closest_ref_len hyp_len doctest ellipsis 0 8668 references a 11 a 8 a 6 a 7 hypothesis a 7 hyp_len len hypothesis closest_ref_len closest_ref_length references hyp_len brevity_penalty closest_ref_len hyp_len 1 0 param hyp_len the length of the hypothesis for a single sentence or the sum of all the hypotheses lengths for a corpus type hyp_len int param closest_ref_len the length of the closest reference for a single hypothesis or the sum of all the closest references for every hypotheses type closest_ref_len int return bleu s brevity penalty rtype float if hypothesis is empty brevity penalty 0 should result in bleu 0 0 this is an implementation of the smoothing techniques for segment level bleu scores that was presented in boxing chen and collin cherry 2014 a systematic comparison of smoothing techniques for sentence level bleu in wmt14 http acl2014 org acl2014 w14 33 pdf w14 3346 pdf this will initialize the parameters required for the various smoothing techniques the default values are set to the numbers used in the experiments from chen and cherry 2014 hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party reference1 it is a guide to action that ensures that the military will forever heed party commands chencherry smoothingfunction print sentence_bleu reference1 hypothesis1 doctest ellipsis 0 4118 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method0 doctest ellipsis 0 4118 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method1 doctest ellipsis 0 4118 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method2 doctest ellipsis 0 4452 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method3 doctest ellipsis 0 4118 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method4 doctest ellipsis 0 4118 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method5 doctest ellipsis 0 4905 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method6 doctest ellipsis 0 4135 print sentence_bleu reference1 hypothesis1 smoothing_function chencherry method7 doctest ellipsis 0 4905 param epsilon the epsilon value use in method 1 type epsilon float param alpha the alpha value use in method 6 type alpha int param k the k value use in method 4 type k int no smoothing when numerator 0 where denonminator 0 or 0 the result for the precision score should be equal to 0 or undefined due to bleu geometric mean computation in logarithm space we we need 
to take the return sys float_info min such that math log sys float_info min returns a 0 precision score smoothing method 1 add epsilon counts to precision with 0 counts smoothing method 2 add 1 to both numerator and denominator from chin yew lin and franz josef och 2004 orange a method for evaluating automatic evaluation metrics for machine translation in coling 2004 smoothing method 3 nist geometric sequence smoothing the smoothing is computed by taking 1 2 k instead of 0 for each precision score whose matching n gram count is null k is 1 for the first n value for which the n gram match count is null for example if the text contains one 2 gram match and consequently two 1 gram matches the n gram count for each individual precision score would be n 1 prec_count 2 two unigrams n 2 prec_count 1 one bigram n 3 prec_count 1 2 no trigram taking smoothed value of 1 2 k with k 1 n 4 prec_count 1 4 no fourgram taking smoothed value of 1 2 k with k 2 from the mteval v13a pl it s referred to as k smoothing method 4 shorter translations may have inflated precision values due to having smaller denominators therefore we give them proportionally smaller smoothed counts instead of scaling to 1 2 k chen and cherry suggests dividing by 1 ln len t where t is the length of the translation incvnt i 1 self k math log hyp_len note that this k is different from the k from nist p_n i incvnt p_i denominator smoothing method 5 the matched counts for similar values of n should be similar to a calculate the n gram matched count it averages the n 1 n and n 1 gram matched counts requires an precision value for an addition ngram order smoothing method 6 interpolates the maximum likelihood estimate of the precision p_n with a prior estimate pi0 the prior is estimated by assuming that the ratio between pn and pn 1 will be the same as that between pn 1 and pn 2 from gao and he 2013 training mrf based phrase translation models using gradient ascent in naacl this smoothing only works when p_1 and p_2 is non zero raise an error with an appropriate message when the input is too short to use this smoothing technique skips the first 2 orders of ngrams no of ngrams in translation that matches the reference no of ngrams in translation calculates the interpolated precision smoothing method 7 interpolates methods 4 and 5
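Before the implementation that follows, here is a minimal stand-alone sketch of the clipped-count arithmetic behind modified n-gram precision, using only the standard library; the helper name clipped_unigram_precision is illustrative and not part of NLTK.

from collections import Counter

def clipped_unigram_precision(references, hypothesis):
    # Count each hypothesis token, then clip every count by the maximum number
    # of times that token occurs in any single reference.
    hyp_counts = Counter(hypothesis)
    clipped = {
        word: min(count, max(Counter(ref)[word] for ref in references))
        for word, count in hyp_counts.items()
    }
    return sum(clipped.values()) / max(1, sum(hyp_counts.values()))

hypothesis = "the the the the the the the".split()
references = [
    "the cat is on the mat".split(),
    "there is a cat on the mat".split(),
]
# "the" occurs 7 times in the hypothesis but at most twice in a single
# reference, so the clipped precision is 2/7, matching the doctest above.
print(clipped_unigram_precision(references, hypothesis))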
import math import sys import warnings from collections import Counter from fractions import Fraction from nltk.util import ngrams def sentence_bleu( references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, ): return corpus_bleu( [references], [hypothesis], weights, smoothing_function, auto_reweigh ) def corpus_bleu( list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, ): p_numerators = Counter() p_denominators = Counter() hyp_lengths, ref_lengths = 0, 0 assert len(list_of_references) == len(hypotheses), ( "The number of hypotheses and their reference(s) should be the " "same " ) try: weights[0][0] except: weights = [weights] max_weight_length = max(len(weight) for weight in weights) for references, hypothesis in zip(list_of_references, hypotheses): for i in range(1, max_weight_length + 1): p_i = modified_precision(references, hypothesis, i) p_numerators[i] += p_i.numerator p_denominators[i] += p_i.denominator hyp_len = len(hypothesis) hyp_lengths += hyp_len ref_lengths += closest_ref_length(references, hyp_len) bp = brevity_penalty(ref_lengths, hyp_lengths) p_n = [ Fraction(p_numerators[i], p_denominators[i], _normalize=False) for i in range(1, max_weight_length + 1) ] if p_numerators[1] == 0: return 0 if len(weights) == 1 else [0] * len(weights) if not smoothing_function: smoothing_function = SmoothingFunction().method0 p_n = smoothing_function( p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths ) bleu_scores = [] for weight in weights: if auto_reweigh: if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25): weight = (1 / hyp_lengths,) * hyp_lengths s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0) s = bp * math.exp(math.fsum(s)) bleu_scores.append(s) return bleu_scores[0] if len(weights) == 1 else bleu_scores def modified_precision(references, hypothesis, n): counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter() max_counts = {} for reference in references: reference_counts = ( Counter(ngrams(reference, n)) if len(reference) >= n else Counter() ) for ngram in counts: max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram]) clipped_counts = { ngram: min(count, max_counts[ngram]) for ngram, count in counts.items() } numerator = sum(clipped_counts.values()) denominator = max(1, sum(counts.values())) return Fraction(numerator, denominator, _normalize=False) def closest_ref_length(references, hyp_len): ref_lens = (len(reference) for reference in references) closest_ref_len = min( ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len) ) return closest_ref_len def brevity_penalty(closest_ref_len, hyp_len): if hyp_len > closest_ref_len: return 1 elif hyp_len == 0: return 0 else: return math.exp(1 - closest_ref_len / hyp_len) class SmoothingFunction: def __init__(self, epsilon=0.1, alpha=5, k=5): self.epsilon = epsilon self.alpha = alpha self.k = k def method0(self, p_n, *args, **kwargs): p_n_new = [] for i, p_i in enumerate(p_n): if p_i.numerator != 0: p_n_new.append(p_i) else: _msg = str( "\nThe hypothesis contains 0 counts of {}-gram overlaps.\n" "Therefore the BLEU score evaluates to 0, independently of\n" "how many N-gram overlaps of lower order it contains.\n" "Consider using lower n-gram order or use " "SmoothingFunction()" ).format(i + 1) warnings.warn(_msg) p_n_new.append(sys.float_info.min) return p_n_new def method1(self, p_n, *args, **kwargs): return [ (p_i.numerator + self.epsilon) / 
p_i.denominator if p_i.numerator == 0 else p_i for p_i in p_n ] def method2(self, p_n, *args, **kwargs): return [ Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False) if i != 0 else p_n[0] for i in range(len(p_n)) ] def method3(self, p_n, *args, **kwargs): incvnt = 1 for i, p_i in enumerate(p_n): if p_i.numerator == 0: p_n[i] = 1 / (2**incvnt * p_i.denominator) incvnt += 1 return p_n def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): incvnt = 1 hyp_len = hyp_len if hyp_len else len(hypothesis) for i, p_i in enumerate(p_n): if p_i.numerator == 0 and hyp_len > 1: numerator = 1 / (2**incvnt * self.k / math.log(hyp_len)) p_n[i] = numerator / p_i.denominator incvnt += 1 return p_n def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): hyp_len = hyp_len if hyp_len else len(hypothesis) m = {} p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)] m[-1] = p_n[0] + 1 for i, p_i in enumerate(p_n): p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3 m[i] = p_n[i] return p_n def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): hyp_len = hyp_len if hyp_len else len(hypothesis) assert p_n[2], "This smoothing method requires non-zero precision for bigrams." for i, p_i in enumerate(p_n): if i in [0, 1]: continue else: pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2] m = p_i.numerator l = sum(1 for _ in ngrams(hypothesis, i + 1)) p_n[i] = (m + self.alpha * pi0) / (l + self.alpha) return p_n def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): hyp_len = hyp_len if hyp_len else len(hypothesis) p_n = self.method4(p_n, references, hypothesis, hyp_len) p_n = self.method5(p_n, references, hypothesis, hyp_len) return p_n
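A hedged usage sketch for the scorer above, assuming the usual import path nltk.translate.bleu_score; the example sentences are invented for illustration.

from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu, sentence_bleu

reference = "the quick brown fox jumps over the lazy dog".split()
hypothesis = "the fast brown fox jumped over the lazy dog".split()

# BLEU-4 with the default uniform weights; Chen-Cherry smoothing guards
# against zero higher-order n-gram counts on short or divergent pairs.
chencherry = SmoothingFunction()
print(sentence_bleu([reference], hypothesis, smoothing_function=chencherry.method1))

# BLEU-2 by weighting only unigrams and bigrams.
print(sentence_bleu([reference], hypothesis, weights=(0.5, 0.5)))

# Corpus-level scoring sums numerators and denominators over all pairs before
# dividing, so it is not simply the mean of the sentence-level scores.
print(corpus_bleu([[reference]], [hypothesis], smoothing_function=chencherry.method1))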
natural language toolkit chrf score c 20012023 nltk project s maja popovic contributors liling tan ale tamchyna memsource url https www nltk org for license information see license txt chrf score implementation import re from collections import counter defaultdict from nltk util import ngrams def sentencechrf reference hypothesis minlen1 maxlen6 beta3 0 ignorewhitespacetrue return corpuschrf reference hypothesis minlen maxlen betabeta ignorewhitespaceignorewhitespace def preprocesssent ignorewhitespace if typesent str turn list of tokens into a string sent joinsent if ignorewhitespace sent re subrs sent return sent def chrfprecisionrecallfscoresupport reference hypothesis n beta3 0 epsilon1e16 refngrams counterngramsreference n hypngrams counterngramshypothesis n calculate the number of ngram matches overlapngrams refngrams hypngrams tp sumoverlapngrams values true positives tpfp sumhypngrams values true positives false positives tpfn sumrefngrams values true positives false negatives try prec tp tpfp precision rec tp tpfn recall factor beta2 fscore 1 factor prec rec factor prec rec except zerodivisionerror prec rec fscore epsilon return prec rec fscore tp def corpuschrf references hypotheses minlen1 maxlen6 beta3 0 ignorewhitespacetrue assert lenreferences len hypotheses the number of hypotheses and their references should be the same numsents lenhypotheses keep fscores for each ngram order separate ngramfscores defaultdictlambda list iterate through each hypothesis and their corresponding references for reference hypothesis in zipreferences hypotheses preprocess both reference and hypothesis reference preprocessreference ignorewhitespace hypothesis preprocesshypothesis ignorewhitespace calculate fscores for each sentence and for each ngram order separately for n in rangeminlen maxlen 1 compute the precision recall fscore and support prec rec fscore tp chrfprecisionrecallfscoresupport reference hypothesis n betabeta ngramfscoresn appendfscore how many ngram sizes numngramsizes lenngramfscores sum of fscores over all sentences for each ngram order totalscores sumfscores for n fscores in ngramfscores items macroaverage over ngram orders and over all sentences return sumtotalscores numngramsizes numsents natural language toolkit chrf score c 2001 2023 nltk project s maja popovic contributors liling tan aleš tamchyna memsource url https www nltk org for license information see license txt chrf score implementation calculates the sentence level chrf character n gram f score described in maja popovic 2015 chrf character n gram f score for automatic mt evaluation in proceedings of the 10th workshop on machine translation https www statmt org wmt15 pdf wmt49 pdf maja popovic 2016 chrf deconstructed β parameters and n gram weights in proceedings of the 1st conference on machine translation https www statmt org wmt16 pdf w16 2341 pdf this implementation of chrf only supports a single reference at the moment for details not reported in the paper consult maja popovic s original implementation https github com m popovic chrf the code should output results equivalent to running chrf with the following options nw 0 b 3 an example from the original bleu paper https www aclweb org anthology p02 1040 pdf ref1 str it is a guide to action that ensures that the military will forever heed party commands split hyp1 str it is a guide to action which ensures that the military always obeys the commands of the party split hyp2 str it is to insure the troops forever hearing the activity guidebook that party direct 
split sentence_chrf ref1 hyp1 doctest ellipsis 0 6349 sentence_chrf ref1 hyp2 doctest ellipsis 0 3330 the infamous the the the example ref the cat is on the mat split hyp the the the the the the the split sentence_chrf ref hyp doctest ellipsis 0 1468 an example to show that this function allows users to use strings instead of tokens i e list str as inputs ref1 str it is a guide to action that ensures that the military will forever heed party commands hyp1 str it is a guide to action which ensures that the military always obeys the commands of the party sentence_chrf ref1 hyp1 doctest ellipsis 0 6349 type ref1 type hyp1 str true sentence_chrf ref1 split hyp1 split doctest ellipsis 0 6349 to skip the unigrams and only use 2 to 3 grams sentence_chrf ref1 hyp1 min_len 2 max_len 3 doctest ellipsis 0 6617 param references reference sentence type references list str str param hypothesis a hypothesis sentence type hypothesis list str str param min_len the minimum order of n gram this function should extract type min_len int param max_len the maximum order of n gram this function should extract type max_len int param beta the parameter to assign more importance to recall over precision type beta float param ignore_whitespace ignore whitespace characters in scoring type ignore_whitespace bool return the sentence level chrf score rtype float turn list of tokens into a string this function computes the precision recall and fscore from the ngram overlaps it returns the support which is the true positive score by underspecifying the input type the function will be agnostic as to how it computes the ngrams and simply take the whichever element in the list it could be either token or character param reference the reference sentence type reference list param hypothesis the hypothesis sentence type hypothesis list param n extract up to the n th order ngrams type n int param beta the parameter to assign more importance to recall over precision type beta float param epsilon the fallback value if the hypothesis or reference is empty type epsilon float return returns the precision recall and f score and support true positive rtype tuple float calculate the number of ngram matches true positives true positives false positives true positives false negatives precision recall calculates the corpus level chrf character n gram f score it is the macro averaged value of the sentence segment level chrf score this implementation of chrf only supports a single reference at the moment ref1 str it is a guide to action that ensures that the military will forever heed party commands split ref2 str it is the guiding principle which guarantees the military forces always being under the command of the party split hyp1 str it is a guide to action which ensures that the military always obeys the commands of the party split hyp2 str it is to insure the troops forever hearing the activity guidebook that party direct corpus_chrf ref1 ref2 ref1 ref2 hyp1 hyp2 hyp2 hyp1 doctest ellipsis 0 3910 param references a corpus of list of reference sentences w r t hypotheses type references list list str param hypotheses a list of hypothesis sentences type hypotheses list list str param min_len the minimum order of n gram this function should extract type min_len int param max_len the maximum order of n gram this function should extract type max_len int param beta the parameter to assign more importance to recall over precision type beta float param ignore_whitespace ignore whitespace characters in scoring type ignore_whitespace bool return the 
sentence level chrf score rtype float keep f scores for each n gram order separate iterate through each hypothesis and their corresponding references preprocess both reference and hypothesis calculate f scores for each sentence and for each n gram order separately compute the precision recall fscore and support how many n gram sizes sum of f scores over all sentences for each n gram order macro average over n gram orders and over all sentences
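Since chrF operates on character n-grams of the (optionally) whitespace-stripped sentence, the following stand-alone sketch makes that preprocessing and the F-beta combination described above concrete; the names here are illustrative and not part of the NLTK API.

import re
from collections import Counter

def char_ngrams(sentence, n, ignore_whitespace=True):
    # chrF views the sentence as a character sequence; whitespace is usually
    # removed first so that token boundaries do not dominate the score.
    if ignore_whitespace:
        sentence = re.sub(r"\s+", "", sentence)
    return Counter(sentence[i : i + n] for i in range(len(sentence) - n + 1))

ref = "it is a guide to action"
hyp = "it is a guide for action"

ref_3grams = char_ngrams(ref, 3)
hyp_3grams = char_ngrams(hyp, 3)
tp = sum((ref_3grams & hyp_3grams).values())  # matching character 3-grams
prec = tp / sum(hyp_3grams.values())
rec = tp / sum(ref_3grams.values())

beta = 3.0  # beta > 1 weights recall more heavily than precision
fscore = (1 + beta**2) * prec * rec / (beta**2 * prec + rec)
print(prec, rec, fscore)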
import re from collections import Counter, defaultdict from nltk.util import ngrams def sentence_chrf( reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True ): return corpus_chrf( [reference], [hypothesis], min_len, max_len, beta=beta, ignore_whitespace=ignore_whitespace, ) def _preprocess(sent, ignore_whitespace): if type(sent) != str: sent = " ".join(sent) if ignore_whitespace: sent = re.sub(r"\s+", "", sent) return sent def chrf_precision_recall_fscore_support( reference, hypothesis, n, beta=3.0, epsilon=1e-16 ): ref_ngrams = Counter(ngrams(reference, n)) hyp_ngrams = Counter(ngrams(hypothesis, n)) overlap_ngrams = ref_ngrams & hyp_ngrams tp = sum(overlap_ngrams.values()) tpfp = sum(hyp_ngrams.values()) tpfn = sum(ref_ngrams.values()) try: prec = tp / tpfp rec = tp / tpfn factor = beta**2 fscore = (1 + factor) * (prec * rec) / (factor * prec + rec) except ZeroDivisionError: prec = rec = fscore = epsilon return prec, rec, fscore, tp def corpus_chrf( references, hypotheses, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True ): assert len(references) == len( hypotheses ), "The number of hypotheses and their references should be the same" num_sents = len(hypotheses) ngram_fscores = defaultdict(lambda: list()) for reference, hypothesis in zip(references, hypotheses): reference = _preprocess(reference, ignore_whitespace) hypothesis = _preprocess(hypothesis, ignore_whitespace) for n in range(min_len, max_len + 1): prec, rec, fscore, tp = chrf_precision_recall_fscore_support( reference, hypothesis, n, beta=beta ) ngram_fscores[n].append(fscore) num_ngram_sizes = len(ngram_fscores) total_scores = [sum(fscores) for n, fscores in ngram_fscores.items()] return (sum(total_scores) / num_ngram_sizes) / num_sents
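A hedged usage sketch for sentence_chrf and corpus_chrf as defined above, assuming the usual import path nltk.translate.chrf_score; the sentences are toy examples.

from nltk.translate.chrf_score import corpus_chrf, sentence_chrf

ref = "the cat sat on the mat"
hyp = "the cat is sitting on the mat"

# Plain strings and token lists are both accepted; whitespace is stripped by
# default before the character n-grams are extracted.
print(sentence_chrf(ref, hyp))
print(sentence_chrf(ref.split(), hyp.split()))

# Restrict the n-gram orders, e.g. character bigrams and trigrams only.
print(sentence_chrf(ref, hyp, min_len=2, max_len=3))

# Corpus-level chrF macro-averages the per-order F-scores over all sentences;
# note that this implementation supports a single reference per hypothesis.
print(corpus_chrf([ref], [hyp]))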
natural language toolkit galechurch aligner c 20012023 nltk project torsten marek marekifi uzh ch contributor cassidy laidlaw liling tan url https www nltk org for license information see license txt a port of the galechurch aligner gale church 1993 a program for aligning sentences in bilingual corpora https aclweb organthologyj931004 pdf complementary error function z absx t 1 1 0 5 z r t math exp z z 1 26551223 t 1 00002368 t 0 37409196 t 0 09678418 t 0 18628806 t 0 27886807 t 1 13520398 t 1 48851587 t 0 82215223 t 0 17087277 if x 0 0 return r else return 2 0 r def normcdfx these are the languageindependent probabilities and parameters given in gale church for the computation l1 is always the language with less characters traverse the alignment cost from the tracebacks and retrieves appropriate sentence pairs param backlinks a dictionary where the key is the alignment points and value is the cost referencing the languageindependent priors type backlinks dict param sourcesentslens a list of target sentences lengths type sourcesentslens listint param targetsentslens a list of target sentences lengths type targetsentslens listint returns the log probability of the two sentences csourcesentsi ctargetsentsj being aligned with a specific calignment param i the offset of the source sentence param j the offset of the target sentence param sourcesents the list of source sentence lengths param targetsents the list of target sentence lengths param alignment the alignment type a tuple of two integers param params the sentence alignment parameters returns the log probability of a specific alignment between the two sentences given the parameters actually the paper says ls params variancecharacters this is based on the c reference implementation with ls in the denominator insertions are impossible return the sentence alignment of two text blocks usually paragraphs alignblocks5 5 5 7 7 7 0 0 1 1 2 2 alignblocks10 5 5 12 20 0 0 1 1 2 1 alignblocks12 20 10 5 5 0 0 1 1 1 2 alignblocks10 2 10 10 2 10 12 3 20 3 12 0 0 1 1 2 2 3 2 4 3 5 4 param sourcesentslens the list of source sentence lengths param targetsentslens the list of target sentence lengths param params the sentence alignment parameters return the sentence alignments a list of index pairs there are always three rows in the history with the last of them being filled creates the sentence alignment of two texts texts can consist of several blocks block boundaries cannot be crossed by sentence alignment links each block consists of a list that contains the lengths in characters of the sentences in this block param sourceblocks the list of blocks in the source text param targetblocks the list of blocks in the target text param params the sentence alignment parameters returns a list of sentence alignment lists file io functions may belong in a corpus reader splits an iterator cit at values of csplitvalue each instance of csplitvalue is swallowed the iterator produces subiterators which need to be consumed fully before the next subiterator can be used parses a stream of tokens and splits it into sentences using csoftdelimiter tokens and blocks using charddelimiter tokens for use with the laligntexts function natural language toolkit gale church aligner c 2001 2023 nltk project torsten marek marek ifi uzh ch contributor cassidy laidlaw liling tan url https www nltk org for license information see license txt a port of the gale church aligner gale church 1993 a program for aligning sentences in bilingual corpora https aclweb org anthology j93 1004 pdf 
complementary error function return the area under the normal distribution from m x these are the language independent probabilities and parameters given in gale church for the computation l_1 is always the language with less characters traverse the alignment cost from the tracebacks and retrieves appropriate sentence pairs param backlinks a dictionary where the key is the alignment points and value is the cost referencing the languageindependent priors type backlinks dict param source_sents_lens a list of target sentences lengths type source_sents_lens list int param target_sents_lens a list of target sentences lengths type target_sents_lens list int returns the log probability of the two sentences c source_sents i c target_sents j being aligned with a specific c alignment param i the offset of the source sentence param j the offset of the target sentence param source_sents the list of source sentence lengths param target_sents the list of target sentence lengths param alignment the alignment type a tuple of two integers param params the sentence alignment parameters returns the log probability of a specific alignment between the two sentences given the parameters actually the paper says l_s params variance_characters this is based on the c reference implementation with l_s in the denominator insertions are impossible return the sentence alignment of two text blocks usually paragraphs align_blocks 5 5 5 7 7 7 0 0 1 1 2 2 align_blocks 10 5 5 12 20 0 0 1 1 2 1 align_blocks 12 20 10 5 5 0 0 1 1 1 2 align_blocks 10 2 10 10 2 10 12 3 20 3 12 0 0 1 1 2 2 3 2 4 3 5 4 param source_sents_lens the list of source sentence lengths param target_sents_lens the list of target sentence lengths param params the sentence alignment parameters return the sentence alignments a list of index pairs there are always three rows in the history with the last of them being filled creates the sentence alignment of two texts texts can consist of several blocks block boundaries cannot be crossed by sentence alignment links each block consists of a list that contains the lengths in characters of the sentences in this block param source_blocks the list of blocks in the source text param target_blocks the list of blocks in the target text param params the sentence alignment parameters returns a list of sentence alignment lists file i o functions may belong in a corpus reader splits an iterator c it at values of c split_value each instance of c split_value is swallowed the iterator produces subiterators which need to be consumed fully before the next subiterator can be used parses a stream of tokens and splits it into sentences using c soft_delimiter tokens and blocks using c hard_delimiter tokens for use with the l align_texts function
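To make the per-bead cost described above concrete, here is a hand computation for one hypothetical 1-1 sentence pair, using only the standard library; the lengths are invented and the parameter values mirror the language-independent defaults used in the implementation below.

import math

AVERAGE_CHARACTERS = 1      # expected target characters per source character
VARIANCE_CHARACTERS = 6.8   # variance of that ratio
PRIOR_1_1 = 0.89            # prior probability of a 1-1 bead

l_s, l_t = 120, 135         # hypothetical character lengths of the two sentences

# delta measures how surprising the length difference is under a Gaussian model.
m = (l_s + l_t / AVERAGE_CHARACTERS) / 2
delta = (l_s * AVERAGE_CHARACTERS - l_t) / math.sqrt(m * VARIANCE_CHARACTERS)

# Two-sided tail probability of |delta| under the standard normal, via erfc.
log_tail = math.log(0.5 * math.erfc(abs(delta) / math.sqrt(2)))

# The dynamic programme minimises this negative log probability per bead.
cost = -(math.log(2) + log_tail + math.log(PRIOR_1_1))
print(delta, cost)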
import math try: from norm import logsf as norm_logsf from scipy.stats import norm except ImportError: def erfcc(x): z = abs(x) t = 1 / (1 + 0.5 * z) r = t * math.exp( -z * z - 1.26551223 + t * ( 1.00002368 + t * ( 0.37409196 + t * ( 0.09678418 + t * ( -0.18628806 + t * ( 0.27886807 + t * ( -1.13520398 + t * (1.48851587 + t * (-0.82215223 + t * 0.17087277)) ) ) ) ) ) ) ) if x >= 0.0: return r else: return 2.0 - r def norm_cdf(x): return 1 - 0.5 * erfcc(x / math.sqrt(2)) def norm_logsf(x): try: return math.log(1 - norm_cdf(x)) except ValueError: return float("-inf") LOG2 = math.log(2) class LanguageIndependent: PRIORS = { (1, 0): 0.0099, (0, 1): 0.0099, (1, 1): 0.89, (2, 1): 0.089, (1, 2): 0.089, (2, 2): 0.011, } AVERAGE_CHARACTERS = 1 VARIANCE_CHARACTERS = 6.8 def trace(backlinks, source_sents_lens, target_sents_lens): links = [] position = (len(source_sents_lens), len(target_sents_lens)) while position != (0, 0) and all(p >= 0 for p in position): try: s, t = backlinks[position] except TypeError: position = (position[0] - 1, position[1] - 1) continue for i in range(s): for j in range(t): links.append((position[0] - i - 1, position[1] - j - 1)) position = (position[0] - s, position[1] - t) return links[::-1] def align_log_prob(i, j, source_sents, target_sents, alignment, params): l_s = sum(source_sents[i - offset - 1] for offset in range(alignment[0])) l_t = sum(target_sents[j - offset - 1] for offset in range(alignment[1])) try: m = (l_s + l_t / params.AVERAGE_CHARACTERS) / 2 delta = (l_s * params.AVERAGE_CHARACTERS - l_t) / math.sqrt( m * params.VARIANCE_CHARACTERS ) except ZeroDivisionError: return float("-inf") return -(LOG2 + norm_logsf(abs(delta)) + math.log(params.PRIORS[alignment])) def align_blocks(source_sents_lens, target_sents_lens, params=LanguageIndependent): alignment_types = list(params.PRIORS.keys()) D = [[]] backlinks = {} for i in range(len(source_sents_lens) + 1): for j in range(len(target_sents_lens) + 1): min_dist = float("inf") min_align = None for a in alignment_types: prev_i = -1 - a[0] prev_j = j - a[1] if prev_i < -len(D) or prev_j < 0: continue p = D[prev_i][prev_j] + align_log_prob( i, j, source_sents_lens, target_sents_lens, a, params ) if p < min_dist: min_dist = p min_align = a if min_dist == float("inf"): min_dist = 0 backlinks[(i, j)] = min_align D[-1].append(min_dist) if len(D) > 2: D.pop(0) D.append([]) return trace(backlinks, source_sents_lens, target_sents_lens) def align_texts(source_blocks, target_blocks, params=LanguageIndependent): if len(source_blocks) != len(target_blocks): raise ValueError( "Source and target texts do not have the same number of blocks." ) return [ align_blocks(source_block, target_block, params) for source_block, target_block in zip(source_blocks, target_blocks) ] def split_at(it, split_value): def _chunk_iterator(first): v = first while v != split_value: yield v v = it.next() while True: yield _chunk_iterator(it.next()) def parse_token_stream(stream, soft_delimiter, hard_delimiter): return [ [ sum(len(token) for token in sentence_it) for sentence_it in split_at(block_it, soft_delimiter) ] for block_it in split_at(stream, hard_delimiter) ]
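A hedged usage sketch for align_blocks and align_texts as defined above, assuming the usual import path nltk.translate.gale_church; the character lengths are invented for illustration.

from nltk.translate.gale_church import align_blocks, align_texts

# Character lengths of the sentences in one source block and one target block.
source_block = [110, 45, 80]
target_block = [120, 42, 85]

# Returns (source_index, target_index) pairs for the aligned sentences.
print(align_blocks(source_block, target_block))

# align_texts aligns several blocks (e.g. paragraphs) at once; block
# boundaries are never crossed by an alignment link.
source_text = [[110, 45, 80], [60, 60]]
target_text = [[120, 42, 85], [58, 61]]
print(align_texts(source_text, target_text))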
natural language toolkit gdfa word alignment symmetrization c 20012023 nltk project s liling tan url https www nltk org for license information see license txt this module symmetrisatizes the sourcetotarget and targettosource word alignment output and produces aka gdfa algorithm koehn 2005 step 1 find the intersection of the bidirectional alignment step 2 search for additional neighbor alignment points to be added given these criteria i neighbor alignments points are not in the intersection and ii neighbor alignments are in the union step 3 add all other alignment points that are not in the intersection not in the neighboring alignments that met the criteria but in the original forwardbackward alignment outputs forw 00 21 92 213 104 75 116 97 128 19 310 411 1712 1713 2514 1315 2416 1117 2818 back 00 19 29 310 411 512 66 75 86 97 104 116 128 1312 1512 1713 1813 1912 2013 213 2212 2314 2417 2515 2617 2718 2818 srctext trgtext therefore we expect that the luminosity function of such halo white dwarfs increases discontinuously with the luminosity srclen lensrctext split trglen lentrgtext split gdfa growdiagfinalandsrclen trglen forw back gdfa sortedset28 18 6 6 24 17 2 1 15 12 13 12 2 9 3 10 26 17 25 15 8 6 9 7 20 13 18 13 0 0 10 4 13 15 23 14 7 5 25 14 1 9 17 13 4 11 11 17 9 2 22 12 27 18 24 16 21 3 19 12 17 12 5 12 11 6 12 8 true references koehn p a axelrod a birch c callison m osborne and d talbot 2005 edinburgh system description for the 2005 iwslt speech translation evaluation in mt eval workshop type srclen int param srclen the number of tokens in the source language type trglen int param trglen the number of tokens in the target language type e2f str param e2f the forward word alignment outputs from sourcetotarget language in pharaoh output format type f2e str param f2e the backward word alignment outputs from targettosource language in pharaoh output format rtype settupleint return the symmetrized alignment points from the gdfa algorithm converts pharaoh text format into list of tuples aligned is used to check if neighbors are aligned in growdiag search for the neighbor points and them to the intersected alignment points if criteria are met iterate until no new points added for english word e 0 en for foreign word f 0 fn if e aligned with f for each neighboring point enew fnew if enew not aligned and fnew not aligned and enew fnew in unione2f f2e iterate until no new points added adds remaining points that are not in the intersection not in the neighboring alignments but in the original e2f and f2e alignments for english word e 0 en for foreign word f 0 fn if enew not aligned and fnew not aligned and enew fnew in unione2f f2e natural language toolkit gdfa word alignment symmetrization c 2001 2023 nltk project s liling tan url https www nltk org for license information see license txt this module symmetrisatizes the source to target and target to source word alignment output and produces aka gdfa algorithm koehn 2005 step 1 find the intersection of the bidirectional alignment step 2 search for additional neighbor alignment points to be added given these criteria i neighbor alignments points are not in the intersection and ii neighbor alignments are in the union step 3 add all other alignment points that are not in the intersection not in the neighboring alignments that met the criteria but in the original forward backward alignment outputs forw 0 0 2 1 9 2 21 3 10 4 7 5 11 6 9 7 12 8 1 9 3 10 4 11 17 12 17 13 25 14 13 15 24 16 11 17 28 18 back 0 0 1 9 2 9 3 10 4 11 5 12 6 6 7 5 8 6 9 7 
10 4 11 6 12 8 13 12 15 12 17 13 18 13 19 12 20 13 21 3 22 12 23 14 24 17 25 15 26 17 27 18 28 18 srctext この よう な ハロー 白色 わい 星 の l 関数 は l と 共 に 不連続 に 増加 する こと が 期待 さ れる こと を 示し た trgtext therefore we expect that the luminosity function of such halo white dwarfs increases discontinuously with the luminosity srclen len srctext split trglen len trgtext split gdfa grow_diag_final_and srclen trglen forw back gdfa sorted set 28 18 6 6 24 17 2 1 15 12 13 12 2 9 3 10 26 17 25 15 8 6 9 7 20 13 18 13 0 0 10 4 13 15 23 14 7 5 25 14 1 9 17 13 4 11 11 17 9 2 22 12 27 18 24 16 21 3 19 12 17 12 5 12 11 6 12 8 true references koehn p a axelrod a birch c callison m osborne and d talbot 2005 edinburgh system description for the 2005 iwslt speech translation evaluation in mt eval workshop type srclen int param srclen the number of tokens in the source language type trglen int param trglen the number of tokens in the target language type e2f str param e2f the forward word alignment outputs from source to target language in pharaoh output format type f2e str param f2e the backward word alignment outputs from target to source language in pharaoh output format rtype set tuple int return the symmetrized alignment points from the gdfa algorithm converts pharaoh text format into list of tuples find the intersection aligned is used to check if neighbors are aligned in grow_diag search for the neighbor points and them to the intersected alignment points if criteria are met iterate until no new points added for english word e 0 en for foreign word f 0 fn if e aligned with f for each neighboring point e new f new if e new not aligned and f new not aligned and e new f new in union e2f f2e iterate until no new points added adds remaining points that are not in the intersection not in the neighboring alignments but in the original e2f and f2e alignments for english word e 0 en for foreign word f 0 fn if e new not aligned and f new not aligned and e new f new in union e2f f2e
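As a small warm-up for the implementation below, this sketch shows how the Pharaoh-format strings are parsed and what the step-1 intersection and union look like on a toy pair of alignments; the alignments and the helper name are illustrative only.

e2f = "0-0 1-1 2-2 3-2"   # source-to-target word alignment, Pharaoh format
f2e = "0-0 1-1 2-2 2-3"   # target-to-source word alignment, Pharaoh format

def parse_pharaoh(alignment_str):
    # Each "i-j" pair becomes an (i, j) tuple of integer word indices.
    return {tuple(map(int, pair.split("-"))) for pair in alignment_str.split()}

forward, backward = parse_pharaoh(e2f), parse_pharaoh(f2e)
intersection = forward & backward   # high-precision points kept unconditionally
union = forward | backward          # candidate pool for grow-diag and final-and
print(sorted(intersection))
print(sorted(union))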
from collections import defaultdict


def grow_diag_final_and(srclen, trglen, e2f, f2e):
    e2f = [tuple(map(int, a.split("-"))) for a in e2f.split()]
    f2e = [tuple(map(int, a.split("-"))) for a in f2e.split()]
    neighbors = [(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]
    alignment = set(e2f).intersection(set(f2e))
    union = set(e2f).union(set(f2e))

    aligned = defaultdict(set)
    for i, j in alignment:
        aligned["e"].add(i)
        aligned["f"].add(j)

    def grow_diag():
        prev_len = len(alignment) - 1
        while prev_len < len(alignment):
            no_new_points = True
            for e in range(srclen):
                for f in range(trglen):
                    if (e, f) in alignment:
                        for neighbor in neighbors:
                            neighbor = tuple(i + j for i, j in zip((e, f), neighbor))
                            e_new, f_new = neighbor
                            if (
                                e_new not in aligned and f_new not in aligned
                            ) and neighbor in union:
                                alignment.add(neighbor)
                                aligned["e"].add(e_new)
                                aligned["f"].add(f_new)
                                prev_len += 1
                                no_new_points = False
            if no_new_points:
                break

    def final_and(a):
        for e_new in range(srclen):
            for f_new in range(trglen):
                if (
                    e_new not in aligned
                    and f_new not in aligned
                    and (e_new, f_new) in union
                ):
                    alignment.add((e_new, f_new))
                    aligned["e"].add(e_new)
                    aligned["f"].add(f_new)

    grow_diag()
    final_and(e2f)
    final_and(f2e)
    return sorted(alignment)
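A hedged usage sketch for grow_diag_final_and as defined above, assuming the usual import path nltk.translate.gdfa; the toy alignments are invented for illustration.

from nltk.translate.gdfa import grow_diag_final_and

srclen, trglen = 4, 4
forward = "0-0 1-1 2-2 3-2"    # source-to-target alignment, Pharaoh format
backward = "0-0 1-1 2-2 2-3"   # target-to-source alignment, Pharaoh format

# Returns the symmetrised (source, target) alignment points as a sorted list.
print(grow_diag_final_and(srclen, trglen, forward, backward))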
natural language toolkit gleu score c 20012023 nltk project s contributors mike schuster michael wayne goodman liling tan url https www nltk org for license information see license txt gleu score implementation from collections import counter from nltk util import everygrams ngrams def sentencegleureferences hypothesis minlen1 maxlen4 return corpusgleureferences hypothesis minlenminlen maxlenmaxlen def corpusgleulistofreferences hypotheses minlen1 maxlen4 sanity check assert lenlistofreferences len hypotheses the number of hypotheses and their references should be the same sum matches and maxtokenlengths over all sentences corpusnmatch 0 corpusnall 0 for references hypothesis in ziplistofreferences hypotheses hypngrams countereverygramshypothesis minlen maxlen tpfp sumhypngrams values true positives false positives hypcounts for reference in references refngrams countereverygramsreference minlen maxlen tpfn sumrefngrams values true positives false negatives overlapngrams refngrams hypngrams tp sumoverlapngrams values true positives while gleu is defined as the minimum of precision and recall we can reduce the number of division operations by one by instead finding the maximum of the denominators for the precision and recall formulae since the numerators are the same precision tp tpfp recall tp tpfn gleuscore minprecision recall tp maxtpfp tpfn nall maxtpfp tpfn if nall 0 hypcounts appendtp nall use the reference yielding the highest score if hypcounts nmatch nall maxhypcounts keylambda hc hc0 hc1 corpusnmatch nmatch corpusnall nall corner case empty corpus or empty referencesdon t divide by zero if corpusnall 0 gleuscore 0 0 else gleuscore corpusnmatch corpusnall return gleuscore natural language toolkit gleu score c 2001 2023 nltk project s contributors mike schuster michael wayne goodman liling tan url https www nltk org for license information see license txt gleu score implementation calculates the sentence level gleu google bleu score described in yonghui wu mike schuster zhifeng chen quoc v le mohammad norouzi wolfgang macherey maxim krikun yuan cao qin gao klaus macherey jeff klingner apurva shah melvin johnson xiaobing liu lukasz kaiser stephan gouws yoshikiyo kato taku kudo hideto kazawa keith stevens george kurian nishant patil wei wang cliff young jason smith jason riesa alex rudnick oriol vinyals greg corrado macduff hughes jeffrey dean 2016 google s neural machine translation system bridging the gap between human and machine translation eprint arxiv 1609 08144 https arxiv org pdf 1609 08144v2 pdf retrieved on 27 oct 2016 from wu et al 2016 the bleu score has some undesirable properties when used for single sentences as it was designed to be a corpus measure we therefore use a slightly different score for our rl experiments which we call the gleu score for the gleu score we record all sub sequences of 1 2 3 or 4 tokens in output and target sequence n grams we then compute a recall which is the ratio of the number of matching n grams to the number of total n grams in the target ground truth sequence and a precision which is the ratio of the number of matching n grams to the number of total n grams in the generated output sequence then gleu score is simply the minimum of recall and precision this gleu score s range is always between 0 no matches and 1 all match and it is symmetrical when switching output and target according to our experiments gleu score correlates quite well with the bleu metric on a corpus level but does not have its drawbacks for our per sentence reward 
objective note the initial implementation only allowed a single reference but now a list of references is required which is consistent with bleu_score sentence_bleu the infamous the the the example ref the cat is on the mat split hyp the the the the the the the split sentence_gleu ref hyp doctest ellipsis 0 0909 an example to evaluate normal machine translation outputs ref1 str it is a guide to action that ensures that the military will forever heed party commands split hyp1 str it is a guide to action which ensures that the military always obeys the commands of the party split hyp2 str it is to insure the troops forever hearing the activity guidebook that party direct split sentence_gleu ref1 hyp1 doctest ellipsis 0 4393 sentence_gleu ref1 hyp2 doctest ellipsis 0 1206 param references a list of reference sentences type references list list str param hypothesis a hypothesis sentence type hypothesis list str param min_len the minimum order of n gram this function should extract type min_len int param max_len the maximum order of n gram this function should extract type max_len int return the sentence level gleu score rtype float calculate a single corpus level gleu score aka system level gleu for all the hypotheses and their respective references instead of averaging the sentence level gleu scores i e macro average precision wu et al 2016 sum up the matching tokens and the max of hypothesis and reference tokens for each sentence then compute using the aggregate values from mike schuster via email for the corpus we just add up the two statistics n_match and n_all max n_all_output n_all_target for all sentences then calculate gleu_score n_match n_all so it is not just a mean of the sentence gleu scores in our case longer sentences count more which i think makes sense as they are more difficult to translate hyp1 it is a guide to action which ensures that the military always obeys the commands of the party ref1a it is a guide to action that ensures that the military will forever heed party commands ref1b it is the guiding principle which guarantees the military forces always being under the command of the party ref1c it is the practical guide for the army always to heed the directions of the party hyp2 he read the book because he was interested in world history ref2a he was interested in world history because he read the book list_of_references ref1a ref1b ref1c ref2a hypotheses hyp1 hyp2 corpus_gleu list_of_references hypotheses doctest ellipsis 0 5673 the example below show that corpus_gleu is different from averaging sentence_gleu for hypotheses score1 sentence_gleu ref1a hyp1 score2 sentence_gleu ref2a hyp2 score1 score2 2 doctest ellipsis 0 6144 param list_of_references a list of reference sentences w r t hypotheses type list_of_references list list list str param hypotheses a list of hypothesis sentences type hypotheses list list str param min_len the minimum order of n gram this function should extract type min_len int param max_len the maximum order of n gram this function should extract type max_len int return the corpus level gleu score rtype float sanity check sum matches and max token lengths over all sentences true positives false positives true positives false negatives true positives while gleu is defined as the minimum of precision and recall we can reduce the number of division operations by one by instead finding the maximum of the denominators for the precision and recall formulae since the numerators are the same precision tp tpfp recall tp tpfn gleu_score min precision 
recall tp max tpfp tpfn use the reference yielding the highest score corner case empty corpus or empty references don t divide by zero
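The implementation below saves one division by using tp / max(denominators) in place of min(precision, recall); the identity holds because both ratios share the same numerator. The stand-alone sketch that follows (standard library only, toy sentences, illustrative helper name) checks it numerically.

from collections import Counter
from itertools import chain

def all_ngrams(tokens, min_len, max_len):
    # Multiset of every n-gram of orders min_len..max_len.
    return Counter(
        chain.from_iterable(
            (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))
            for n in range(min_len, max_len + 1)
        )
    )

ref = "the cat is on the mat".split()
hyp = "the the the cat on the mat".split()

ref_ngrams = all_ngrams(ref, 1, 4)
hyp_ngrams = all_ngrams(hyp, 1, 4)
tp = sum((ref_ngrams & hyp_ngrams).values())
tpfp = sum(hyp_ngrams.values())   # all hypothesis n-grams
tpfn = sum(ref_ngrams.values())   # all reference n-grams

assert min(tp / tpfp, tp / tpfn) == tp / max(tpfp, tpfn)
print(tp / max(tpfp, tpfn))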
from collections import Counter

from nltk.util import everygrams, ngrams


def sentence_gleu(references, hypothesis, min_len=1, max_len=4):
    return corpus_gleu([references], [hypothesis], min_len=min_len, max_len=max_len)


def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4):
    assert len(list_of_references) == len(
        hypotheses
    ), "The number of hypotheses and their reference(s) should be the same"

    corpus_n_match = 0
    corpus_n_all = 0

    for references, hypothesis in zip(list_of_references, hypotheses):
        hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len))
        tpfp = sum(hyp_ngrams.values())

        hyp_counts = []
        for reference in references:
            ref_ngrams = Counter(everygrams(reference, min_len, max_len))
            tpfn = sum(ref_ngrams.values())

            overlap_ngrams = ref_ngrams & hyp_ngrams
            tp = sum(overlap_ngrams.values())

            n_all = max(tpfp, tpfn)

            if n_all > 0:
                hyp_counts.append((tp, n_all))

        if hyp_counts:
            n_match, n_all = max(hyp_counts, key=lambda hc: hc[0] / hc[1])
            corpus_n_match += n_match
            corpus_n_all += n_all

    if corpus_n_all == 0:
        gleu_score = 0.0
    else:
        gleu_score = corpus_n_match / corpus_n_all

    return gleu_score
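A hedged usage sketch for sentence_gleu and corpus_gleu as defined above, assuming the usual import path nltk.translate.gleu_score; the sentences are toy examples.

from nltk.translate.gleu_score import corpus_gleu, sentence_gleu

ref = "the cat sat on the mat".split()
hyp = "the cat is on the mat".split()

# Sentence-level GLEU over 1- to 4-grams (the default orders).
print(sentence_gleu([ref], hyp))

# Corpus-level GLEU sums matches and maximum n-gram counts over all sentence
# pairs before dividing, so longer sentences weigh more than in a plain mean.
list_of_references = [[ref], ["he read the book".split()]]
hypotheses = [hyp, "he read a book".split()]
print(corpus_gleu(list_of_references, hypotheses))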
natural language toolkit ibm model 1 c 20012013 nltk project chin yee lee c lee32student unimelb edu au hengfeng li hengfeng12345gmail com ruxin hou r houstudent unimelb edu au calvin tanujaya lim c tanujayalimgmail com based on earlier version by will zhang wilzzhagmail com guan gui gguistudent unimelb edu au url https www nltk org for license information see license txt lexical translation model that ignores word order in ibm model 1 word order is ignored for simplicity as long as the word alignments are equivalent it doesn t matter where the word occurs in the source or target sentence thus the following three alignments are equally likely source je mange du jambon target i eat some ham alignment 0 0 1 1 2 2 3 3 source je mange du jambon target some ham eat i alignment 0 2 1 3 2 1 3 1 source du jambon je mange target eat i some ham alignment 0 3 1 2 2 0 3 1 note that an alignment is represented here as wordindexintarget wordindexinsource the em algorithm used in model 1 is e step in the training data count how many times a source language word is translated into a target language word weighted by the prior probability of the translation m step estimate the new probability of translation based on the counts from the expectation step notations i position in the source sentence valid values are 0 for null 1 2 length of source sentence j position in the target sentence valid values are 1 2 length of target sentence s a word in the source language t a word in the target language references philipp koehn 2010 statistical machine translation cambridge university press new york peter e brown stephen a della pietra vincent j della pietra and robert l mercer 1993 the mathematics of statistical machine translation parameter estimation computational linguistics 19 2 263311 lexical translation model that ignores word order bitext bitext appendalignedsent klein ist das haus the house is small bitext appendalignedsent das haus ist ja gro the house is big bitext appendalignedsent das buch ist ja klein the book is small bitext appendalignedsent das haus the house bitext appendalignedsent das buch the book bitext appendalignedsent ein buch a book ibm1 ibmmodel1bitext 5 printroundibm1 translationtable buch book 3 0 889 printroundibm1 translationtable das book 3 0 062 printroundibm1 translationtable buch none 3 0 113 printroundibm1 translationtable ja none 3 0 073 testsentence bitext2 testsentence words das buch ist ja klein testsentence mots the book is small testsentence alignment alignment0 0 1 1 2 2 3 2 4 3 train on sentencealignedcorpus and create a lexical translation model translation direction is from alignedsent mots to alignedsent words param sentencealignedcorpus sentencealigned parallel corpus type sentencealignedcorpus listalignedsent param iterations number of iterations to run training algorithm type iterations int param probabilitytables optional use this to pass in custom probability values if not specified probabilities will be set to a uniform distribution or some other sensible value if specified the following entry must be present translationtable see ibmmodel for the type and purpose of this table type probabilitytables dictstr object set userdefined probabilities e step a compute normalization factors to weigh counts e step b collect counts m step update probabilities with maximum likelihood estimate computes the probability of all possible word alignments expressed as a marginal distribution over target words t each entry in the return value represents the contribution to the total 
alignment probability by the target word t to obtain probabilityalignment srcsentence trgsentence simply sum the entries in the return value return probability of t for all s in srcsentence rtype dictstr float probability that word t in the target sentence is aligned to word s in the source sentence probability of target sentence and an alignment given the source sentence determines the best word alignment for one sentence pair from the corpus that the model was trained on the best alignment will be set in sentencepair when the method returns in contrast with the internal implementation of ibm models the word indices in the alignment are zero indexed not oneindexed param sentencepair a sentence in the source language and its counterpart sentence in the target language type sentencepair alignedsent initialize trgword to align with the null token natural language toolkit ibm model 1 c 2001 2013 nltk project chin yee lee c lee32 student unimelb edu au hengfeng li hengfeng12345 gmail com ruxin hou r hou student unimelb edu au calvin tanujaya lim c tanujayalim gmail com based on earlier version by will zhang wilzzha gmail com guan gui ggui student unimelb edu au url https www nltk org for license information see license txt lexical translation model that ignores word order in ibm model 1 word order is ignored for simplicity as long as the word alignments are equivalent it doesn t matter where the word occurs in the source or target sentence thus the following three alignments are equally likely source je mange du jambon target i eat some ham alignment 0 0 1 1 2 2 3 3 source je mange du jambon target some ham eat i alignment 0 2 1 3 2 1 3 1 source du jambon je mange target eat i some ham alignment 0 3 1 2 2 0 3 1 note that an alignment is represented here as word_index_in_target word_index_in_source the em algorithm used in model 1 is e step in the training data count how many times a source language word is translated into a target language word weighted by the prior probability of the translation m step estimate the new probability of translation based on the counts from the expectation step notations i position in the source sentence valid values are 0 for null 1 2 length of source sentence j position in the target sentence valid values are 1 2 length of target sentence s a word in the source language t a word in the target language references philipp koehn 2010 statistical machine translation cambridge university press new york peter e brown stephen a della pietra vincent j della pietra and robert l mercer 1993 the mathematics of statistical machine translation parameter estimation computational linguistics 19 2 263 311 lexical translation model that ignores word order bitext bitext append alignedsent klein ist das haus the house is small bitext append alignedsent das haus ist ja groß the house is big bitext append alignedsent das buch ist ja klein the book is small bitext append alignedsent das haus the house bitext append alignedsent das buch the book bitext append alignedsent ein buch a book ibm1 ibmmodel1 bitext 5 print round ibm1 translation_table buch book 3 0 889 print round ibm1 translation_table das book 3 0 062 print round ibm1 translation_table buch none 3 0 113 print round ibm1 translation_table ja none 3 0 073 test_sentence bitext 2 test_sentence words das buch ist ja klein test_sentence mots the book is small test_sentence alignment alignment 0 0 1 1 2 2 3 2 4 3 train on sentence_aligned_corpus and create a lexical translation model translation direction is from alignedsent 
mots to alignedsent words param sentence_aligned_corpus sentence aligned parallel corpus type sentence_aligned_corpus list alignedsent param iterations number of iterations to run training algorithm type iterations int param probability_tables optional use this to pass in custom probability values if not specified probabilities will be set to a uniform distribution or some other sensible value if specified the following entry must be present translation_table see ibmmodel for the type and purpose of this table type probability_tables dict str object set user defined probabilities e step a compute normalization factors to weigh counts e step b collect counts m step update probabilities with maximum likelihood estimate computes the probability of all possible word alignments expressed as a marginal distribution over target words t each entry in the return value represents the contribution to the total alignment probability by the target word t to obtain probability alignment src_sentence trg_sentence simply sum the entries in the return value return probability of t for all s in src_sentence rtype dict str float probability that word t in the target sentence is aligned to word s in the source sentence probability of target sentence and an alignment given the source sentence skip the dummy zeroeth element determines the best word alignment for one sentence pair from the corpus that the model was trained on the best alignment will be set in sentence_pair when the method returns in contrast with the internal implementation of ibm models the word indices in the alignment are zero indexed not one indexed param sentence_pair a sentence in the source language and its counterpart sentence in the target language type sentence_pair alignedsent initialize trg_word to align with the null token prefer newer word in case of tie
import warnings from collections import defaultdict from nltk.translate import AlignedSent, Alignment, IBMModel from nltk.translate.ibm_model import Counts class IBMModel1(IBMModel): def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): super().__init__(sentence_aligned_corpus) if probability_tables is None: self.set_uniform_probabilities(sentence_aligned_corpus) else: self.translation_table = probability_tables["translation_table"] for n in range(0, iterations): self.train(sentence_aligned_corpus) self.align_all(sentence_aligned_corpus) def set_uniform_probabilities(self, sentence_aligned_corpus): initial_prob = 1 / len(self.trg_vocab) if initial_prob < IBMModel.MIN_PROB: warnings.warn( "Target language vocabulary is too large (" + str(len(self.trg_vocab)) + " words). " "Results may be less accurate." ) for t in self.trg_vocab: self.translation_table[t] = defaultdict(lambda: initial_prob) def train(self, parallel_corpus): counts = Counts() for aligned_sentence in parallel_corpus: trg_sentence = aligned_sentence.words src_sentence = [None] + aligned_sentence.mots total_count = self.prob_all_alignments(src_sentence, trg_sentence) for t in trg_sentence: for s in src_sentence: count = self.prob_alignment_point(s, t) normalized_count = count / total_count[t] counts.t_given_s[t][s] += normalized_count counts.any_t_given_s[s] += normalized_count self.maximize_lexical_translation_probabilities(counts) def prob_all_alignments(self, src_sentence, trg_sentence): alignment_prob_for_t = defaultdict(lambda: 0.0) for t in trg_sentence: for s in src_sentence: alignment_prob_for_t[t] += self.prob_alignment_point(s, t) return alignment_prob_for_t def prob_alignment_point(self, s, t): return self.translation_table[t][s] def prob_t_a_given_s(self, alignment_info): prob = 1.0 for j, i in enumerate(alignment_info.alignment): if j == 0: continue trg_word = alignment_info.trg_sentence[j] src_word = alignment_info.src_sentence[i] prob *= self.translation_table[trg_word][src_word] return max(prob, IBMModel.MIN_PROB) def align_all(self, parallel_corpus): for sentence_pair in parallel_corpus: self.align(sentence_pair) def align(self, sentence_pair): best_alignment = [] for j, trg_word in enumerate(sentence_pair.words): best_prob = max(self.translation_table[trg_word][None], IBMModel.MIN_PROB) best_alignment_point = None for i, src_word in enumerate(sentence_pair.mots): align_prob = self.translation_table[trg_word][src_word] if align_prob >= best_prob: best_prob = align_prob best_alignment_point = i best_alignment.append((j, best_alignment_point)) sentence_pair.alignment = Alignment(best_alignment)
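As a companion to the class above, here is a minimal, self-contained sketch of the Model 1 EM loop on a toy corpus. It is not part of the NLTK API; the corpus, variable names, and iteration count are illustrative assumptions, chosen only to make the E-step (expected counts) and M-step (re-estimating t(t|s)) concrete.

# Illustrative sketch only -- not the NLTK implementation.
from collections import defaultdict

corpus = [
    (["the", "house"], ["das", "haus"]),  # (source, target) toy pairs
    (["the", "book"], ["das", "buch"]),
]

# Uniform initialization of t(t|s), with None standing in for NULL.
src_vocab = {None} | {s for src, _ in corpus for s in src}
trg_vocab = {t for _, trg in corpus for t in trg}
t_table = {t: {s: 1 / len(trg_vocab) for s in src_vocab} for t in trg_vocab}

for _ in range(10):
    # E-step: expected counts, weighted by the current probabilities.
    count = defaultdict(lambda: defaultdict(float))
    total = defaultdict(float)
    for src, trg in corpus:
        src = [None] + src
        for t in trg:
            norm = sum(t_table[t][s] for s in src)
            for s in src:
                c = t_table[t][s] / norm
                count[t][s] += c
                total[s] += c
    # M-step: maximum likelihood re-estimate of t(t|s).
    for t in trg_vocab:
        for s in src_vocab:
            t_table[t][s] = count[t][s] / total[s]

print(round(t_table["haus"]["house"], 3))  # moves towards 1.0 as EM converges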
natural language toolkit ibm model 2 c 2001 2013 nltk project s chin yee lee hengfeng li ruxin hou calvin tanujaya lim url https www nltk org for license information see license txt lexical translation model that considers word order ibm model 2 improves on model 1 by accounting for word order an alignment probability is introduced a i j l m which predicts a source word position given its aligned target word s position the em algorithm used in model 2 is e step in the training data collect counts weighted by prior probabilities a count how many times a source language word is translated into a target language word b count how many times a particular position in the source sentence is aligned to a particular position in the target sentence m step estimate new probabilities based on the counts from the e step notations i position in the source sentence valid values are 0 for null 1 2 length of source sentence j position in the target sentence valid values are 1 2 length of target sentence l number of words in the source sentence excluding null m number of words in the target sentence s a word in the source language t a word in the target language references philipp koehn 2010 statistical machine translation cambridge university press new york peter e brown stephen a della pietra vincent j della pietra and robert l mercer 1993 the mathematics of statistical machine translation parameter estimation computational linguistics 19 2 263 311 lexical translation model that considers word order bitext bitext append alignedsent klein ist das haus the house is small bitext append alignedsent das haus ist ja groß the house is big bitext append alignedsent das buch ist ja klein the book is small bitext append alignedsent das haus the house bitext append alignedsent das buch the book bitext append alignedsent ein buch a book ibm2 ibmmodel2 bitext 5 print round ibm2 translation_table buch book 3 1 0 print round ibm2 translation_table das book 3 0 0 print round ibm2 translation_table buch none 3 0 0 print round ibm2 translation_table ja none 3 0 0 print round ibm2 alignment_table 1 1 2 2 3 0 939 print round ibm2 alignment_table 1 2 2 2 3 0 0 print round ibm2 alignment_table 2 2 4 5 3 1 0 test_sentence bitext 2 test_sentence words das buch ist ja klein test_sentence mots the book is small test_sentence alignment alignment 0 0 1 1 2 2 3 2 4 3 train on sentence_aligned_corpus and create a lexical translation model and an alignment model translation direction is from alignedsent mots to alignedsent words param sentence_aligned_corpus sentence aligned parallel corpus type sentence_aligned_corpus list
alignedsent param iterations number of iterations to run training algorithm type iterations int param probability_tables optional use this to pass in custom probability values if not specified probabilities will be set to a uniform distribution or some other sensible value if specified all the following entries must be present translation_table alignment_table see ibmmodel for the type and purpose of these tables type probability_tables dict str object get translation probabilities from ibm model 1 run more iterations of training for model 1 since it is faster than model 2 set user defined probabilities a i j l m 1 l 1 for all i j l m 1 indexed e step a compute normalization factors to weigh counts e step b collect counts m step update probabilities with maximum likelihood estimates computes the probability of all possible word alignments expressed as a marginal distribution over target words t each entry in the return value represents the contribution to the total alignment probability by the target word t to obtain probability alignment src_sentence trg_sentence simply sum the entries in the return value return probability of t for all s in src_sentence rtype dict str float probability that position j in trg_sentence is aligned to position i in the src_sentence probability of target sentence and an alignment given the source sentence skip the dummy zeroeth element determines the best word alignment for one sentence pair from the corpus that the model was trained on the best alignment will be set in sentence_pair when the method returns in contrast with the internal implementation of ibm models the word indices in the alignment are zero indexed not one indexed param sentence_pair a sentence in the source language and its counterpart sentence in the target language type sentence_pair alignedsent initialize trg_word to align with the null token data object to store counts of various parameters during training includes counts for alignment
import warnings from collections import defaultdict from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel1 from nltk.translate.ibm_model import Counts class IBMModel2(IBMModel): def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): super().__init__(sentence_aligned_corpus) if probability_tables is None: ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations) self.translation_table = ibm1.translation_table self.set_uniform_probabilities(sentence_aligned_corpus) else: self.translation_table = probability_tables["translation_table"] self.alignment_table = probability_tables["alignment_table"] for n in range(0, iterations): self.train(sentence_aligned_corpus) self.align_all(sentence_aligned_corpus) def set_uniform_probabilities(self, sentence_aligned_corpus): l_m_combinations = set() for aligned_sentence in sentence_aligned_corpus: l = len(aligned_sentence.mots) m = len(aligned_sentence.words) if (l, m) not in l_m_combinations: l_m_combinations.add((l, m)) initial_prob = 1 / (l + 1) if initial_prob < IBMModel.MIN_PROB: warnings.warn( "A source sentence is too long (" + str(l) + " words). Results may be less accurate." ) for i in range(0, l + 1): for j in range(1, m + 1): self.alignment_table[i][j][l][m] = initial_prob def train(self, parallel_corpus): counts = Model2Counts() for aligned_sentence in parallel_corpus: src_sentence = [None] + aligned_sentence.mots trg_sentence = ["UNUSED"] + aligned_sentence.words l = len(aligned_sentence.mots) m = len(aligned_sentence.words) total_count = self.prob_all_alignments(src_sentence, trg_sentence) for j in range(1, m + 1): t = trg_sentence[j] for i in range(0, l + 1): s = src_sentence[i] count = self.prob_alignment_point(i, j, src_sentence, trg_sentence) normalized_count = count / total_count[t] counts.update_lexical_translation(normalized_count, s, t) counts.update_alignment(normalized_count, i, j, l, m) self.maximize_lexical_translation_probabilities(counts) self.maximize_alignment_probabilities(counts) def maximize_alignment_probabilities(self, counts): MIN_PROB = IBMModel.MIN_PROB for i, j_s in counts.alignment.items(): for j, src_sentence_lengths in j_s.items(): for l, trg_sentence_lengths in src_sentence_lengths.items(): for m in trg_sentence_lengths: estimate = ( counts.alignment[i][j][l][m] / counts.alignment_for_any_i[j][l][m] ) self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB) def prob_all_alignments(self, src_sentence, trg_sentence): alignment_prob_for_t = defaultdict(lambda: 0.0) for j in range(1, len(trg_sentence)): t = trg_sentence[j] for i in range(0, len(src_sentence)): alignment_prob_for_t[t] += self.prob_alignment_point( i, j, src_sentence, trg_sentence ) return alignment_prob_for_t def prob_alignment_point(self, i, j, src_sentence, trg_sentence): l = len(src_sentence) - 1 m = len(trg_sentence) - 1 s = src_sentence[i] t = trg_sentence[j] return self.translation_table[t][s] * self.alignment_table[i][j][l][m] def prob_t_a_given_s(self, alignment_info): prob = 1.0 l = len(alignment_info.src_sentence) - 1 m = len(alignment_info.trg_sentence) - 1 for j, i in enumerate(alignment_info.alignment): if j == 0: continue trg_word = alignment_info.trg_sentence[j] src_word = alignment_info.src_sentence[i] prob *= ( self.translation_table[trg_word][src_word] * self.alignment_table[i][j][l][m] ) return max(prob, IBMModel.MIN_PROB) def align_all(self, parallel_corpus): for sentence_pair in parallel_corpus: self.align(sentence_pair) def align(self, sentence_pair): best_alignment = [] l = 
len(sentence_pair.mots) m = len(sentence_pair.words) for j, trg_word in enumerate(sentence_pair.words): best_prob = ( self.translation_table[trg_word][None] * self.alignment_table[0][j + 1][l][m] ) best_prob = max(best_prob, IBMModel.MIN_PROB) best_alignment_point = None for i, src_word in enumerate(sentence_pair.mots): align_prob = ( self.translation_table[trg_word][src_word] * self.alignment_table[i + 1][j + 1][l][m] ) if align_prob >= best_prob: best_prob = align_prob best_alignment_point = i best_alignment.append((j, best_alignment_point)) sentence_pair.alignment = Alignment(best_alignment) class Model2Counts(Counts): def __init__(self): super().__init__() self.alignment = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) ) self.alignment_for_any_i = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) ) def update_lexical_translation(self, count, s, t): self.t_given_s[t][s] += count self.any_t_given_s[s] += count def update_alignment(self, count, i, j, l, m): self.alignment[i][j][l][m] += count self.alignment_for_any_i[j][l][m] += count
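A simplified restatement of the per-word decision inside IBMModel2.align(): for a target word at (1-indexed) position j, pick the source position i that maximizes t(t_j | s_i) * a(i | j, l, m), with i = 0 reserved for NULL. The helper below is an illustrative sketch, not part of the NLTK API; it only assumes the translation_table and alignment_table of a trained IBMModel2, and the toy bitext is an arbitrary example.

# Illustrative helper, assuming tables from a trained IBMModel2.
from nltk.translate import AlignedSent, IBMModel2

def best_source_position(j, trg_word, src_words, m, translation_table, alignment_table):
    """Return the zero-indexed source position for trg_word, or None for NULL."""
    l = len(src_words)
    best_i = None
    best_score = translation_table[trg_word][None] * alignment_table[0][j][l][m]
    for i, src_word in enumerate(src_words, start=1):
        score = translation_table[trg_word][src_word] * alignment_table[i][j][l][m]
        if score >= best_score:
            best_i, best_score = i - 1, score
    return best_i

bitext = [
    AlignedSent(["das", "haus"], ["the", "house"]),
    AlignedSent(["das", "buch"], ["the", "book"]),
    AlignedSent(["ein", "buch"], ["a", "book"]),
]
ibm2 = IBMModel2(bitext, 5)
# Where does 'haus' (position j=2 of a 2-word target) align in "the house"?
print(best_source_position(2, "haus", ["the", "house"], 2,
                           ibm2.translation_table, ibm2.alignment_table))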
natural language toolkit ibm model 3 c 2001 2013 nltk project s chin yee lee hengfeng li ruxin hou calvin tanujaya lim url https www nltk org for license information see license txt translation model that considers how a word can be aligned to multiple words in another language ibm model 3 improves on model 2 by directly modeling the phenomenon where a word in one language may be translated into zero or more words in another this is expressed by the fertility probability n phi source word if a source word translates into more than one word it is possible to generate sentences that have the same alignment in multiple ways this is modeled by a distortion step the distortion probability d j i l m predicts a target word position given its aligned source word s position the distortion probability replaces the alignment probability of model 2 the fertility probability is not applicable for null target words that align to null are assumed to be distributed uniformly in the target sentence the existence of these words is modeled by p1 the probability that a target word produced by a real source word requires another target word that is produced by null the em algorithm used in model 3 is e step in the training data collect counts weighted by prior probabilities a count how many times a source language word is translated into a target language word b count how many times a particular position in the target sentence is aligned to a particular position in the source sentence c count how many times a source word is aligned to phi number of target
words d count how many times null is aligned to a target word m step estimate new probabilities based on the counts from the e step because there are too many possible alignments only the most probable ones are considered first the best alignment is determined using prior probabilities then a hill climbing approach is used to find other good candidates notations i position in the source sentence valid values are 0 for null 1 2 length of source sentence j position in the target sentence valid values are 1 2 length of target sentence l number of words in the source sentence excluding null m number of words in the target sentence s a word in the source language t a word in the target language phi fertility the number of target words produced by a source word p1 probability that a target word produced by a source word is accompanied by another target word that is aligned to null p0 1 p1 references philipp koehn 2010 statistical machine translation cambridge university press new york peter e brown stephen a della pietra vincent j della pietra and robert l mercer 1993 the mathematics of statistical machine translation parameter estimation computational linguistics 19 2 263 311 translation model that considers how a word can be aligned to multiple words in another language bitext bitext append alignedsent klein ist das haus the house is small bitext append alignedsent das haus war ja groß the house was big bitext append alignedsent das buch ist ja klein the book is small bitext append alignedsent ein haus ist klein a house is small bitext append alignedsent das haus the house bitext append alignedsent das buch the book bitext append alignedsent ein buch a book bitext append alignedsent ich fasse das buch zusammen i summarize the book bitext append alignedsent fasse zusammen summarize ibm3 ibmmodel3 bitext 5 print round ibm3 translation_table buch book 3 1 0 print round ibm3 translation_table das book 3 0 0 print round ibm3 translation_table ja none 3 1 0 print round ibm3 distortion_table 1 1 2 2 3 1 0 print round ibm3 distortion_table 1 2 2 2 3 0 0 print round ibm3 distortion_table 2 2 4 5 3 0 75 print round ibm3 fertility_table 2 summarize 3 1 0 print round ibm3 fertility_table 1 book 3 1 0 print round ibm3 p1 3 0 054 test_sentence bitext 2 test_sentence words das buch ist ja klein test_sentence mots the book is small test_sentence alignment alignment 0 0 1 1 2 2 3 none 4 3 train on sentence_aligned_corpus and create a lexical translation model a distortion model a fertility model and a model for generating null aligned words translation direction is from alignedsent mots to alignedsent words param sentence_aligned_corpus sentence aligned parallel corpus type sentence_aligned_corpus list alignedsent param iterations number of iterations to run training algorithm type iterations int param probability_tables optional use this to pass in custom probability values if not specified probabilities will be set to a uniform distribution or some other sensible value if specified all the following entries must be present translation_table alignment_table fertility_table p1 distortion_table see ibmmodel for the type and purpose of these tables type probability_tables dict str object get translation and alignment probabilities from ibm model 2 set user defined probabilities dict int int int int float probability j i l m values accessed as distortion_table j i l m d j i l m 1 m for all i j l m simple initialization taken from giza sample the alignment space record the most probable alignment e step a compute 
normalization factors to weigh counts e step b collect counts m step update probabilities with maximum likelihood estimates if any probability is less than min_prob clamp it to min_prob don t retrain probability of target sentence and an alignment given the source sentence exclude null combine null insertion probability compute combination m null_fertility choose null_fertility combine fertility probabilities combine lexical and distortion probabilities data object to store counts of various parameters during training includes counts for distortion
import warnings from collections import defaultdict from math import factorial from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel2 from nltk.translate.ibm_model import Counts class IBMModel3(IBMModel): def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): super().__init__(sentence_aligned_corpus) self.reset_probabilities() if probability_tables is None: ibm2 = IBMModel2(sentence_aligned_corpus, iterations) self.translation_table = ibm2.translation_table self.alignment_table = ibm2.alignment_table self.set_uniform_probabilities(sentence_aligned_corpus) else: self.translation_table = probability_tables["translation_table"] self.alignment_table = probability_tables["alignment_table"] self.fertility_table = probability_tables["fertility_table"] self.p1 = probability_tables["p1"] self.distortion_table = probability_tables["distortion_table"] for n in range(0, iterations): self.train(sentence_aligned_corpus) def reset_probabilities(self): super().reset_probabilities() self.distortion_table = defaultdict( lambda: defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) ) ) def set_uniform_probabilities(self, sentence_aligned_corpus): l_m_combinations = set() for aligned_sentence in sentence_aligned_corpus: l = len(aligned_sentence.mots) m = len(aligned_sentence.words) if (l, m) not in l_m_combinations: l_m_combinations.add((l, m)) initial_prob = 1 / m if initial_prob < IBMModel.MIN_PROB: warnings.warn( "A target sentence is too long (" + str(m) + " words). Results may be less accurate." ) for j in range(1, m + 1): for i in range(0, l + 1): self.distortion_table[j][i][l][m] = initial_prob self.fertility_table[0] = defaultdict(lambda: 0.2) self.fertility_table[1] = defaultdict(lambda: 0.65) self.fertility_table[2] = defaultdict(lambda: 0.1) self.fertility_table[3] = defaultdict(lambda: 0.04) MAX_FERTILITY = 10 initial_fert_prob = 0.01 / (MAX_FERTILITY - 4) for phi in range(4, MAX_FERTILITY): self.fertility_table[phi] = defaultdict(lambda: initial_fert_prob) self.p1 = 0.5 def train(self, parallel_corpus): counts = Model3Counts() for aligned_sentence in parallel_corpus: l = len(aligned_sentence.mots) m = len(aligned_sentence.words) sampled_alignments, best_alignment = self.sample(aligned_sentence) aligned_sentence.alignment = Alignment( best_alignment.zero_indexed_alignment() ) total_count = self.prob_of_alignments(sampled_alignments) for alignment_info in sampled_alignments: count = self.prob_t_a_given_s(alignment_info) normalized_count = count / total_count for j in range(1, m + 1): counts.update_lexical_translation( normalized_count, alignment_info, j ) counts.update_distortion(normalized_count, alignment_info, j, l, m) counts.update_null_generation(normalized_count, alignment_info) counts.update_fertility(normalized_count, alignment_info) existing_alignment_table = self.alignment_table self.reset_probabilities() self.alignment_table = existing_alignment_table self.maximize_lexical_translation_probabilities(counts) self.maximize_distortion_probabilities(counts) self.maximize_fertility_probabilities(counts) self.maximize_null_generation_probabilities(counts) def maximize_distortion_probabilities(self, counts): MIN_PROB = IBMModel.MIN_PROB for j, i_s in counts.distortion.items(): for i, src_sentence_lengths in i_s.items(): for l, trg_sentence_lengths in src_sentence_lengths.items(): for m in trg_sentence_lengths: estimate = ( counts.distortion[j][i][l][m] / counts.distortion_for_any_j[i][l][m] ) self.distortion_table[j][i][l][m] = 
max(estimate, MIN_PROB) def prob_t_a_given_s(self, alignment_info): src_sentence = alignment_info.src_sentence trg_sentence = alignment_info.trg_sentence l = len(src_sentence) - 1 m = len(trg_sentence) - 1 p1 = self.p1 p0 = 1 - p1 probability = 1.0 MIN_PROB = IBMModel.MIN_PROB null_fertility = alignment_info.fertility_of_i(0) probability *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) if probability < MIN_PROB: return MIN_PROB for i in range(1, null_fertility + 1): probability *= (m - null_fertility - i + 1) / i if probability < MIN_PROB: return MIN_PROB for i in range(1, l + 1): fertility = alignment_info.fertility_of_i(i) probability *= ( factorial(fertility) * self.fertility_table[fertility][src_sentence[i]] ) if probability < MIN_PROB: return MIN_PROB for j in range(1, m + 1): t = trg_sentence[j] i = alignment_info.alignment[j] s = src_sentence[i] probability *= ( self.translation_table[t][s] * self.distortion_table[j][i][l][m] ) if probability < MIN_PROB: return MIN_PROB return probability class Model3Counts(Counts): def __init__(self): super().__init__() self.distortion = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) ) self.distortion_for_any_j = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) ) def update_distortion(self, count, alignment_info, j, l, m): i = alignment_info.alignment[j] self.distortion[j][i][l][m] += count self.distortion_for_any_j[i][l][m] += count
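One detail of prob_t_a_given_s above that is easy to miss: the running product over i in range(1, null_fertility + 1) is just the binomial coefficient choose(m - null_fertility, null_fertility), i.e. the number of ways to place the NULL-generated words among the remaining target positions. The quick, self-contained check below verifies that equivalence; the (m, phi0) pairs are arbitrary.

from math import comb  # Python 3.8+

def null_insertion_ways(m, null_fertility):
    # Mirrors the loop in IBMModel3.prob_t_a_given_s.
    product = 1.0
    for i in range(1, null_fertility + 1):
        product *= (m - null_fertility - i + 1) / i
    return product

for m, phi0 in [(5, 1), (6, 2), (9, 3)]:
    assert round(null_insertion_ways(m, phi0)) == comb(m - phi0, phi0)
print("loop matches comb(m - null_fertility, null_fertility)")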
natural language toolkit ibm model 4 c 2001 2023 nltk project tah wei hoon hoon tw gmail com url https www nltk org for license information see license txt translation model that reorders output words based on their type and distance from other related words in the output sentence ibm model 4 improves the distortion model of model 3 motivated by the observation that certain words tend to be re ordered in a predictable way relative to one another for example adjective noun in english usually has its order flipped as noun adjective in french model 4 requires words in the source and target vocabularies to be categorized into classes this can be linguistically driven like parts of speech adjective nouns prepositions etc word classes can also be obtained by statistical methods the original ibm model 4 uses an information theoretic approach to group words into 50 classes for each vocabulary terminology cept a source word with non zero fertility i e aligned to one or more target words tablet the set of target word s aligned to a cept head of cept the first word of the tablet of that cept center of cept the average position of the words in that cept s tablet if the value is not an integer the ceiling is taken for example for a tablet with words in positions 2 5 6 in the target sentence the center of the corresponding cept is ceil 2 5 6 3 5 displacement for a head word defined as position of head word position of previous cept s center can be positive or negative for a non head word defined as position of non head word position of previous word in the same tablet always positive because successive words in a tablet are assumed to appear to the right of the previous word in contrast to model 3 which reorders words in a tablet independently of other words model 4 distinguishes between three cases 1 words generated by null are distributed uniformly 2 for a head word t its position is modeled by the probability d_head displacement word_class_s s word_class_t t where s is the previous cept and word_class_s and word_class_t maps s and t to a source and target language word class respectively 3 for a non head word t its position is modeled by the probability d_non_head displacement word_class_t t the em algorithm used in model 4 is e step in the training data collect counts weighted by prior probabilities a count how many times a source language word is translated into a target language word b for a particular word class count how many times a head word is located at a particular displacement from the previous cept s center c for a particular word class count how many times a non head word is located at a particular displacement from the
previous target word d count how many times a source word is aligned to phi number of target words e count how many times null is aligned to a target word m step estimate new probabilities based on the counts from the e step like model 3 there are too many possible alignments to consider thus a hill climbing approach is used to sample good candidates notations i position in the source sentence valid values are 0 for null 1 2 length of source sentence j position in the target sentence valid values are 1 2 length of target sentence l number of words in the source sentence excluding null m number of words in the target sentence s a word in the source language t a word in the target language phi fertility the number of target words produced by a source word p1 probability that a target word produced by a source word is accompanied by another target word that is aligned to null p0 1 p1 dj displacement δj references philipp koehn 2010 statistical machine translation cambridge university press new york peter e brown stephen a della pietra vincent j della pietra and robert l mercer 1993 the mathematics of statistical machine translation parameter estimation computational linguistics 19 2 263 311 translation model that reorders output words based on their type and their distance from other related words in the output sentence bitext bitext append alignedsent klein ist das haus the house is small bitext append alignedsent das haus war ja groß the house was big bitext append alignedsent das buch ist ja klein the book is small bitext append alignedsent ein haus ist klein a house is small bitext append alignedsent das haus the house bitext append alignedsent das buch the book bitext append alignedsent ein buch a book bitext append alignedsent ich fasse das buch zusammen i summarize the book bitext append alignedsent fasse zusammen summarize src_classes the 0 a 0 small 1 big 1 house 2 book 2 is 3 was 3 i 4 summarize 5 trg_classes das 0 ein 0 haus 1 buch 1 klein 2 groß 2 ist 3 war 3 ja 4 ich 5 fasse 6 zusammen 6 ibm4 ibmmodel4 bitext 5 src_classes trg_classes print round ibm4 translation_table buch book 3 1 0 print round ibm4 translation_table das book 3 0 0 print round ibm4 translation_table ja none 3 1 0 print round ibm4 head_distortion_table 1 0 1 3 1 0 print round ibm4 head_distortion_table 2 0 1 3 0 0 print round ibm4 non_head_distortion_table 3 6 3 0 5 print round ibm4 fertility_table 2 summarize 3 1 0 print round ibm4 fertility_table 1 book 3 1 0 print round ibm4 p1 3 0 033 test_sentence bitext 2 test_sentence words das buch ist ja klein test_sentence mots the book is small test_sentence alignment alignment 0 0 1 1 2 2 3 none 4 3 train on sentence_aligned_corpus and create a lexical translation model distortion models a fertility model and a model for generating null aligned words translation direction is from alignedsent mots to alignedsent words param sentence_aligned_corpus sentence aligned parallel corpus type sentence_aligned_corpus list alignedsent param iterations number of iterations to run training algorithm type iterations int param source_word_classes lookup table that maps a source word to its word class the latter represented by an integer id type source_word_classes dict str int param target_word_classes lookup table that maps a target word to its word class the latter represented by an integer id type target_word_classes dict str int param probability_tables optional use this to pass in custom probability values if not specified probabilities will be set to a uniform distribution or 
some other sensible value if specified all the following entries must be present translation_table alignment_table fertility_table p1 head_distortion_table non_head_distortion_table see ibmmodel and ibmmodel4 for the type and purpose of these tables type probability_tables dict str object get probabilities from ibm model 3 set user defined probabilities dict int int int float probability displacement of head word word class of previous cept target word class values accessed as distortion_table dj src_class trg_class dict int int float probability displacement of non head word target word class values accessed as distortion_table dj trg_class set distortion probabilities uniformly to 1 cardinality of displacement values the maximum displacement is m 1 when a word is in the last position m of the target sentence and the previously placed word is in the first position conversely the minimum displacement is m 1 thus the displacement range is m 1 m 1 note that displacement cannot be zero and is not included in the range sample the alignment space record the most probable alignment e step a compute normalization factors to weigh counts e step b collect counts m step update probabilities with maximum likelihood estimates if any probability is less than min_prob clamp it to min_prob don t retrain probability of target sentence and an alignment given the source sentence exposed for model 5 to use binomial distribution b m null_fertility p1 combination m null_fertility choose null_fertility case 1 t is aligned to null case 2 t is the first word of a tablet case 3 t is a subsequent word of a tablet end nested functions abort computation whenever probability falls below min_prob at any point since min_prob can be considered as zero data object to store counts of various parameters during training includes counts for distortion case 1 t is aligned to null case 2 t is the first word of a tablet case 3 t is a subsequent word of a tablet
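Before the implementation below, a small sketch of the "center of cept" and head-word displacement defined in the terminology above, using the example positions 2, 5, 6 from the text. The head position chosen here is hypothetical and for illustration only; in the code below the same quantity is obtained from alignment_info.center_of_cept().

from math import ceil

def center_of_cept(tablet_positions):
    # Average target position of the cept's tablet, rounded up.
    return ceil(sum(tablet_positions) / len(tablet_positions))

previous_tablet = [2, 5, 6]                 # target positions generated by the previous cept
center = center_of_cept(previous_tablet)    # ceil(13 / 3) = 5
head_position = 7                           # hypothetical position of the current head word
dj = head_position - center                 # displacement used to index head_distortion_table
print(center, dj)                           # 5 2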
import warnings from collections import defaultdict from math import factorial from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel3 from nltk.translate.ibm_model import Counts, longest_target_sentence_length class IBMModel4(IBMModel): def __init__( self, sentence_aligned_corpus, iterations, source_word_classes, target_word_classes, probability_tables=None, ): super().__init__(sentence_aligned_corpus) self.reset_probabilities() self.src_classes = source_word_classes self.trg_classes = target_word_classes if probability_tables is None: ibm3 = IBMModel3(sentence_aligned_corpus, iterations) self.translation_table = ibm3.translation_table self.alignment_table = ibm3.alignment_table self.fertility_table = ibm3.fertility_table self.p1 = ibm3.p1 self.set_uniform_probabilities(sentence_aligned_corpus) else: self.translation_table = probability_tables["translation_table"] self.alignment_table = probability_tables["alignment_table"] self.fertility_table = probability_tables["fertility_table"] self.p1 = probability_tables["p1"] self.head_distortion_table = probability_tables["head_distortion_table"] self.non_head_distortion_table = probability_tables[ "non_head_distortion_table" ] for n in range(0, iterations): self.train(sentence_aligned_corpus) def reset_probabilities(self): super().reset_probabilities() self.head_distortion_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) ) self.non_head_distortion_table = defaultdict( lambda: defaultdict(lambda: self.MIN_PROB) ) def set_uniform_probabilities(self, sentence_aligned_corpus): max_m = longest_target_sentence_length(sentence_aligned_corpus) if max_m <= 1: initial_prob = IBMModel.MIN_PROB else: initial_prob = 1 / (2 * (max_m - 1)) if initial_prob < IBMModel.MIN_PROB: warnings.warn( "A target sentence is too long (" + str(max_m) + " words). Results may be less accurate." 
) for dj in range(1, max_m): self.head_distortion_table[dj] = defaultdict( lambda: defaultdict(lambda: initial_prob) ) self.head_distortion_table[-dj] = defaultdict( lambda: defaultdict(lambda: initial_prob) ) self.non_head_distortion_table[dj] = defaultdict(lambda: initial_prob) self.non_head_distortion_table[-dj] = defaultdict(lambda: initial_prob) def train(self, parallel_corpus): counts = Model4Counts() for aligned_sentence in parallel_corpus: m = len(aligned_sentence.words) sampled_alignments, best_alignment = self.sample(aligned_sentence) aligned_sentence.alignment = Alignment( best_alignment.zero_indexed_alignment() ) total_count = self.prob_of_alignments(sampled_alignments) for alignment_info in sampled_alignments: count = self.prob_t_a_given_s(alignment_info) normalized_count = count / total_count for j in range(1, m + 1): counts.update_lexical_translation( normalized_count, alignment_info, j ) counts.update_distortion( normalized_count, alignment_info, j, self.src_classes, self.trg_classes, ) counts.update_null_generation(normalized_count, alignment_info) counts.update_fertility(normalized_count, alignment_info) existing_alignment_table = self.alignment_table self.reset_probabilities() self.alignment_table = existing_alignment_table self.maximize_lexical_translation_probabilities(counts) self.maximize_distortion_probabilities(counts) self.maximize_fertility_probabilities(counts) self.maximize_null_generation_probabilities(counts) def maximize_distortion_probabilities(self, counts): head_d_table = self.head_distortion_table for dj, src_classes in counts.head_distortion.items(): for s_cls, trg_classes in src_classes.items(): for t_cls in trg_classes: estimate = ( counts.head_distortion[dj][s_cls][t_cls] / counts.head_distortion_for_any_dj[s_cls][t_cls] ) head_d_table[dj][s_cls][t_cls] = max(estimate, IBMModel.MIN_PROB) non_head_d_table = self.non_head_distortion_table for dj, trg_classes in counts.non_head_distortion.items(): for t_cls in trg_classes: estimate = ( counts.non_head_distortion[dj][t_cls] / counts.non_head_distortion_for_any_dj[t_cls] ) non_head_d_table[dj][t_cls] = max(estimate, IBMModel.MIN_PROB) def prob_t_a_given_s(self, alignment_info): return IBMModel4.model4_prob_t_a_given_s(alignment_info, self) @staticmethod def model4_prob_t_a_given_s(alignment_info, ibm_model): probability = 1.0 MIN_PROB = IBMModel.MIN_PROB def null_generation_term(): value = 1.0 p1 = ibm_model.p1 p0 = 1 - p1 null_fertility = alignment_info.fertility_of_i(0) m = len(alignment_info.trg_sentence) - 1 value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) if value < MIN_PROB: return MIN_PROB for i in range(1, null_fertility + 1): value *= (m - null_fertility - i + 1) / i return value def fertility_term(): value = 1.0 src_sentence = alignment_info.src_sentence for i in range(1, len(src_sentence)): fertility = alignment_info.fertility_of_i(i) value *= ( factorial(fertility) * ibm_model.fertility_table[fertility][src_sentence[i]] ) if value < MIN_PROB: return MIN_PROB return value def lexical_translation_term(j): t = alignment_info.trg_sentence[j] i = alignment_info.alignment[j] s = alignment_info.src_sentence[i] return ibm_model.translation_table[t][s] def distortion_term(j): t = alignment_info.trg_sentence[j] i = alignment_info.alignment[j] if i == 0: return 1.0 if alignment_info.is_head_word(j): previous_cept = alignment_info.previous_cept(j) src_class = None if previous_cept is not None: previous_s = alignment_info.src_sentence[previous_cept] src_class = 
ibm_model.src_classes[previous_s] trg_class = ibm_model.trg_classes[t] dj = j - alignment_info.center_of_cept(previous_cept) return ibm_model.head_distortion_table[dj][src_class][trg_class] previous_position = alignment_info.previous_in_tablet(j) trg_class = ibm_model.trg_classes[t] dj = j - previous_position return ibm_model.non_head_distortion_table[dj][trg_class] probability *= null_generation_term() if probability < MIN_PROB: return MIN_PROB probability *= fertility_term() if probability < MIN_PROB: return MIN_PROB for j in range(1, len(alignment_info.trg_sentence)): probability *= lexical_translation_term(j) if probability < MIN_PROB: return MIN_PROB probability *= distortion_term(j) if probability < MIN_PROB: return MIN_PROB return probability class Model4Counts(Counts): def __init__(self): super().__init__() self.head_distortion = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) ) self.head_distortion_for_any_dj = defaultdict(lambda: defaultdict(lambda: 0.0)) self.non_head_distortion = defaultdict(lambda: defaultdict(lambda: 0.0)) self.non_head_distortion_for_any_dj = defaultdict(lambda: 0.0) def update_distortion(self, count, alignment_info, j, src_classes, trg_classes): i = alignment_info.alignment[j] t = alignment_info.trg_sentence[j] if i == 0: pass elif alignment_info.is_head_word(j): previous_cept = alignment_info.previous_cept(j) if previous_cept is not None: previous_src_word = alignment_info.src_sentence[previous_cept] src_class = src_classes[previous_src_word] else: src_class = None trg_class = trg_classes[t] dj = j - alignment_info.center_of_cept(previous_cept) self.head_distortion[dj][src_class][trg_class] += count self.head_distortion_for_any_dj[src_class][trg_class] += count else: previous_j = alignment_info.previous_in_tablet(j) trg_class = trg_classes[t] dj = j - previous_j self.non_head_distortion[dj][trg_class] += count self.non_head_distortion_for_any_dj[trg_class] += count
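A small sanity check on set_uniform_probabilities above: with displacements ranging over ±1 .. ±(max_m - 1) and zero excluded, the uniform value 1 / (2 * (max_m - 1)) puts exactly one unit of probability mass on each (source class, target class) pair. The value of max_m here is arbitrary.

max_m = 5
initial_prob = 1 / (2 * (max_m - 1))
displacements = [dj for dj in range(1, max_m)] + [-dj for dj in range(1, max_m)]
assert len(displacements) == 2 * (max_m - 1)
assert abs(sum(initial_prob for _ in displacements) - 1.0) < 1e-12
print(sorted(displacements))  # -4..-1 and 1..4, zero excluded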
natural language toolkit ibm model 5 c 20012023 nltk project tah wei hoon hoon twgmail com url https www nltk org for license information see license txt translation model that keeps track of vacant positions in the target sentence to decide where to place translated words translation can be viewed as a process where each word in the source sentence is stepped through sequentially generating translated words for each source word the target sentence can be viewed as being made up of m empty slots initially which gradually fill up as generated words are placed in them models 3 and 4 use distortion probabilities to decide how to place translated words for simplicity these models ignore the history of which slots have already been occupied with translated words consider the placement of the last translated word there is only one empty slot left in the target sentence so the distortion probability should be 1 0 for that position and 0 0 everywhere else however the distortion probabilities for models 3 and 4 are set up such that all positions are under consideration ibm model 5 fixes this deficiency by accounting for occupied slots during translation it introduces the vacancy function vj the number of vacancies up to and including position j in the target sentence terminology maximum vacancy the number of valid slots that a word can be placed in this is not necessarily the same as the number of vacant slots for example if a tablet contains more than one word the head word cannot be placed at the last vacant slot because there will be no space for the other words in the tablet the number of valid slots has to take into account the length of the tablet nonhead words cannot be placed before the head word so vacancies to the left of the head word are ignored vacancy difference for a head word vj vcenter of previous cept can be positive or negative for a nonhead word vj vposition of previously placed word always positive because successive words in a tablet are assumed to appear to the right of the previous word positioning of target words fall under three cases 1 words generated by null are distributed uniformly 2 for a head word t its position is modeled by the probability vheaddv maxv wordclasstt 3 for a nonhead word t its position is modeled by the probability vnonheaddv maxv wordclasstt dv and maxv are defined differently for head and nonhead words the em algorithm used in model 5 is e step in the training data collect counts weighted by prior probabilities a count how many times a source language word is translated into a target language word b for a particular word class and maximum vacancy count how many times a head word and the previous cept s center have a particular difference in number of vacancies b for a particular word class and maximum vacancy count how many times a nonhead word and the previous target word have a particular difference in number of vacancies d count how many times a source word is aligned to phi number of target words e count how many times null is aligned to a target word m step estimate new probabilities based on the counts from the e step like model 4 there are too many possible alignments to consider thus a hill climbing approach is used to sample good candidates in addition pruning is used to weed out unlikely alignments based on model 4 scores notations i position in the source sentence valid values are 0 for null 1 2 length of source sentence j position in the target sentence valid values are 1 2 length of target sentence l number of words in the source 
sentence excluding null m number of words in the target sentence s a word in the source language t a word in the target language phi fertility the number of target words produced by a source word p1 probability that a target word produced by a source word is accompanied by another target word that is aligned to null p0 1 p1 maxv maximum vacancy dv vacancy difference v the definition of vhead here differs from giza section 4 7 of brown et al 1993 and koehn 2010 in the latter cases vhead is vheadvj vcenter of previous cept maxv wordclasst here we follow appendix b of brown et al 1993 and combine vj with vcenter of previous cept to obtain dv vheadvj vcenter of previous cept maxv wordclasst references philipp koehn 2010 statistical machine translation cambridge university press new york peter e brown stephen a della pietra vincent j della pietra and robert l mercer 1993 the mathematics of statistical machine translation parameter estimation computational linguistics 19 2 263311 translation model that keeps track of vacant positions in the target sentence to decide where to place translated words bitext bitext appendalignedsent klein ist das haus the house is small bitext appendalignedsent das haus war ja gro the house was big bitext appendalignedsent das buch ist ja klein the book is small bitext appendalignedsent ein haus ist klein a house is small bitext appendalignedsent das haus the house bitext appendalignedsent das buch the book bitext appendalignedsent ein buch a book bitext appendalignedsent ich fasse das buch zusammen i summarize the book bitext appendalignedsent fasse zusammen summarize srcclasses the 0 a 0 small 1 big 1 house 2 book 2 is 3 was 3 i 4 summarize 5 trgclasses das 0 ein 0 haus 1 buch 1 klein 2 gro 2 ist 3 war 3 ja 4 ich 5 fasse 6 zusammen 6 ibm5 ibmmodel5bitext 5 srcclasses trgclasses printroundibm5 headvacancytable111 3 1 0 printroundibm5 headvacancytable211 3 0 0 printroundibm5 nonheadvacancytable336 3 1 0 printroundibm5 fertilitytable2 summarize 3 1 0 printroundibm5 fertilitytable1 book 3 1 0 printroundibm5 p1 3 0 033 testsentence bitext2 testsentence words das buch ist ja klein testsentence mots the book is small testsentence alignment alignment0 0 1 1 2 2 3 none 4 3 alignments with scores below this factor are pruned during sampling train on sentencealignedcorpus and create a lexical translation model vacancy models a fertility model and a model for generating nullaligned words translation direction is from alignedsent mots to alignedsent words param sentencealignedcorpus sentencealigned parallel corpus type sentencealignedcorpus listalignedsent param iterations number of iterations to run training algorithm type iterations int param sourcewordclasses lookup table that maps a source word to its word class the latter represented by an integer id type sourcewordclasses dictstr int param targetwordclasses lookup table that maps a target word to its word class the latter represented by an integer id type targetwordclasses dictstr int param probabilitytables optional use this to pass in custom probability values if not specified probabilities will be set to a uniform distribution or some other sensible value if specified all the following entries must be present translationtable alignmenttable fertilitytable p1 headdistortiontable nonheaddistortiontable headvacancytable nonheadvacancytable see ibmmodel ibmmodel4 and ibmmodel5 for the type and purpose of these tables type probabilitytables dictstr object get probabilities from ibm model 4 set userdefined probabilities 
dictintintint float probabilityvacancy difference number of remaining valid positions target word class values accessed as headvacancytabledvvmaxtrgclass dictintintint float probabilityvacancy difference number of remaining valid positions target word class values accessed as nonheadvacancytabledvvmaxtrgclass set vacancy probabilities uniformly to 1 cardinality of vacancy difference values the maximum vacancy difference occurs when a word is placed in the last available position m of the target sentence and the previous word position has no vacancies the minimum is 1maxv when a word is placed in the first available position and the previous word is placed beyond the last available position thus the number of possible vacancy difference values is maxv 1maxv 1 2 maxv sample the alignment space record the most probable alignment e step a compute normalization factors to weigh counts e step b collect counts m step update probabilities with maximum likelihood estimates if any probability is less than minprob clamp it to minprob sample the most probable alignments from the entire alignment space according to model 4 note that model 4 scoring is used instead of model 5 because the latter is too expensive to compute first determine the best alignment according to ibm model 2 with this initial alignment use hill climbing to determine the best alignment according to a ibm model 4 add this alignment and its neighbors to the sample set repeat this process with other initial alignments obtained by pegging an alignment point finally prune alignments that have substantially lower model 4 scores than the best alignment param sentencepair source and target language sentence pair to generate a sample of alignments from type sentencepair alignedsent return a set of best alignments represented by their alignmentinfo and the best alignment of the set for convenience rtype setalignmentinfo alignmentinfo removes alignments from alignmentinfos that have substantially lower model 4 scores than the best alignment return pruned alignments rtype setalignmentinfo starting from the alignment in alignmentinfo look at neighboring alignments iteratively for the best one according to model 4 note that model 4 scoring is used instead of model 5 because the latter is too expensive to compute there is no guarantee that the best alignment in the alignment space will be found because the algorithm might be stuck in a local maximum param jpegged if specified the search will be constrained to alignments where jpegged remains unchanged type jpegged int return the best alignment found from hill climbing rtype alignmentinfo until there are no better alignments probability of target sentence and an alignment given the source sentence binomial distribution bm nullfertility p1 combination m nullfertility choose nullfertility case 1 nullaligned words case 2 head word case 3 nonhead words end nested functions abort computation whenever probability falls below minprob at any point since minprob can be considered as zero data object to store counts of various parameters during training includes counts for vacancies param count value to add to the vacancy counts param alignmentinfo alignment under consideration param i source word position under consideration param trgclasses target word classes param slots vacancy states of the slots in the target sentence output parameter that will be modified as new words are placed in the target sentence case 1 null aligned words case 2 head word case 3 nonhead words represents positions in a target 
sentence used to keep track of which slot position is occupied return mark slot at position as occupied return number of vacant slots up to and including position
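Before the implementation below, it may help to see the vacancy bookkeeping in isolation. This is a standalone sketch, not the library's Slots class: the helper name and the example numbers are made up, but v(j) and the head-word vacancy difference dv are computed exactly as described above.

# v(j): number of vacant slots among positions 1..j of the target sentence (1-indexed).
def vacancies_up_to(occupied_positions, j):
    return sum(1 for k in range(1, j + 1) if k not in occupied_positions)

occupied = {2, 4}   # slots 2 and 4 already hold translated words
m = 5               # target sentence length

# v(j) for every position of the target sentence.
print([vacancies_up_to(occupied, j) for j in range(1, m + 1)])   # [1, 1, 2, 2, 3]

# Vacancy difference for a head word placed at j=5 whose previous cept is centred at position 2.
dv = vacancies_up_to(occupied, 5) - vacancies_up_to(occupied, 2)
print(dv)   # 2

The Slots class defined in the code below keeps the same information, with occupy() marking positions as they are filled and vacancies_at() returning the same count.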
import warnings from collections import defaultdict from math import factorial from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel4 from nltk.translate.ibm_model import Counts, longest_target_sentence_length class IBMModel5(IBMModel): MIN_SCORE_FACTOR = 0.2 def __init__( self, sentence_aligned_corpus, iterations, source_word_classes, target_word_classes, probability_tables=None, ): super().__init__(sentence_aligned_corpus) self.reset_probabilities() self.src_classes = source_word_classes self.trg_classes = target_word_classes if probability_tables is None: ibm4 = IBMModel4( sentence_aligned_corpus, iterations, source_word_classes, target_word_classes, ) self.translation_table = ibm4.translation_table self.alignment_table = ibm4.alignment_table self.fertility_table = ibm4.fertility_table self.p1 = ibm4.p1 self.head_distortion_table = ibm4.head_distortion_table self.non_head_distortion_table = ibm4.non_head_distortion_table self.set_uniform_probabilities(sentence_aligned_corpus) else: self.translation_table = probability_tables["translation_table"] self.alignment_table = probability_tables["alignment_table"] self.fertility_table = probability_tables["fertility_table"] self.p1 = probability_tables["p1"] self.head_distortion_table = probability_tables["head_distortion_table"] self.non_head_distortion_table = probability_tables[ "non_head_distortion_table" ] self.head_vacancy_table = probability_tables["head_vacancy_table"] self.non_head_vacancy_table = probability_tables["non_head_vacancy_table"] for n in range(0, iterations): self.train(sentence_aligned_corpus) def reset_probabilities(self): super().reset_probabilities() self.head_vacancy_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) ) self.non_head_vacancy_table = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) ) def set_uniform_probabilities(self, sentence_aligned_corpus): max_m = longest_target_sentence_length(sentence_aligned_corpus) if max_m > 0 and (1 / (2 * max_m)) < IBMModel.MIN_PROB: warnings.warn( "A target sentence is too long (" + str(max_m) + " words). Results may be less accurate." 
) for max_v in range(1, max_m + 1): for dv in range(1, max_m + 1): initial_prob = 1 / (2 * max_v) self.head_vacancy_table[dv][max_v] = defaultdict(lambda: initial_prob) self.head_vacancy_table[-(dv - 1)][max_v] = defaultdict( lambda: initial_prob ) self.non_head_vacancy_table[dv][max_v] = defaultdict( lambda: initial_prob ) self.non_head_vacancy_table[-(dv - 1)][max_v] = defaultdict( lambda: initial_prob ) def train(self, parallel_corpus): counts = Model5Counts() for aligned_sentence in parallel_corpus: l = len(aligned_sentence.mots) m = len(aligned_sentence.words) sampled_alignments, best_alignment = self.sample(aligned_sentence) aligned_sentence.alignment = Alignment( best_alignment.zero_indexed_alignment() ) total_count = self.prob_of_alignments(sampled_alignments) for alignment_info in sampled_alignments: count = self.prob_t_a_given_s(alignment_info) normalized_count = count / total_count for j in range(1, m + 1): counts.update_lexical_translation( normalized_count, alignment_info, j ) slots = Slots(m) for i in range(1, l + 1): counts.update_vacancy( normalized_count, alignment_info, i, self.trg_classes, slots ) counts.update_null_generation(normalized_count, alignment_info) counts.update_fertility(normalized_count, alignment_info) existing_alignment_table = self.alignment_table self.reset_probabilities() self.alignment_table = existing_alignment_table self.maximize_lexical_translation_probabilities(counts) self.maximize_vacancy_probabilities(counts) self.maximize_fertility_probabilities(counts) self.maximize_null_generation_probabilities(counts) def sample(self, sentence_pair): sampled_alignments, best_alignment = super().sample(sentence_pair) return self.prune(sampled_alignments), best_alignment def prune(self, alignment_infos): alignments = [] best_score = 0 for alignment_info in alignment_infos: score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self) best_score = max(score, best_score) alignments.append((alignment_info, score)) threshold = IBMModel5.MIN_SCORE_FACTOR * best_score alignments = [a[0] for a in alignments if a[1] > threshold] return set(alignments) def hillclimb(self, alignment_info, j_pegged=None): alignment = alignment_info max_probability = IBMModel4.model4_prob_t_a_given_s(alignment, self) while True: old_alignment = alignment for neighbor_alignment in self.neighboring(alignment, j_pegged): neighbor_probability = IBMModel4.model4_prob_t_a_given_s( neighbor_alignment, self ) if neighbor_probability > max_probability: alignment = neighbor_alignment max_probability = neighbor_probability if alignment == old_alignment: break alignment.score = max_probability return alignment def prob_t_a_given_s(self, alignment_info): probability = 1.0 MIN_PROB = IBMModel.MIN_PROB slots = Slots(len(alignment_info.trg_sentence) - 1) def null_generation_term(): value = 1.0 p1 = self.p1 p0 = 1 - p1 null_fertility = alignment_info.fertility_of_i(0) m = len(alignment_info.trg_sentence) - 1 value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) if value < MIN_PROB: return MIN_PROB for i in range(1, null_fertility + 1): value *= (m - null_fertility - i + 1) / i return value def fertility_term(): value = 1.0 src_sentence = alignment_info.src_sentence for i in range(1, len(src_sentence)): fertility = alignment_info.fertility_of_i(i) value *= ( factorial(fertility) * self.fertility_table[fertility][src_sentence[i]] ) if value < MIN_PROB: return MIN_PROB return value def lexical_translation_term(j): t = alignment_info.trg_sentence[j] i = alignment_info.alignment[j] s = 
alignment_info.src_sentence[i] return self.translation_table[t][s] def vacancy_term(i): value = 1.0 tablet = alignment_info.cepts[i] tablet_length = len(tablet) total_vacancies = slots.vacancies_at(len(slots)) if tablet_length == 0: return value j = tablet[0] previous_cept = alignment_info.previous_cept(j) previous_center = alignment_info.center_of_cept(previous_cept) dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) max_v = total_vacancies - tablet_length + 1 trg_class = self.trg_classes[alignment_info.trg_sentence[j]] value *= self.head_vacancy_table[dv][max_v][trg_class] slots.occupy(j) total_vacancies -= 1 if value < MIN_PROB: return MIN_PROB for k in range(1, tablet_length): previous_position = tablet[k - 1] previous_vacancies = slots.vacancies_at(previous_position) j = tablet[k] dv = slots.vacancies_at(j) - previous_vacancies max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies trg_class = self.trg_classes[alignment_info.trg_sentence[j]] value *= self.non_head_vacancy_table[dv][max_v][trg_class] slots.occupy(j) total_vacancies -= 1 if value < MIN_PROB: return MIN_PROB return value probability *= null_generation_term() if probability < MIN_PROB: return MIN_PROB probability *= fertility_term() if probability < MIN_PROB: return MIN_PROB for j in range(1, len(alignment_info.trg_sentence)): probability *= lexical_translation_term(j) if probability < MIN_PROB: return MIN_PROB for i in range(1, len(alignment_info.src_sentence)): probability *= vacancy_term(i) if probability < MIN_PROB: return MIN_PROB return probability def maximize_vacancy_probabilities(self, counts): MIN_PROB = IBMModel.MIN_PROB head_vacancy_table = self.head_vacancy_table for dv, max_vs in counts.head_vacancy.items(): for max_v, trg_classes in max_vs.items(): for t_cls in trg_classes: estimate = ( counts.head_vacancy[dv][max_v][t_cls] / counts.head_vacancy_for_any_dv[max_v][t_cls] ) head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) non_head_vacancy_table = self.non_head_vacancy_table for dv, max_vs in counts.non_head_vacancy.items(): for max_v, trg_classes in max_vs.items(): for t_cls in trg_classes: estimate = ( counts.non_head_vacancy[dv][max_v][t_cls] / counts.non_head_vacancy_for_any_dv[max_v][t_cls] ) non_head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) class Model5Counts(Counts): def __init__(self): super().__init__() self.head_vacancy = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) ) self.head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) self.non_head_vacancy = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) ) self.non_head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) def update_vacancy(self, count, alignment_info, i, trg_classes, slots): tablet = alignment_info.cepts[i] tablet_length = len(tablet) total_vacancies = slots.vacancies_at(len(slots)) if tablet_length == 0: return j = tablet[0] previous_cept = alignment_info.previous_cept(j) previous_center = alignment_info.center_of_cept(previous_cept) dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) max_v = total_vacancies - tablet_length + 1 trg_class = trg_classes[alignment_info.trg_sentence[j]] self.head_vacancy[dv][max_v][trg_class] += count self.head_vacancy_for_any_dv[max_v][trg_class] += count slots.occupy(j) total_vacancies -= 1 for k in range(1, tablet_length): previous_position = tablet[k - 1] previous_vacancies = slots.vacancies_at(previous_position) j = tablet[k] dv = slots.vacancies_at(j) - 
previous_vacancies max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies trg_class = trg_classes[alignment_info.trg_sentence[j]] self.non_head_vacancy[dv][max_v][trg_class] += count self.non_head_vacancy_for_any_dv[max_v][trg_class] += count slots.occupy(j) total_vacancies -= 1 class Slots: def __init__(self, target_sentence_length): self._slots = [False] * (target_sentence_length + 1) def occupy(self, position): self._slots[position] = True def vacancies_at(self, position): vacancies = 0 for k in range(1, position + 1): if not self._slots[k]: vacancies += 1 return vacancies def __len__(self): return len(self._slots) - 1
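One part of the constructor above that is easy to miss is the probability_tables parameter. The sketch below shows the expected shape of that dictionary by training a model once and then building a second instance from its tables with zero further EM iterations; the toy corpus and class ids are invented, and this is only one plausible way to reuse the tables, not a prescribed workflow.

from nltk.translate import AlignedSent
from nltk.translate.ibm5 import IBMModel5

bitext = [
    AlignedSent(["klein", "ist", "das", "haus"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "buch", "ist", "klein"], ["the", "book", "is", "small"]),
    AlignedSent(["das", "haus"], ["the", "house"]),
    AlignedSent(["das", "buch"], ["the", "book"]),
]
src_classes = {"the": 0, "house": 1, "book": 1, "is": 2, "small": 3}
trg_classes = {"das": 0, "haus": 1, "buch": 1, "ist": 2, "klein": 3}

ibm5 = IBMModel5(bitext, 5, src_classes, trg_classes)

# All eight entries read by the constructor are required when overriding.
tables = {
    "translation_table": ibm5.translation_table,
    "alignment_table": ibm5.alignment_table,
    "fertility_table": ibm5.fertility_table,
    "p1": ibm5.p1,
    "head_distortion_table": ibm5.head_distortion_table,
    "non_head_distortion_table": ibm5.non_head_distortion_table,
    "head_vacancy_table": ibm5.head_vacancy_table,
    "non_head_vacancy_table": ibm5.non_head_vacancy_table,
}

# Zero iterations: the new model keeps the supplied probabilities unchanged.
ibm5_warm = IBMModel5(bitext, 0, src_classes, trg_classes, probability_tables=tables)
print(ibm5_warm.p1 == ibm5.p1)   # True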
natural language toolkit ibm model core c 2001 2023 nltk project tah wei hoon hoon tw gmail com url https www nltk org for license information see license txt common methods and classes for all ibm models see ibmmodel1 ibmmodel2
ibmmodel3 ibmmodel4 and ibmmodel5 for specific implementations the ibm models are a series of generative models that learn lexical translation probabilities p target language word source language word given a sentence aligned parallel corpus the models increase in sophistication from model 1 to 5 typically the output of lower models is used to seed the higher models all models use the expectation maximization em algorithm to learn various probability tables words in a sentence are one indexed the first word of a sentence has position 1 not 0 index 0 is reserved in the source sentence for the null token the concept of position does not apply to null but it is indexed at 0 by convention each target word is aligned to exactly one source word or the null token references philipp koehn 2010 statistical machine translation cambridge university press new york peter e brown stephen a della pietra vincent j della pietra and robert l mercer 1993 the mathematics of statistical machine translation parameter estimation computational linguistics 19 2 263 311 param sentence_aligned_corpus parallel corpus under consideration type sentence_aligned_corpus list alignedsent return number of words in the longest target language sentence of sentence_aligned_corpus abstract base class for all ibm models avoid division by zero and precision errors by imposing a minimum value for probabilities note that this approach is theoretically incorrect since it may create probabilities that sum to more than 1 in practice the contribution of probabilities with min_prob is tiny enough that the value of min_prob can be treated as zero giza is more liberal and uses 1 0e 7 dict str str float probability target word source word values accessed as translation_table target_word source_word dict int int int int float probability i j l m values accessed as alignment_table i j l m used in model 2 and hill climbing in models 3 and above dict int str float probability fertility source word values accessed as fertility_table fertility source_word used in model 3 and higher probability that a generated word requires another target word that is aligned to null used in model 3 and higher initialize probability tables to a uniform distribution derived classes should implement this accordingly add the null token set str all source language words used in training set str all target language words used in training sample the most probable alignments from the entire alignment space first determine the best alignment according to ibm model 2 with this initial alignment use hill climbing to determine the best alignment according to a higher ibm model add this alignment and its neighbors to the sample set repeat this process with other initial alignments obtained by pegging an alignment point hill climbing may be stuck in a local maxima hence the pegging and trying out of different alignments param sentence_pair source and target language sentence pair to generate a sample of alignments from type sentence_pair alignedsent return a set of best alignments represented by their alignmentinfo and the best alignment of the set for convenience rtype set alignmentinfo alignmentinfo start from the best model 2 alignment start from other model 2 alignments with the constraint that j is aligned pegged to i finds the best alignment according to ibm model 2 used as a starting point for hill climbing in models 3 and above because it is easier to compute than the best alignments in higher models param sentence_pair source and target language sentence pair to be 
word aligned type sentence_pair alignedsent param j_pegged if specified the alignment point of j_pegged will be fixed to i_pegged type j_pegged int param i_pegged alignment point to j_pegged type i_pegged int 1 indexed exclude null init all alignments to null init all cepts to empty list use the pegged alignment instead of searching for best one starting from the alignment in alignment_info look at neighboring alignments iteratively for the best one there is no guarantee that the best alignment in the alignment space will be found because the algorithm might be stuck in a local maximum param j_pegged if specified the search will be constrained to alignments where j_pegged remains unchanged type j_pegged int return the best alignment found from hill climbing rtype alignmentinfo alias with shorter name until there are no better alignments determine the neighbors of alignment_info obtained by moving or swapping one alignment point param j_pegged if specified neighbors that have a different alignment point from j_pegged will not be considered type j_pegged int return a set neighboring alignments represented by their alignmentinfo rtype set alignmentinfo exclude null add alignments that differ by one alignment point update alignment update cepts add alignments that have two alignment points swapped update alignments update cepts clip p1 if it is too large because p0 1 p1 should not be smaller than min_prob probability of target sentence and an alignment given the source sentence all required information is assumed to be in alignment_info and self derived classes should override this method helper data object for training ibm models 3 and up read only for a source sentence and its counterpart in the target language this class holds information about the sentence pair s alignment cepts and fertility warning alignments are one indexed here in contrast to nltk translate alignment and alignedsent which are zero indexed this class is not meant to be used outside of ibm models tuple int alignment function alignment j is the position in the source sentence that is aligned to the position j in the target sentence tuple str source sentence referred to by this object should include null token none in index 0 tuple str target sentence referred to by this object should have a dummy element in index 0 so that the first word starts from index 1 list list int the positions of the target words in ascending order aligned to a source word position for example cepts 4 2 3 7 means that words in positions 2 3 and 7 of the target sentence are aligned to the word in position 4 of the source sentence float optional probability of alignment as defined by the ibm model that assesses this alignment fertility of word in position i of the source sentence return whether the word in position j of the target sentence is a head word return the ceiling of the average positions of the words in the tablet of cept i or 0 if i is none return the previous cept of j or none if j belongs to the first cept return the position of the previous word that is in the same tablet as j or none if j is the first word of the tablet return zero indexed alignment suitable for use in external nltk translate modules like nltk translate alignment rtype list tuple alignment to null token data object to store counts of various parameters during training
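The AlignmentInfo helper described above can be exercised on its own. In this sketch the sentences, alignment and cepts are hand-made (and must be mutually consistent, following the one-indexed convention with NULL at source position 0 and a dummy element at target position 0); only the class and its methods come from the module whose code follows.

from nltk.translate.ibm_model import AlignmentInfo

src_sentence = (None, "ja", "gruen", "ist", "der", "baum")   # invented source sentence, NULL at index 0
trg_sentence = ("UNUSED", "the", "tree", "is", "green")      # invented target sentence, dummy at index 0
alignment = (0, 4, 5, 3, 2)             # alignment[j] = source position aligned to target position j
cepts = [[], [], [4], [3], [1], [2]]    # cepts[i] = target positions aligned to source position i

info = AlignmentInfo(alignment, src_sentence, trg_sentence, cepts)

print(info.fertility_of_i(2))           # 1: one target word is aligned to source position 2
print(info.is_head_word(2))             # True: target position 2 is the first word of its tablet
print(info.center_of_cept(2))           # 4: ceiling of the mean target position in cepts[2]
print(info.previous_cept(2))            # 4: nearest lower source position with non-zero fertility
print(info.zero_indexed_alignment())    # [(0, 3), (1, 4), (2, 2), (3, 1)]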
from bisect import insort_left from collections import defaultdict from copy import deepcopy from math import ceil def longest_target_sentence_length(sentence_aligned_corpus): max_m = 0 for aligned_sentence in sentence_aligned_corpus: m = len(aligned_sentence.words) max_m = max(m, max_m) return max_m class IBMModel: MIN_PROB = 1.0e-12 def __init__(self, sentence_aligned_corpus): self.init_vocab(sentence_aligned_corpus) self.reset_probabilities() def reset_probabilities(self): self.translation_table = defaultdict( lambda: defaultdict(lambda: IBMModel.MIN_PROB) ) self.alignment_table = defaultdict( lambda: defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: IBMModel.MIN_PROB)) ) ) self.fertility_table = defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) self.p1 = 0.5 def set_uniform_probabilities(self, sentence_aligned_corpus): pass def init_vocab(self, sentence_aligned_corpus): src_vocab = set() trg_vocab = set() for aligned_sentence in sentence_aligned_corpus: trg_vocab.update(aligned_sentence.words) src_vocab.update(aligned_sentence.mots) src_vocab.add(None) self.src_vocab = src_vocab self.trg_vocab = trg_vocab def sample(self, sentence_pair): sampled_alignments = set() l = len(sentence_pair.mots) m = len(sentence_pair.words) initial_alignment = self.best_model2_alignment(sentence_pair) potential_alignment = self.hillclimb(initial_alignment) sampled_alignments.update(self.neighboring(potential_alignment)) best_alignment = potential_alignment for j in range(1, m + 1): for i in range(0, l + 1): initial_alignment = self.best_model2_alignment(sentence_pair, j, i) potential_alignment = self.hillclimb(initial_alignment, j) neighbors = self.neighboring(potential_alignment, j) sampled_alignments.update(neighbors) if potential_alignment.score > best_alignment.score: best_alignment = potential_alignment return sampled_alignments, best_alignment def best_model2_alignment(self, sentence_pair, j_pegged=None, i_pegged=0): src_sentence = [None] + sentence_pair.mots trg_sentence = ["UNUSED"] + sentence_pair.words l = len(src_sentence) - 1 m = len(trg_sentence) - 1 alignment = [0] * (m + 1) cepts = [[] for i in range(l + 1)] for j in range(1, m + 1): if j == j_pegged: best_i = i_pegged else: best_i = 0 max_alignment_prob = IBMModel.MIN_PROB t = trg_sentence[j] for i in range(0, l + 1): s = src_sentence[i] alignment_prob = ( self.translation_table[t][s] * self.alignment_table[i][j][l][m] ) if alignment_prob >= max_alignment_prob: max_alignment_prob = alignment_prob best_i = i alignment[j] = best_i cepts[best_i].append(j) return AlignmentInfo( tuple(alignment), tuple(src_sentence), tuple(trg_sentence), cepts ) def hillclimb(self, alignment_info, j_pegged=None): alignment = alignment_info max_probability = self.prob_t_a_given_s(alignment) while True: old_alignment = alignment for neighbor_alignment in self.neighboring(alignment, j_pegged): neighbor_probability = self.prob_t_a_given_s(neighbor_alignment) if neighbor_probability > max_probability: alignment = neighbor_alignment max_probability = neighbor_probability if alignment == old_alignment: break alignment.score = max_probability return alignment def neighboring(self, alignment_info, j_pegged=None): neighbors = set() l = len(alignment_info.src_sentence) - 1 m = len(alignment_info.trg_sentence) - 1 original_alignment = alignment_info.alignment original_cepts = alignment_info.cepts for j in range(1, m + 1): if j != j_pegged: for i in range(0, l + 1): new_alignment = list(original_alignment) new_cepts = deepcopy(original_cepts) old_i = 
original_alignment[j] new_alignment[j] = i insort_left(new_cepts[i], j) new_cepts[old_i].remove(j) new_alignment_info = AlignmentInfo( tuple(new_alignment), alignment_info.src_sentence, alignment_info.trg_sentence, new_cepts, ) neighbors.add(new_alignment_info) for j in range(1, m + 1): if j != j_pegged: for other_j in range(1, m + 1): if other_j != j_pegged and other_j != j: new_alignment = list(original_alignment) new_cepts = deepcopy(original_cepts) other_i = original_alignment[other_j] i = original_alignment[j] new_alignment[j] = other_i new_alignment[other_j] = i new_cepts[other_i].remove(other_j) insort_left(new_cepts[other_i], j) new_cepts[i].remove(j) insort_left(new_cepts[i], other_j) new_alignment_info = AlignmentInfo( tuple(new_alignment), alignment_info.src_sentence, alignment_info.trg_sentence, new_cepts, ) neighbors.add(new_alignment_info) return neighbors def maximize_lexical_translation_probabilities(self, counts): for t, src_words in counts.t_given_s.items(): for s in src_words: estimate = counts.t_given_s[t][s] / counts.any_t_given_s[s] self.translation_table[t][s] = max(estimate, IBMModel.MIN_PROB) def maximize_fertility_probabilities(self, counts): for phi, src_words in counts.fertility.items(): for s in src_words: estimate = counts.fertility[phi][s] / counts.fertility_for_any_phi[s] self.fertility_table[phi][s] = max(estimate, IBMModel.MIN_PROB) def maximize_null_generation_probabilities(self, counts): p1_estimate = counts.p1 / (counts.p1 + counts.p0) p1_estimate = max(p1_estimate, IBMModel.MIN_PROB) self.p1 = min(p1_estimate, 1 - IBMModel.MIN_PROB) def prob_of_alignments(self, alignments): probability = 0 for alignment_info in alignments: probability += self.prob_t_a_given_s(alignment_info) return probability def prob_t_a_given_s(self, alignment_info): return 0.0 class AlignmentInfo: def __init__(self, alignment, src_sentence, trg_sentence, cepts): if not isinstance(alignment, tuple): raise TypeError( "The alignment must be a tuple because it is used " "to uniquely identify AlignmentInfo objects." 
) self.alignment = alignment self.src_sentence = src_sentence self.trg_sentence = trg_sentence self.cepts = cepts self.score = None def fertility_of_i(self, i): return len(self.cepts[i]) def is_head_word(self, j): i = self.alignment[j] return self.cepts[i][0] == j def center_of_cept(self, i): if i is None: return 0 average_position = sum(self.cepts[i]) / len(self.cepts[i]) return int(ceil(average_position)) def previous_cept(self, j): i = self.alignment[j] if i == 0: raise ValueError( "Words aligned to NULL cannot have a previous " "cept because NULL has no position" ) previous_cept = i - 1 while previous_cept > 0 and self.fertility_of_i(previous_cept) == 0: previous_cept -= 1 if previous_cept <= 0: previous_cept = None return previous_cept def previous_in_tablet(self, j): i = self.alignment[j] tablet_position = self.cepts[i].index(j) if tablet_position == 0: return None return self.cepts[i][tablet_position - 1] def zero_indexed_alignment(self): zero_indexed_alignment = [] for j in range(1, len(self.trg_sentence)): i = self.alignment[j] - 1 if i < 0: i = None zero_indexed_alignment.append((j - 1, i)) return zero_indexed_alignment def __eq__(self, other): return self.alignment == other.alignment def __ne__(self, other): return not self == other def __hash__(self): return hash(self.alignment) class Counts: def __init__(self): self.t_given_s = defaultdict(lambda: defaultdict(lambda: 0.0)) self.any_t_given_s = defaultdict(lambda: 0.0) self.p0 = 0.0 self.p1 = 0.0 self.fertility = defaultdict(lambda: defaultdict(lambda: 0.0)) self.fertility_for_any_phi = defaultdict(lambda: 0.0) def update_lexical_translation(self, count, alignment_info, j): i = alignment_info.alignment[j] t = alignment_info.trg_sentence[j] s = alignment_info.src_sentence[i] self.t_given_s[t][s] += count self.any_t_given_s[s] += count def update_null_generation(self, count, alignment_info): m = len(alignment_info.trg_sentence) - 1 fertility_of_null = alignment_info.fertility_of_i(0) self.p1 += fertility_of_null * count self.p0 += (m - 2 * fertility_of_null) * count def update_fertility(self, count, alignment_info): for i in range(0, len(alignment_info.src_sentence)): s = alignment_info.src_sentence[i] phi = alignment_info.fertility_of_i(i) self.fertility[phi][s] += count self.fertility_for_any_phi[s] += count
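The sampling machinery above, a Model 2 seed alignment followed by greedy hill climbing, can be driven directly through any of the higher models, since they inherit best_model2_alignment(), hillclimb() and neighboring() from IBMModel. A minimal sketch using a trained IBMModel4; the toy corpus and word-class ids are invented.

from nltk.translate import AlignedSent, IBMModel4

bitext = [
    AlignedSent(["klein", "ist", "das", "haus"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "haus", "ist", "klein"], ["the", "house", "is", "small"]),
    AlignedSent(["das", "buch", "ist", "klein"], ["the", "book", "is", "small"]),
]
src_classes = {"the": 0, "house": 1, "book": 1, "is": 2, "small": 3}
trg_classes = {"das": 0, "haus": 1, "buch": 1, "ist": 2, "klein": 3}

ibm4 = IBMModel4(bitext, 5, src_classes, trg_classes)

# Seed with the best Model 2 alignment, then climb to a local optimum under the Model 4 score.
seed = ibm4.best_model2_alignment(bitext[0])
best = ibm4.hillclimb(seed)

print(best.alignment)                   # one-indexed tuple; index 0 is a dummy alignment point
print(best.zero_indexed_alignment())    # zero-indexed pairs, usable with nltk.translate.Alignment
print(best.score > 0)                   # hillclimb() stores the model score on the result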
natural language toolkit machine translation c 20012023 nltk project uday krishna udaykrishna5gmail com contributor tom aarsen url https www nltk org for license information see license txt takes in pretokenized inputs for hypothesis and reference and returns enumerated word lists for each of them param hypothesis pretokenized hypothesis param reference pretokenized reference preprocess preprocessing method default str lower return enumerated words list matches exact words in hypothesis and reference and returns a word mapping based on the enumerated word id between hypothesis and reference param hypothesis pretokenized hypothesis param reference pretokenized reference return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples matches exact words in hypothesis and reference and returns a word mapping between enumhypothesislist and enumreferencelist based on the enumerated word id param enumhypothesislist enumerated hypothesis list param enumreferencelist enumerated reference list return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples stems each word and matches them in hypothesis and reference and returns a word mapping between enumhypothesislist and enumreferencelist based on the enumerated word id the function also returns a enumerated list of unmatched words for hypothesis and reference param enumhypothesislist enumerated hypothesis list param enumreferencelist enumerated reference list param stemmer nltk stem api stemmeri object default porterstemmer return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples stems each word and matches them in hypothesis and reference and returns a word mapping between hypothesis and reference param hypothesis pretokenized hypothesis param reference pretokenized reference param stemmer nltk stem api stemmeri object default porterstemmer return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples matches each word in reference to a word in hypothesis if any synonym of a hypothesis word is the exact match to the reference word param enumhypothesislist enumerated hypothesis list param enumreferencelist enumerated reference list param wordnet a wordnet corpus reader object default nltk corpus wordnet matches each word in reference to a word in hypothesis if any synonym of a hypothesis word is the exact match to the reference word param hypothesis pretokenized hypothesis param reference pretokenized reference param wordnet a wordnet corpus reader object default nltk corpus wordnet return list of mapped tuples alignsmatches words in the hypothesis to reference by sequentially applying exact match stemmed match and wordnet based synonym match in case there are multiple matches the match which has the least number of crossing is chosen takes enumerated list as input instead of string input param enumhypothesislist enumerated hypothesis list param enumreferencelist enumerated reference list param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader object default nltk corpus wordnet return sorted list of matched tuples unmatched hypothesis list unmatched reference list alignsmatches words in the hypothesis to reference by sequentially applying exact match stemmed match and wordnet based synonym match in case there are multiple matches the match which has the least number of crossing is chosen param hypothesis 
pretokenized hypothesis param reference pretokenized reference param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader object default nltk corpus wordnet return sorted list of matched tuples unmatched hypothesis list unmatched reference list counts the fewest possible number of chunks such that matched unigrams of each chunk are adjacent to each other this is used to calculate the fragmentation part of the metric param matches list containing a mapping of matched words output of alignwords return number of chunks a sentence is divided into post alignment calculates meteor score for single hypothesis and reference as per meteor an automatic metric for mt evaluation with highlevels of correlation with human judgments by alon lavie and abhaya agarwal in proceedings of acl https www cs cmu edualaviemeteorpdflavieagarwal2007meteor pdf hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party reference1 it is a guide to action that ensures that the military will forever heed party commands roundsinglemeteorscorereference1 hypothesis1 4 0 6944 if there is no words match during the alignment the method returns the score as 0 we can safely return a zero instead of raising a division by zero error as no match usually implies a bad translation roundsinglemeteorscore this is a cat non matching hypothesis 4 0 0 param reference pretokenized reference param hypothesis pretokenized hypothesis param preprocess preprocessing function default str lower param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader object default nltk corpus wordnet param alpha parameter for controlling relative weights of precision and recall param beta parameter for controlling shape of penalty as a function of as a function of fragmentation param gamma relative weight assigned to fragmentation penalty return the sentencelevel meteor score calculates meteor score for hypothesis with multiple references as described in meteor an automatic metric for mt evaluation with highlevels of correlation with human judgments by alon lavie and abhaya agarwal in proceedings of acl https www cs cmu edualaviemeteorpdflavieagarwal2007meteor pdf in case of multiple references the best score is chosen this method iterates over singlemeteorscore and picks the best pair among all the references for a given hypothesis hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party hypothesis2 it is to insure the troops forever hearing the activity guidebook that party direct reference1 it is a guide to action that ensures that the military will forever heed party commands reference2 it is the guiding principle which guarantees the military forces always being under the command of the party reference3 it is the practical guide for the army always to heed the directions of the party roundmeteorscorereference1 reference2 reference3 hypothesis1 4 0 6944 if there is no words match during the alignment the method returns the score as 0 we can safely return a zero instead of raising a division by zero error as no match usually implies a bad translation roundmeteorscore this is a cat non matching hypothesis 4 0 0 param references pretokenized reference sentences param hypothesis a pretokenized hypothesis sentence param preprocess preprocessing function default str lower param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader 
object default nltk corpus wordnet param alpha parameter for controlling relative weights of precision and recall param beta parameter for controlling shape of penalty as a function of as a function of fragmentation param gamma relative weight assigned to fragmentation penalty return the sentencelevel meteor score natural language toolkit machine translation c 2001 2023 nltk project uday krishna udaykrishna5 gmail com contributor tom aarsen url https www nltk org for license information see license txt takes in pre tokenized inputs for hypothesis and reference and returns enumerated word lists for each of them param hypothesis pre tokenized hypothesis param reference pre tokenized reference preprocess preprocessing method default str lower return enumerated words list matches exact words in hypothesis and reference and returns a word mapping based on the enumerated word id between hypothesis and reference param hypothesis pre tokenized hypothesis param reference pre tokenized reference return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples matches exact words in hypothesis and reference and returns a word mapping between enum_hypothesis_list and enum_reference_list based on the enumerated word id param enum_hypothesis_list enumerated hypothesis list param enum_reference_list enumerated reference list return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples stems each word and matches them in hypothesis and reference and returns a word mapping between enum_hypothesis_list and enum_reference_list based on the enumerated word id the function also returns a enumerated list of unmatched words for hypothesis and reference param enum_hypothesis_list enumerated hypothesis list param enum_reference_list enumerated reference list param stemmer nltk stem api stemmeri object default porterstemmer return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples stems each word and matches them in hypothesis and reference and returns a word mapping between hypothesis and reference param hypothesis pre tokenized hypothesis param reference pre tokenized reference param stemmer nltk stem api stemmeri object default porterstemmer return enumerated matched tuples enumerated unmatched hypothesis tuples enumerated unmatched reference tuples matches each word in reference to a word in hypothesis if any synonym of a hypothesis word is the exact match to the reference word param enum_hypothesis_list enumerated hypothesis list param enum_reference_list enumerated reference list param wordnet a wordnet corpus reader object default nltk corpus wordnet matches each word in reference to a word in hypothesis if any synonym of a hypothesis word is the exact match to the reference word param hypothesis pre tokenized hypothesis param reference pre tokenized reference param wordnet a wordnet corpus reader object default nltk corpus wordnet return list of mapped tuples aligns matches words in the hypothesis to reference by sequentially applying exact match stemmed match and wordnet based synonym match in case there are multiple matches the match which has the least number of crossing is chosen takes enumerated list as input instead of string input param enum_hypothesis_list enumerated hypothesis list param enum_reference_list enumerated reference list param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader object default nltk 
corpus wordnet return sorted list of matched tuples unmatched hypothesis list unmatched reference list aligns matches words in the hypothesis to reference by sequentially applying exact match stemmed match and wordnet based synonym match in case there are multiple matches the match which has the least number of crossing is chosen param hypothesis pre tokenized hypothesis param reference pre tokenized reference param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader object default nltk corpus wordnet return sorted list of matched tuples unmatched hypothesis list unmatched reference list counts the fewest possible number of chunks such that matched unigrams of each chunk are adjacent to each other this is used to calculate the fragmentation part of the metric param matches list containing a mapping of matched words output of align_words return number of chunks a sentence is divided into post alignment calculates meteor score for single hypothesis and reference as per meteor an automatic metric for mt evaluation with highlevels of correlation with human judgments by alon lavie and abhaya agarwal in proceedings of acl https www cs cmu edu alavie meteor pdf lavie agarwal 2007 meteor pdf hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party reference1 it is a guide to action that ensures that the military will forever heed party commands round single_meteor_score reference1 hypothesis1 4 0 6944 if there is no words match during the alignment the method returns the score as 0 we can safely return a zero instead of raising a division by zero error as no match usually implies a bad translation round single_meteor_score this is a cat non matching hypothesis 4 0 0 param reference pre tokenized reference param hypothesis pre tokenized hypothesis param preprocess preprocessing function default str lower param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader object default nltk corpus wordnet param alpha parameter for controlling relative weights of precision and recall param beta parameter for controlling shape of penalty as a function of as a function of fragmentation param gamma relative weight assigned to fragmentation penalty return the sentence level meteor score calculates meteor score for hypothesis with multiple references as described in meteor an automatic metric for mt evaluation with highlevels of correlation with human judgments by alon lavie and abhaya agarwal in proceedings of acl https www cs cmu edu alavie meteor pdf lavie agarwal 2007 meteor pdf in case of multiple references the best score is chosen this method iterates over single_meteor_score and picks the best pair among all the references for a given hypothesis hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party hypothesis2 it is to insure the troops forever hearing the activity guidebook that party direct reference1 it is a guide to action that ensures that the military will forever heed party commands reference2 it is the guiding principle which guarantees the military forces always being under the command of the party reference3 it is the practical guide for the army always to heed the directions of the party round meteor_score reference1 reference2 reference3 hypothesis1 4 0 6944 if there is no words match during the alignment the method returns the score as 0 we can safely return a zero instead of raising a division by zero 
error as no match usually implies a bad translation round meteor_score this is a cat non matching hypothesis 4 0 0 param references pre tokenized reference sentences param hypothesis a pre tokenized hypothesis sentence param preprocess preprocessing function default str lower param stemmer nltk stem api stemmeri object default porterstemmer param wordnet a wordnet corpus reader object default nltk corpus wordnet param alpha parameter for controlling relative weights of precision and recall param beta parameter for controlling shape of penalty as a function of as a function of fragmentation param gamma relative weight assigned to fragmentation penalty return the sentence level meteor score
from itertools import chain, product
from typing import Callable, Iterable, List, Tuple

from nltk.corpus import WordNetCorpusReader, wordnet
from nltk.stem.api import StemmerI
from nltk.stem.porter import PorterStemmer


def _generate_enums(
    hypothesis: Iterable[str],
    reference: Iterable[str],
    preprocess: Callable[[str], str] = str.lower,
) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]:
    if isinstance(hypothesis, str):
        raise TypeError(
            f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}'
        )

    if isinstance(reference, str):
        raise TypeError(
            f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}'
        )

    enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis)))
    enum_reference_list = list(enumerate(map(preprocess, reference)))
    return enum_hypothesis_list, enum_reference_list


def exact_match(
    hypothesis: Iterable[str], reference: Iterable[str]
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
    return _match_enums(enum_hypothesis_list, enum_reference_list)


def _match_enums(
    enum_hypothesis_list: List[Tuple[int, str]],
    enum_reference_list: List[Tuple[int, str]],
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    word_match = []
    for i in range(len(enum_hypothesis_list))[::-1]:
        for j in range(len(enum_reference_list))[::-1]:
            if enum_hypothesis_list[i][1] == enum_reference_list[j][1]:
                word_match.append(
                    (enum_hypothesis_list[i][0], enum_reference_list[j][0])
                )
                enum_hypothesis_list.pop(i)
                enum_reference_list.pop(j)
                break
    return word_match, enum_hypothesis_list, enum_reference_list


def _enum_stem_match(
    enum_hypothesis_list: List[Tuple[int, str]],
    enum_reference_list: List[Tuple[int, str]],
    stemmer: StemmerI = PorterStemmer(),
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    stemmed_enum_hypothesis_list = [
        (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_hypothesis_list
    ]

    stemmed_enum_reference_list = [
        (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_reference_list
    ]

    return _match_enums(stemmed_enum_hypothesis_list, stemmed_enum_reference_list)


def stem_match(
    hypothesis: Iterable[str],
    reference: Iterable[str],
    stemmer: StemmerI = PorterStemmer(),
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
    return _enum_stem_match(enum_hypothesis_list, enum_reference_list, stemmer=stemmer)


def _enum_wordnetsyn_match(
    enum_hypothesis_list: List[Tuple[int, str]],
    enum_reference_list: List[Tuple[int, str]],
    wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    word_match = []
    for i in range(len(enum_hypothesis_list))[::-1]:
        hypothesis_syns = set(
            chain.from_iterable(
                (
                    lemma.name()
                    for lemma in synset.lemmas()
                    if lemma.name().find("_") < 0
                )
                for synset in wordnet.synsets(enum_hypothesis_list[i][1])
            )
        ).union({enum_hypothesis_list[i][1]})
        for j in range(len(enum_reference_list))[::-1]:
            if enum_reference_list[j][1] in hypothesis_syns:
                word_match.append(
                    (enum_hypothesis_list[i][0], enum_reference_list[j][0])
                )
                enum_hypothesis_list.pop(i)
                enum_reference_list.pop(j)
                break
    return word_match, enum_hypothesis_list, enum_reference_list


def wordnetsyn_match(
    hypothesis: Iterable[str],
    reference: Iterable[str],
    wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
    return _enum_wordnetsyn_match(
        enum_hypothesis_list, enum_reference_list, wordnet=wordnet
    )


def _enum_align_words(
    enum_hypothesis_list: List[Tuple[int, str]],
    enum_reference_list: List[Tuple[int, str]],
    stemmer: StemmerI = PorterStemmer(),
    wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    exact_matches, enum_hypothesis_list, enum_reference_list = _match_enums(
        enum_hypothesis_list, enum_reference_list
    )

    stem_matches, enum_hypothesis_list, enum_reference_list = _enum_stem_match(
        enum_hypothesis_list, enum_reference_list, stemmer=stemmer
    )

    wns_matches, enum_hypothesis_list, enum_reference_list = _enum_wordnetsyn_match(
        enum_hypothesis_list, enum_reference_list, wordnet=wordnet
    )

    return (
        sorted(
            exact_matches + stem_matches + wns_matches, key=lambda wordpair: wordpair[0]
        ),
        enum_hypothesis_list,
        enum_reference_list,
    )


def align_words(
    hypothesis: Iterable[str],
    reference: Iterable[str],
    stemmer: StemmerI = PorterStemmer(),
    wordnet: WordNetCorpusReader = wordnet,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]:
    enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
    return _enum_align_words(
        enum_hypothesis_list, enum_reference_list, stemmer=stemmer, wordnet=wordnet
    )


def _count_chunks(matches: List[Tuple[int, int]]) -> int:
    i = 0
    chunks = 1
    while i < len(matches) - 1:
        if (matches[i + 1][0] == matches[i][0] + 1) and (
            matches[i + 1][1] == matches[i][1] + 1
        ):
            i += 1
            continue
        i += 1
        chunks += 1
    return chunks


def single_meteor_score(
    reference: Iterable[str],
    hypothesis: Iterable[str],
    preprocess: Callable[[str], str] = str.lower,
    stemmer: StemmerI = PorterStemmer(),
    wordnet: WordNetCorpusReader = wordnet,
    alpha: float = 0.9,
    beta: float = 3.0,
    gamma: float = 0.5,
) -> float:
    enum_hypothesis, enum_reference = _generate_enums(
        hypothesis, reference, preprocess=preprocess
    )
    translation_length = len(enum_hypothesis)
    reference_length = len(enum_reference)
    matches, _, _ = _enum_align_words(
        enum_hypothesis, enum_reference, stemmer=stemmer, wordnet=wordnet
    )
    matches_count = len(matches)
    try:
        precision = float(matches_count) / translation_length
        recall = float(matches_count) / reference_length
        fmean = (precision * recall) / (alpha * precision + (1 - alpha) * recall)
        chunk_count = float(_count_chunks(matches))
        frag_frac = chunk_count / matches_count
    except ZeroDivisionError:
        return 0.0
    penalty = gamma * frag_frac**beta
    return (1 - penalty) * fmean


def meteor_score(
    references: Iterable[Iterable[str]],
    hypothesis: Iterable[str],
    preprocess: Callable[[str], str] = str.lower,
    stemmer: StemmerI = PorterStemmer(),
    wordnet: WordNetCorpusReader = wordnet,
    alpha: float = 0.9,
    beta: float = 3.0,
    gamma: float = 0.5,
) -> float:
    return max(
        single_meteor_score(
            reference,
            hypothesis,
            preprocess=preprocess,
            stemmer=stemmer,
            wordnet=wordnet,
            alpha=alpha,
            beta=beta,
            gamma=gamma,
        )
        for reference in references
    )
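A short usage sketch for the module above, assuming the WordNet corpus has been installed (for example via nltk.download("wordnet")); the expected value is taken from the doctest quoted in the description.

from nltk.translate.meteor_score import meteor_score, single_meteor_score

reference = "it is a guide to action that ensures that the military will forever heed party commands".split()
hypothesis = "it is a guide to action which ensures that the military always obeys the commands of the party".split()

# Single reference vs. hypothesis: the doctest above reports 0.6944.
print(round(single_meteor_score(reference, hypothesis), 4))

# With multiple references, meteor_score returns the best-scoring pair;
# with a single reference it reduces to single_meteor_score.
print(round(meteor_score([reference], hypothesis), 4))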
natural language toolkit translation metrics c 20012023 nltk project will zhang wilzzhagmail com guan gui gguistudent unimelb edu au steven bird stevenbird1gmail com url https www nltk org for license information see license txt return the alignment error rate aer of an alignment with respect to a gold standard reference alignment return an error rate between 0 0 perfect alignment and 1 0 no alignment from nltk translate import alignment ref alignment0 0 1 1 2 2 test alignment0 0 1 2 2 1 alignmenterrorrateref test doctest ellipsis 0 6666666666666667 type reference alignment param reference a gold standard alignment sure alignments type hypothesis alignment param hypothesis a hypothesis alignment aka candidate alignments type possible alignment or none param possible a gold standard reference of possible alignments defaults to reference if none rtype float or none natural language toolkit translation metrics c 2001 2023 nltk project will zhang wilzzha gmail com guan gui ggui student unimelb edu au steven bird stevenbird1 gmail com url https www nltk org for license information see license txt return the alignment error rate aer of an alignment with respect to a gold standard reference alignment return an error rate between 0 0 perfect alignment and 1 0 no alignment from nltk translate import alignment ref alignment 0 0 1 1 2 2 test alignment 0 0 1 2 2 1 alignment_error_rate ref test doctest ellipsis 0 6666666666666667 type reference alignment param reference a gold standard alignment sure alignments type hypothesis alignment param hypothesis a hypothesis alignment aka candidate alignments type possible alignment or none param possible a gold standard reference of possible alignments defaults to reference if none rtype float or none sanity check
def alignment_error_rate(reference, hypothesis, possible=None):
    if possible is None:
        possible = reference
    else:
        assert reference.issubset(possible)

    return 1.0 - (len(hypothesis & reference) + len(hypothesis & possible)) / float(
        len(hypothesis) + len(reference)
    )
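A minimal usage sketch reproducing the doctest quoted in the description above, using the NLTK Alignment class.

from nltk.translate import Alignment
from nltk.translate.metrics import alignment_error_rate

ref = Alignment([(0, 0), (1, 1), (2, 2)])
test = Alignment([(0, 0), (1, 2), (2, 1)])

# 0.0 is a perfect alignment, 1.0 means no agreement at all.
print(alignment_error_rate(ref, test))  # ~0.6667, as in the doctest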
natural language toolkit nist score c 20012023 nltk project s contributors url https www nltk org for license information see license txt nist score implementation import fractions import math from collections import counter from nltk util import ngrams def sentencenistreferences hypothesis n5 return corpusnistreferences hypothesis n def corpusnistlistofreferences hypotheses n5 before proceeding to compute nist perform sanity checks assert lenlistofreferences len hypotheses the number of hypotheses and their references should be the same collect the ngram coounts from the reference sentences ngramfreq counter totalreferencewords 0 for references in listofreferences for each source sent there s a list of reference sents for reference in references for each order of ngram count the ngram occurrences for i in range1 n 1 ngramfreq updatengramsreference i totalreferencewords lenreference compute the information weights based on the reference sentences eqn 2 in doddington 2002 infow1 wn log2 of occurrences of w1 wn1 of occurrences of w1 wn informationweights for ngram in ngramfreq w1 wn mgram ngram 1 w1 wn1 from https github commosessmtmosesdecoderblobmasterscriptsgenericmtevalv13a pll546 it s computed as such denominator ngramfreqmgram if mgram and mgram in ngramfreq else denominator totalreferencewords informationweightsngram 1 math logngramfreqngramdenominator math log2 mathematically it s equivalent to the our implementation if mgram and mgram in ngramfreq numerator ngramfreqmgram else numerator totalreferencewords informationweightsngram math lognumerator ngramfreqngram 2 microaverage nistprecisionnumeratorperngram counter nistprecisiondenominatorperngram counter lref lsys 0 0 for each order of ngram for i in range1 n 1 iterate through each hypothesis and their corresponding references for references hypothesis in ziplistofreferences hypotheses hyplen lenhypothesis find reference with the best nist score nistscoreperref for reference in references reflen lenreference counter of ngrams in hypothesis hypngrams counterngramshypothesis i if lenhypothesis i else counter refngrams counterngramsreference i if lenreference i else counter ngramoverlaps hypngrams refngrams precision part of the score in eqn 3 numerator sum informationweightsngram count for ngram count in ngramoverlaps items denominator sumhypngrams values precision 0 if denominator 0 else numerator denominator nistscoreperref append precision numerator denominator reflen best reference precision numerator denominator reflen maxnistscoreperref nistprecisionnumeratorperngrami numerator nistprecisiondenominatorperngrami denominator lref reflen lsys hyplen final nist microaverage mean aggregation nistprecision 0 for i in nistprecisionnumeratorperngram precision nistprecisionnumeratorperngrami nistprecisiondenominatorperngrami nistprecision precision eqn 3 in doddington2002 return nistprecision nistlengthpenaltylref lsys def nistlengthpenaltyreflen hyplen ratio hyplen reflen if 0 ratio 1 ratiox scorex 1 5 0 5 beta math logscorex math logratiox 2 return math expbeta math logratio 2 else ratio 0 or ratio 1 return maxminratio 1 0 0 0 natural language toolkit nist score c 2001 2023 nltk project s contributors url https www nltk org for license information see license txt nist score implementation calculate nist score from george doddington 2002 automatic evaluation of machine translation quality using n gram co occurrence statistics proceedings of hlt morgan kaufmann publishers inc https dl acm org citation cfm id 1289189 1289273 darpa 
commissioned nist to develop an mt evaluation facility based on the bleu score the official script used by nist to compute bleu and nist score is mteval 14 pl the main differences are bleu uses geometric mean of the ngram overlaps nist uses arithmetic mean nist has a different brevity penalty nist score from mteval 14 pl has a self contained tokenizer note the mteval 14 pl includes a smoothing function for bleu score that is not used in the nist score computation hypothesis1 it is a guide to action which ensures that the military always obeys the commands of the party hypothesis2 it is to insure the troops forever hearing the activity guidebook that party direct reference1 it is a guide to action that ensures that the military will forever heed party commands reference2 it is the guiding principle which guarantees the military forces always being under the command of the party reference3 it is the practical guide for the army always to heed the directions of the party sentence_nist reference1 reference2 reference3 hypothesis1 doctest ellipsis 3 3709 sentence_nist reference1 reference2 reference3 hypothesis2 doctest ellipsis 1 4619 param references reference sentences type references list list str param hypothesis a hypothesis sentence type hypothesis list str param n highest n gram order type n int calculate a single corpus level nist score aka system level bleu for all the hypotheses and their respective references param references a corpus of lists of reference sentences w r t hypotheses type references list list list str param hypotheses a list of hypothesis sentences type hypotheses list list str param n highest n gram order type n int before proceeding to compute nist perform sanity checks collect the ngram coounts from the reference sentences for each source sent there s a list of reference sents for each order of ngram count the ngram occurrences compute the information weights based on the reference sentences eqn 2 in doddington 2002 info w_1 w_n log_2 of occurrences of w_1 w_n 1 of occurrences of w_1 w_n w_1 w_n w_1 w_n 1 from https github com moses smt mosesdecoder blob master scripts generic mteval v13a pl l546 it s computed as such denominator ngram_freq _mgram if _mgram and _mgram in ngram_freq else denominator total_reference_words information_weights _ngram 1 math log ngram_freq _ngram denominator math log 2 mathematically it s equivalent to the our implementation micro average for each order of ngram iterate through each hypothesis and their corresponding references find reference with the best nist score counter of ngrams in hypothesis precision part of the score in eqn 3 best reference final nist micro average mean aggregation eqn 3 in doddington 2002 calculates the nist length penalty from eq 3 in doddington 2002 penalty exp beta log min len hyp len ref 1 0 where beta is chosen to make the brevity penalty factor 0 5 when the no of words in the system output hyp is 2 3 of the average no of words in the reference translation ref the nist penalty is different from bleu s such that it minimize the impact of the score of small variations in the length of a translation see fig 4 in doddington 2002 ratio 0 or ratio 1
import fractions
import math
from collections import Counter

from nltk.util import ngrams


def sentence_nist(references, hypothesis, n=5):
    return corpus_nist([references], [hypothesis], n)


def corpus_nist(list_of_references, hypotheses, n=5):
    assert len(list_of_references) == len(
        hypotheses
    ), "The number of hypotheses and their reference(s) should be the same"

    ngram_freq = Counter()
    total_reference_words = 0
    for references in list_of_references:
        for reference in references:
            for i in range(1, n + 1):
                ngram_freq.update(ngrams(reference, i))
            total_reference_words += len(reference)

    information_weights = {}
    for _ngram in ngram_freq:
        _mgram = _ngram[:-1]
        if _mgram and _mgram in ngram_freq:
            numerator = ngram_freq[_mgram]
        else:
            numerator = total_reference_words
        information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2)

    nist_precision_numerator_per_ngram = Counter()
    nist_precision_denominator_per_ngram = Counter()
    l_ref, l_sys = 0, 0
    for i in range(1, n + 1):
        for references, hypothesis in zip(list_of_references, hypotheses):
            hyp_len = len(hypothesis)

            nist_score_per_ref = []
            for reference in references:
                _ref_len = len(reference)
                hyp_ngrams = (
                    Counter(ngrams(hypothesis, i))
                    if len(hypothesis) >= i
                    else Counter()
                )
                ref_ngrams = (
                    Counter(ngrams(reference, i)) if len(reference) >= i else Counter()
                )
                ngram_overlaps = hyp_ngrams & ref_ngrams
                _numerator = sum(
                    information_weights[_ngram] * count
                    for _ngram, count in ngram_overlaps.items()
                )
                _denominator = sum(hyp_ngrams.values())
                _precision = 0 if _denominator == 0 else _numerator / _denominator
                nist_score_per_ref.append(
                    (_precision, _numerator, _denominator, _ref_len)
                )
            precision, numerator, denominator, ref_len = max(nist_score_per_ref)
            nist_precision_numerator_per_ngram[i] += numerator
            nist_precision_denominator_per_ngram[i] += denominator
            l_ref += ref_len
            l_sys += hyp_len

    nist_precision = 0
    for i in nist_precision_numerator_per_ngram:
        precision = (
            nist_precision_numerator_per_ngram[i]
            / nist_precision_denominator_per_ngram[i]
        )
        nist_precision += precision

    return nist_precision * nist_length_penalty(l_ref, l_sys)


def nist_length_penalty(ref_len, hyp_len):
    ratio = hyp_len / ref_len
    if 0 < ratio < 1:
        ratio_x, score_x = 1.5, 0.5
        beta = math.log(score_x) / math.log(ratio_x) ** 2
        return math.exp(beta * math.log(ratio) ** 2)
    else:
        return max(min(ratio, 1.0), 0.0)
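A short usage sketch for the scoring functions above; the sentences are taken from the doctest in the description, but only one reference is used here, so no exact score is claimed.

from nltk.translate.nist_score import corpus_nist, sentence_nist

hyp = "it is a guide to action which ensures that the military always obeys the commands of the party".split()
ref = "it is a guide to action that ensures that the military will forever heed party commands".split()

# Sentence-level convenience wrapper (a list of references per hypothesis).
print(round(sentence_nist([ref], hyp), 4))

# Corpus-level call with a corpus of one sentence pair; by construction this
# returns the same value as the sentence-level call above.
print(round(corpus_nist([[ref]], [hyp], n=5), 4))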
natural language toolkit phrase extraction algorithm c 20012023 nltk project s liling tan fredrik hedman petra barancikova url https www nltk org for license information see license txt this function checks for alignment point consistency and extracts phrases using the chunk of consistent phrases a phrase pair e f is consistent with an alignment a if and only if i no english words in the phrase pair are aligned to words outside it e i e e i f j a f j f ii no foreign words in the phrase pair are aligned to words outside it f j f e i f j a e i e iii the phrase pair contains at least one alignment point e i e f j f s t e i f j a type fstart int param fstart starting index of the possible foreign language phrases type fend int param fend end index of the possible foreign language phrases type estart int param estart starting index of the possible source language phrases type eend int param eend end index of the possible source language phrases type srctext list param srctext the source language tokens a list of string type trgtext list param trgtext the target language tokens a list of string type srclen int param srclen the number of tokens in the source language tokens type trglen int param trglen the number of tokens in the target language tokens check if alignment points are consistent add phrase pairs incl additional unaligned f add phrase pair estart eend fs fe to set e need to 1 in range to include the endpoint include more data for later ordering phrase extraction algorithm extracts all consistent phrase pairs from a wordaligned sentence pair the idea is to loop over all possible source language e phrases and find the minimal foreign phrase f that matches each of them matching is done by identifying all alignment points for the source phrase and finding the shortest foreign phrase that includes all the foreign counterparts for the source words in short a phrase alignment has to a contain all alignment points for all covered words b contain at least one alignment point srctext michael assumes that he will stay in the house trgtext michael geht davon aus dass er im haus bleibt alignment 0 0 1 1 1 2 1 3 2 5 3 6 4 9 5 9 6 7 7 7 8 8 phrases phraseextractionsrctext trgtext alignment for i in sortedphrases printi 0 1 0 1 michael michael 0 2 0 4 michael assumes michael geht davon aus 0 2 0 5 michael assumes michael geht davon aus 0 3 0 6 michael assumes that michael geht davon aus dass 0 4 0 7 michael assumes that he michael geht davon aus dass er 0 9 0 10 michael assumes that he will stay in the house michael geht davon aus dass er im haus bleibt 1 2 1 4 assumes geht davon aus 1 2 1 5 assumes geht davon aus 1 3 1 6 assumes that geht davon aus dass 1 4 1 7 assumes that he geht davon aus dass er 1 9 1 10 assumes that he will stay in the house geht davon aus dass er im haus bleibt 2 3 4 6 that dass 2 3 5 6 that dass 2 4 4 7 that he dass er 2 4 5 7 that he dass er 2 9 4 10 that he will stay in the house dass er im haus bleibt 2 9 5 10 that he will stay in the house dass er im haus bleibt 3 4 6 7 he er 3 9 6 10 he will stay in the house er im haus bleibt 4 6 9 10 will stay bleibt 4 9 7 10 will stay in the house im haus bleibt 6 8 7 8 in the im 6 9 7 9 in the house im haus 8 9 8 9 house haus type srctext str param srctext the sentence string from the source language type trgtext str param trgtext the sentence string from the target language type alignment listtuple param alignment the word alignment outputs as list of tuples where the first elements of tuples are the source words indices and second 
elements are the target words indices this is also the output format of nltk translate ibm1 rtype listtuple return a list of tuples each element in a list is a phrase and each phrase is a tuple made up of i its source location ii its target location iii the source phrase and iii the target phrase the phrase list of tuples represents all the possible phrases extracted from the word alignments type maxphraselength int param maxphraselength maximal phrase length if 0 or not specified it is set to a length of the longer sentence srctext or trgtext keeps an index of which sourcetarget words that are aligned set of phrase pairs bp find the minimally matching foreign phrase f start f end lengthf 0 fstart 0 lenf 1 fend 0 lenf 1 add extract f start f end e start e end to set bp natural language toolkit phrase extraction algorithm c 2001 2023 nltk project s liling tan fredrik hedman petra barancikova url https www nltk org for license information see license txt this function checks for alignment point consistency and extracts phrases using the chunk of consistent phrases a phrase pair e f is consistent with an alignment a if and only if i no english words in the phrase pair are aligned to words outside it e i e e i f j a f j f ii no foreign words in the phrase pair are aligned to words outside it f j f e i f j a e i e iii the phrase pair contains at least one alignment point e i e f j f s t e i f j a type f_start int param f_start starting index of the possible foreign language phrases type f_end int param f_end end index of the possible foreign language phrases type e_start int param e_start starting index of the possible source language phrases type e_end int param e_end end index of the possible source language phrases type srctext list param srctext the source language tokens a list of string type trgtext list param trgtext the target language tokens a list of string type srclen int param srclen the number of tokens in the source language tokens type trglen int param trglen the number of tokens in the target language tokens 0 based indexing check if alignment points are consistent add phrase pairs incl additional unaligned f add phrase pair e_start e_end fs fe to set e need to 1 in range to include the end point include more data for later ordering phrase extraction algorithm extracts all consistent phrase pairs from a word aligned sentence pair the idea is to loop over all possible source language e phrases and find the minimal foreign phrase f that matches each of them matching is done by identifying all alignment points for the source phrase and finding the shortest foreign phrase that includes all the foreign counterparts for the source words in short a phrase alignment has to a contain all alignment points for all covered words b contain at least one alignment point srctext michael assumes that he will stay in the house trgtext michael geht davon aus dass er im haus bleibt alignment 0 0 1 1 1 2 1 3 2 5 3 6 4 9 5 9 6 7 7 7 8 8 phrases phrase_extraction srctext trgtext alignment for i in sorted phrases print i 0 1 0 1 michael michael 0 2 0 4 michael assumes michael geht davon aus 0 2 0 5 michael assumes michael geht davon aus 0 3 0 6 michael assumes that michael geht davon aus dass 0 4 0 7 michael assumes that he michael geht davon aus dass er 0 9 0 10 michael assumes that he will stay in the house michael geht davon aus dass er im haus bleibt 1 2 1 4 assumes geht davon aus 1 2 1 5 assumes geht davon aus 1 3 1 6 assumes that geht davon aus dass 1 4 1 7 assumes that he geht davon aus dass er 
1 9 1 10 assumes that he will stay in the house geht davon aus dass er im haus bleibt 2 3 4 6 that dass 2 3 5 6 that dass 2 4 4 7 that he dass er 2 4 5 7 that he dass er 2 9 4 10 that he will stay in the house dass er im haus bleibt 2 9 5 10 that he will stay in the house dass er im haus bleibt 3 4 6 7 he er 3 9 6 10 he will stay in the house er im haus bleibt 4 6 9 10 will stay bleibt 4 9 7 10 will stay in the house im haus bleibt 6 8 7 8 in the im 6 9 7 9 in the house im haus 8 9 8 9 house haus type srctext str param srctext the sentence string from the source language type trgtext str param trgtext the sentence string from the target language type alignment list tuple param alignment the word alignment outputs as list of tuples where the first elements of tuples are the source words indices and second elements are the target words indices this is also the output format of nltk translate ibm1 rtype list tuple return a list of tuples each element in a list is a phrase and each phrase is a tuple made up of i its source location ii its target location iii the source phrase and iii the target phrase the phrase list of tuples represents all the possible phrases extracted from the word alignments type max_phrase_length int param max_phrase_length maximal phrase length if 0 or not specified it is set to a length of the longer sentence srctext or trgtext e f len e len f keeps an index of which source target words that are aligned set of phrase pairs bp find the minimally matching foreign phrase f start f end length f 0 f_start 0 len f 1 f_end 0 len f 1 0 based indexing add extract f start f end e start e end to set bp
def extract(
    f_start,
    f_end,
    e_start,
    e_end,
    alignment,
    f_aligned,
    srctext,
    trgtext,
    srclen,
    trglen,
    max_phrase_length,
):
    if f_end < 0:
        return {}
    for e, f in alignment:
        if (f_start <= f <= f_end) and (e < e_start or e > e_end):
            return {}

    phrases = set()
    fs = f_start
    while True:
        fe = min(f_end, f_start + max_phrase_length - 1)
        while True:
            src_phrase = " ".join(srctext[e_start : e_end + 1])
            trg_phrase = " ".join(trgtext[fs : fe + 1])
            phrases.add(((e_start, e_end + 1), (fs, fe + 1), src_phrase, trg_phrase))
            fe += 1
            if fe in f_aligned or fe >= trglen:
                break
        fs -= 1
        if fs in f_aligned or fs < 0:
            break

    return phrases


def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0):
    srctext = srctext.split()
    trgtext = trgtext.split()
    srclen = len(srctext)
    trglen = len(trgtext)

    f_aligned = [j for _, j in alignment]
    max_phrase_length = max_phrase_length or max(srclen, trglen)

    bp = set()

    for e_start in range(srclen):
        max_idx = min(srclen, e_start + max_phrase_length)
        for e_end in range(e_start, max_idx):
            f_start, f_end = trglen - 1, -1
            for e, f in alignment:
                if e_start <= e <= e_end:
                    f_start = min(f, f_start)
                    f_end = max(f, f_end)
            phrases = extract(
                f_start,
                f_end,
                e_start,
                e_end,
                alignment,
                f_aligned,
                srctext,
                trgtext,
                srclen,
                trglen,
                max_phrase_length,
            )
            if phrases:
                bp.update(phrases)
    return bp
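A tiny worked example for the extractor above, using a hypothetical two-word sentence pair with a fully monotone alignment so the expected output can be verified by hand.

from nltk.translate.phrase_based import phrase_extraction

srctext = "small house"
trgtext = "kleines haus"
alignment = [(0, 0), (1, 1)]

for span_pair in sorted(phrase_extraction(srctext, trgtext, alignment)):
    print(span_pair)
# ((0, 1), (0, 1), 'small', 'kleines')
# ((0, 2), (0, 2), 'small house', 'kleines haus')
# ((1, 2), (1, 2), 'house', 'haus')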
natural language toolkit ribes score c 20012023 nltk project contributors katsuhito sudoh liling tan kasramvd j f sebastian mark byers ekhumoro p ortiz url https www nltk org for license information see license txt ribes score implementation import math from itertools import islice from nltk util import choose ngrams def sentenceribesreferences hypothesis alpha0 25 beta0 10 bestribes 1 0 calculates ribes for each reference and returns the best score for reference in references collects the worder from the ranked correlation alignments worder wordrankalignmentreference hypothesis nkt kendalltauworder calculates the brevity penalty bp min1 0 math exp1 0 lenreference lenhypothesis calculates the unigram precision p1 p1 lenworder lenhypothesis ribes nkt p1alpha bpbeta if ribes bestribes keeps the best score bestribes ribes return bestribes def corpusribeslistofreferences hypotheses alpha0 25 beta0 10 corpusbestribes 0 0 iterate through each hypothesis and their corresponding references for references hypothesis in ziplistofreferences hypotheses corpusbestribes sentenceribesreferences hypothesis alpha beta return corpusbestribes lenhypotheses def positionofngramngram sentence iterates through the ngrams in sentence for i sublist in enumeratengramssentence lenngram returns the index of the word when ngram matches if ngram sublist return i def wordrankalignmentreference hypothesis characterbasedfalse worder hyplen lenhypothesis stores a list of possible ngrams from the reference sentence this is used for matching context window later in the algorithm refngrams hypngrams for n in range1 lenreference 1 for ng in ngramsreference n refngrams appendng for ng in ngramshypothesis n hypngrams appendng for i hword in enumeratehypothesis if word is not in the reference continue if hword not in reference continue if we can determine onetoone word correspondence for unigrams that only appear once in both the reference and hypothesis elif hypothesis counthword reference counthword 1 worder appendreference indexhword else maxwindowsize maxi hyplen i 1 for window in range1 maxwindowsize if i window hyplen if searching the right context is possible retrieve the right context window rightcontextngram tupleislicehypothesis i i window 1 numtimesinref refngrams countrightcontextngram numtimesinhyp hypngrams countrightcontextngram if ngram appears only once in both ref and hyp if numtimesinref numtimesinhyp 1 find the position of ngram that matched the reference pos positionofngramrightcontextngram reference worder appendpos add the positions of the ngram break if window i if searching the left context is possible retrieve the left context window leftcontextngram tupleislicehypothesis i window i 1 numtimesinref refngrams countleftcontextngram numtimesinhyp hypngrams countleftcontextngram if numtimesinref numtimesinhyp 1 find the position of ngram that matched the reference pos positionofngramleftcontextngram reference add the positions of the ngram worder appendpos lenleftcontextngram 1 break return worder def findincreasingsequencesworder items iterworder a b none nextitems none result b while b is not none a b b nextitems none if b is not none and a 1 b result appendb else if lenresult 1 yield tupleresult result b def kendalltauworder normalizetrue worderlen lenworder with worderlen 2 chooseworderlen 2 will be 0 as we divide by this it will give a zerodivisionerror to avoid this we can just return the lowest possible score if worderlen 2 tau 1 else extract the groups of increasingmonotonic sequences 
increasingsequences findincreasingsequencesworder calculate no of increasingpairs in worder list numincreasingpairs sumchooselenseq 2 for seq in increasingsequences calculate no of possible pairs numpossiblepairs chooseworderlen 2 kendall s tau computation tau 2 numincreasingpairs numpossiblepairs 1 if normalize if normalized the tau output falls between 0 0 to 1 0 return tau 1 2 else otherwise the tau outputs falls between 1 0 to 1 0 return tau def spearmanrhoworder normalizetrue worderlen lenworder sumdsquare sumwi i 2 for wi i in zipworder rangeworderlen rho 1 sumdsquare chooseworderlen 1 3 if normalize if normalized the rho output falls between 0 0 to 1 0 return rho 1 2 else otherwise the rho outputs falls between 1 0 to 1 0 return rho natural language toolkit ribes score c 2001 2023 nltk project contributors katsuhito sudoh liling tan kasramvd j f sebastian mark byers ekhumoro p ortiz url https www nltk org for license information see license txt ribes score implementation the ribes rank based intuitive bilingual evaluation score from hideki isozaki tsutomu hirao kevin duh katsuhito sudoh and hajime tsukada 2010 automatic evaluation of translation quality for distant language pairs in proceedings of emnlp https www aclweb org anthology d d10 d10 1092 pdf the generic ribes scores used in shared task e g workshop for asian translation wat uses the following ribes calculations ribes kendall_tau alpha p1 beta bp please note that this re implementation differs from the official ribes implementation and though it emulates the results as describe in the original paper there are further optimization implemented in the official ribes script users are encouraged to use the official ribes script instead of this implementation when evaluating your machine translation system refer to https www kecl ntt co jp icl lirg ribes for the official script param references a list of reference sentences type references list list str param hypothesis a hypothesis sentence type hypothesis list str param alpha hyperparameter used as a prior for the unigram precision type alpha float param beta hyperparameter used as a prior for the brevity penalty type beta float return the best ribes score from one of the references rtype float calculates ribes for each reference and returns the best score collects the worder from the ranked correlation alignments calculates the brevity penalty calculates the unigram precision p1 keeps the best score this function calculates ribes for a system output hypothesis with multiple references and returns best score among multi references and individual scores the scores are corpus wise i e averaged by the number of sentences c f ribes version 1 03 1 code different from bleu s micro average precision ribes calculates the macro average precision by averaging the best ribes score for each pair of hypothesis and its corresponding references hyp1 it is a guide to action which ensures that the military always obeys the commands of the party ref1a it is a guide to action that ensures that the military will forever heed party commands ref1b it is the guiding principle which guarantees the military forces always being under the command of the party ref1c it is the practical guide for the army always to heed the directions of the party hyp2 he read the book because he was interested in world history ref2a he was interested in world history because he read the book list_of_references ref1a ref1b ref1c ref2a hypotheses hyp1 hyp2 round corpus_ribes list_of_references hypotheses 4 0 3597 param 
references a corpus of lists of reference sentences w r t hypotheses type references list list list str param hypotheses a list of hypothesis sentences type hypotheses list list str param alpha hyperparameter used as a prior for the unigram precision type alpha float param beta hyperparameter used as a prior for the brevity penalty type beta float return the best ribes score from one of the references rtype float iterate through each hypothesis and their corresponding references this function returns the position of the first instance of the ngram appearing in a sentence note that one could also use string as follows but the code is a little convoluted with type casting back and forth char_pos join sent join sent index join ngram word_pos char_pos count another way to conceive this is return next i for i ng in enumerate ngrams sentence len ngram if ng ngram param ngram the ngram that needs to be searched type ngram tuple param sentence the list of tokens to search from type sentence list str iterates through the ngrams in sentence returns the index of the word when ngram matches this is the word rank alignment algorithm described in the paper to produce the worder list i e a list of word indices of the hypothesis word orders w r t the list of reference words below is h0 r0 example from the isozaki et al 2010 paper note the examples are indexed from 1 but the results here are indexed from 0 ref str he was interested in world history because he read the book split hyp str he read the book because he was interested in world history split word_rank_alignment ref hyp 7 8 9 10 6 0 1 2 3 4 5 the h1 r1 example from the paper note the 0th index ref john hit bob yesterday split hyp bob hit john yesterday split word_rank_alignment ref hyp 2 1 0 3 here is the h2 r2 example from the paper note the 0th index here too ref the boy read the book split hyp the book was read by the boy split word_rank_alignment ref hyp 3 4 2 0 1 param reference a reference sentence type reference list str param hypothesis a hypothesis sentence type hypothesis list str stores a list of possible ngrams from the reference sentence this is used for matching context window later in the algorithm if word is not in the reference continue if we can determine one to one word correspondence for unigrams that only appear once in both the reference and hypothesis if searching the right context is possible retrieve the right context window if ngram appears only once in both ref and hyp find the position of ngram that matched the reference add the positions of the ngram if searching the left context is possible retrieve the left context window find the position of ngram that matched the reference add the positions of the ngram given the worder list this function groups monotonic 1 sequences worder 7 8 9 10 6 0 1 2 3 4 5 list find_increasing_sequences worder 7 8 9 10 0 1 2 3 4 5 param worder the worder list output from word_rank_alignment param type list int calculates the kendall s tau correlation coefficient given the worder list of word alignments from word_rank_alignment using the formula tau 2 num_increasing_pairs num_possible_pairs 1 note that the no of increasing pairs can be discontinuous in the worder list and each each increasing sequence can be tabulated as choose len seq 2 no of increasing pairs e g worder 7 8 9 10 6 0 1 2 3 4 5 number_possible_pairs choose len worder 2 round kendall_tau worder normalize false 3 0 236 round kendall_tau worder 3 0 382 param worder the worder list output from word_rank_alignment type worder list 
int param normalize flag to indicate normalization to between 0 0 and 1 0 type normalize boolean return the kendall s tau correlation coefficient rtype float with worder_len 2 choose worder_len 2 will be 0 as we divide by this it will give a zerodivisionerror to avoid this we can just return the lowest possible score extract the groups of increasing monotonic sequences calculate no of increasing_pairs in worder list calculate no of possible pairs kendall s tau computation if normalized the tau output falls between 0 0 to 1 0 otherwise the tau outputs falls between 1 0 to 1 0 calculates the spearman s rho correlation coefficient given the worder list of word alignment from word_rank_alignment using the formula rho 1 sum d 2 choose len worder 1 3 given that d is the sum of difference between the worder list of indices and the original word indices from the reference sentence using the h0 r0 and h5 r5 example from the paper worder 7 8 9 10 6 0 1 2 3 4 5 round spearman_rho worder normalize false 3 0 591 round spearman_rho worder 3 0 205 param worder the worder list output from word_rank_alignment param type list int if normalized the rho output falls between 0 0 to 1 0 otherwise the rho outputs falls between 1 0 to 1 0
import math
from itertools import islice

from nltk.util import choose, ngrams


def sentence_ribes(references, hypothesis, alpha=0.25, beta=0.10):
    best_ribes = -1.0
    for reference in references:
        worder = word_rank_alignment(reference, hypothesis)
        nkt = kendall_tau(worder)

        bp = min(1.0, math.exp(1.0 - len(reference) / len(hypothesis)))

        p1 = len(worder) / len(hypothesis)

        _ribes = nkt * (p1**alpha) * (bp**beta)

        if _ribes > best_ribes:
            best_ribes = _ribes

    return best_ribes


def corpus_ribes(list_of_references, hypotheses, alpha=0.25, beta=0.10):
    corpus_best_ribes = 0.0
    for references, hypothesis in zip(list_of_references, hypotheses):
        corpus_best_ribes += sentence_ribes(references, hypothesis, alpha, beta)
    return corpus_best_ribes / len(hypotheses)


def position_of_ngram(ngram, sentence):
    for i, sublist in enumerate(ngrams(sentence, len(ngram))):
        if ngram == sublist:
            return i


def word_rank_alignment(reference, hypothesis, character_based=False):
    worder = []
    hyp_len = len(hypothesis)
    ref_ngrams = []
    hyp_ngrams = []
    for n in range(1, len(reference) + 1):
        for ng in ngrams(reference, n):
            ref_ngrams.append(ng)
        for ng in ngrams(hypothesis, n):
            hyp_ngrams.append(ng)
    for i, h_word in enumerate(hypothesis):
        if h_word not in reference:
            continue
        elif hypothesis.count(h_word) == reference.count(h_word) == 1:
            worder.append(reference.index(h_word))
        else:
            max_window_size = max(i, hyp_len - i + 1)
            for window in range(1, max_window_size):
                if i + window < hyp_len:
                    right_context_ngram = tuple(islice(hypothesis, i, i + window + 1))
                    num_times_in_ref = ref_ngrams.count(right_context_ngram)
                    num_times_in_hyp = hyp_ngrams.count(right_context_ngram)
                    if num_times_in_ref == num_times_in_hyp == 1:
                        pos = position_of_ngram(right_context_ngram, reference)
                        worder.append(pos)
                        break
                if window <= i:
                    left_context_ngram = tuple(islice(hypothesis, i - window, i + 1))
                    num_times_in_ref = ref_ngrams.count(left_context_ngram)
                    num_times_in_hyp = hyp_ngrams.count(left_context_ngram)
                    if num_times_in_ref == num_times_in_hyp == 1:
                        pos = position_of_ngram(left_context_ngram, reference)
                        worder.append(pos + len(left_context_ngram) - 1)
                        break
    return worder


def find_increasing_sequences(worder):
    items = iter(worder)
    a, b = None, next(items, None)
    result = [b]
    while b is not None:
        a, b = b, next(items, None)
        if b is not None and a + 1 == b:
            result.append(b)
        else:
            if len(result) > 1:
                yield tuple(result)
            result = [b]


def kendall_tau(worder, normalize=True):
    worder_len = len(worder)
    if worder_len < 2:
        tau = -1
    else:
        increasing_sequences = find_increasing_sequences(worder)
        num_increasing_pairs = sum(choose(len(seq), 2) for seq in increasing_sequences)
        num_possible_pairs = choose(worder_len, 2)
        tau = 2 * num_increasing_pairs / num_possible_pairs - 1
    if normalize:
        return (tau + 1) / 2
    else:
        return tau


def spearman_rho(worder, normalize=True):
    worder_len = len(worder)
    sum_d_square = sum((wi - i) ** 2 for wi, i in zip(worder, range(worder_len)))
    rho = 1 - sum_d_square / choose(worder_len + 1, 3)

    if normalize:
        return (rho + 1) / 2
    else:
        return rho
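A short usage sketch using the H0/R0 sentence pair from the description above; the printed worder list and Kendall's tau value are the ones quoted in that description, while the RIBES value itself is not claimed here.

from nltk.translate.ribes_score import kendall_tau, sentence_ribes, word_rank_alignment

ref = "he was interested in world history because he read the book".split()
hyp = "he read the book because he was interested in world history".split()

worder = word_rank_alignment(ref, hyp)
print(worder)                         # [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5]
print(round(kendall_tau(worder), 3))  # 0.382 (normalised to the [0, 1] range)
print(round(sentence_ribes([ref], hyp, alpha=0.25, beta=0.10), 3))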
natural language toolkit stack decoder c 20012023 nltk project tah wei hoon hoon twgmail com url https www nltk org for license information see license txt a decoder that uses stacks to implement phrasebased translation in phrasebased translation the source sentence is segmented into phrases of one or more words and translations for those phrases are used to build the target sentence hypothesis data structures are used to keep track of the source words translated so far and the partial output a hypothesis can be expanded by selecting an untranslated phrase looking up its translation in a phrase table and appending that translation to the partial output translation is complete when a hypothesis covers all source words the search space is huge because the source sentence can be segmented in different ways the source phrases can be selected in any order and there could be multiple translations for the same source phrase in the phrase table to make decoding tractable stacks are used to limit the number of candidate hypotheses by doing histogram andor threshold pruning hypotheses with the same number of words translated are placed in the same stack in histogram pruning each stack has a size limit and the hypothesis with the lowest score is removed when the stack is full in threshold pruning hypotheses that score below a certain threshold of the best hypothesis in that stack are removed hypothesis scoring can include various factors such as phrase translation probability language model probability length of translation cost of remaining words to be translated and so on references philipp koehn 2010 statistical machine translation cambridge university press new york phrasebased stack decoder for machine translation from nltk translate import phrasetable phrasetable phrasetable phrasetable add niemand nobody log0 8 phrasetable add niemand no one log0 2 phrasetable add erwartet expects log0 8 phrasetable add erwartet expecting log0 2 phrasetable add niemand erwartet one does not expect log0 1 phrasetable add die spanische inquisition the spanish inquisition log0 8 phrasetable add log0 8 nltk model should be used here once it is implemented from collections import defaultdict languageprob defaultdictlambda 999 0 languageprob nobody log0 5 languageprob expects log0 4 languageprob the spanish inquisition log0 2 languageprob log0 1 languagemodel type object probabilitychange lambda self context phrase languageprobphrase probability lambda self phrase languageprobphrase stackdecoder stackdecoderphrasetable languagemodel stackdecoder translate niemand erwartet die spanische inquisition nobody expects the spanish inquisition param phrasetable table of translations for source language phrases and the log probabilities for those translations type phrasetable phrasetable param languagemodel target language model must define a probabilitychange method that calculates the change in log probability of a sentence if a given string is appended to it this interface is experimental and will likely be replaced with nltk model once it is implemented type languagemodel object float influences the translation length exponentially if positive shorter translations are preferred if negative longer translations are preferred if zero no penalty is applied float hypotheses that score below this factor of the best hypothesis in a stack are dropped from consideration value between 0 0 and 1 0 int maximum number of hypotheses to consider in a stack higher values increase the likelihood of a good translation but increases 
processing time float amount of reordering of source phrases lower values favour monotone translation suitable when word order is similar for both source and target languages value between 0 0 and 1 0 default 0 5 cache logdistortionfactor so we don t have to recompute it when scoring hypotheses param srcsentence sentence to be translated type srcsentence liststr return translated sentence rtype liststr instead of returning empty output perhaps a partial translation could be returned finds all subsequences in srcsentence that have a phrase translation in the translation table type srcsentence tuplestr return subsequences that have a phrase translation represented as a table of lists of end positions for example if result2 is 5 6 9 then there are three phrases starting from position 2 in srcsentence ending at positions 5 6 and 9 exclusive the list of ending positions are in ascending order rtype listlistint determines the approximate scores for translating every subsequence in srcsentence future scores can be used a lookahead to determine the difficulty of translating the remaining parts of a srcsentence type srcsentence tuplestr return scores of subsequences referenced by their start and end positions for example result25 is the score of the subsequence covering positions 2 3 and 4 rtype dictint dictint float warning api of languagemodel is subject to change check if a better score can be obtained by combining two child subsequences determines the approximate score for translating the untranslated words in hypothesis calculate the score of expanding hypothesis with translationoption param hypothesis hypothesis being expanded type hypothesis hypothesis param translationoption information about the proposed expansion type translationoption phrasetableentry param srcphrasespan word position span of the source phrase type srcphrasespan tupleint int the api of languagemodel is subject to change it could accept a string a list of words andor some other type extract phrases from allphrasesfrom that contains words that have not been translated by hypothesis param allphrasesfrom phrases represented by their spans in the same format as the return value of findallsrcphrases type allphrasesfrom listlistint type hypothesis hypothesis return a list of phrases represented by their spans that cover untranslated positions rtype listtupleint int subsequent elements in allphrasesfromstart will also be availableend since the elements are in ascending order partial solution to a translation records the word positions of the phrase being translated its translation raw score and the cost of the untranslated parts of the sentence when the next phrase is selected to build upon the partial solution a new hypothesis object is created with a back pointer to the previous hypothesis to find out which words have been translated so far look at the srcphrasespan in the hypothesis chain similarly the translation output can be found by traversing up the chain param rawscore likelihood of hypothesis so far higher is better does not account for untranslated words type rawscore float param srcphrasespan span of word positions covered by the source phrase in this hypothesis expansion for example 2 5 means that the phrase is from the second word up to but not including the fifth word in the source sentence type srcphrasespan tupleint param trgphrase translation of the source phrase in this hypothesis expansion type trgphrase tuplestr param previous previous hypothesis before expansion to this one type previous hypothesis param 
futurescore approximate score for translating the remaining words not covered by this hypothesis higher means that the remaining words are easier to translate type futurescore float overall score of hypothesis after accounting for local and global features starting from each untranslated word find the longest continuous span of untranslated positions param sentencelength length of source sentence being translated by the hypothesis type sentencelength int rtype listtupleint int each untranslated span must end in one of the translatedpositions list of positions in the source sentence of words already translated the list is not sorted rtype listint collection of hypothesis objects param beamthreshold hypotheses that score less than this factor of the best hypothesis are discarded from the stack value must be between 0 0 and 1 0 type beamthreshold float add hypothesis to the stack removes lowest scoring hypothesis if the stack is full after insertion hypotheses that score less than beamthreshold times the score of the best hypothesis are removed logscore beamthreshold logscore logbeamthreshold return hypothesis with the highest score in the stack rtype hypothesis natural language toolkit stack decoder c 2001 2023 nltk project tah wei hoon hoon tw gmail com url https www nltk org for license information see license txt a decoder that uses stacks to implement phrase based translation in phrase based translation the source sentence is segmented into phrases of one or more words and translations for those phrases are used to build the target sentence hypothesis data structures are used to keep track of the source words translated so far and the partial output a hypothesis can be expanded by selecting an untranslated phrase looking up its translation in a phrase table and appending that translation to the partial output translation is complete when a hypothesis covers all source words the search space is huge because the source sentence can be segmented in different ways the source phrases can be selected in any order and there could be multiple translations for the same source phrase in the phrase table to make decoding tractable stacks are used to limit the number of candidate hypotheses by doing histogram and or threshold pruning hypotheses with the same number of words translated are placed in the same stack in histogram pruning each stack has a size limit and the hypothesis with the lowest score is removed when the stack is full in threshold pruning hypotheses that score below a certain threshold of the best hypothesis in that stack are removed hypothesis scoring can include various factors such as phrase translation probability language model probability length of translation cost of remaining words to be translated and so on references philipp koehn 2010 statistical machine translation cambridge university press new york phrase based stack decoder for machine translation from nltk translate import phrasetable phrase_table phrasetable phrase_table add niemand nobody log 0 8 phrase_table add niemand no one log 0 2 phrase_table add erwartet expects log 0 8 phrase_table add erwartet expecting log 0 2 phrase_table add niemand erwartet one does not expect log 0 1 phrase_table add die spanische inquisition the spanish inquisition log 0 8 phrase_table add log 0 8 nltk model should be used here once it is implemented from collections import defaultdict language_prob defaultdict lambda 999 0 language_prob nobody log 0 5 language_prob expects log 0 4 language_prob the spanish inquisition log 0 2 
language_prob log 0 1 language_model type object probability_change lambda self context phrase language_prob phrase probability lambda self phrase language_prob phrase stack_decoder stackdecoder phrase_table language_model stack_decoder translate niemand erwartet die spanische inquisition nobody expects the spanish inquisition param phrase_table table of translations for source language phrases and the log probabilities for those translations type phrase_table phrasetable param language_model target language model must define a probability_change method that calculates the change in log probability of a sentence if a given string is appended to it this interface is experimental and will likely be replaced with nltk model once it is implemented type language_model object float influences the translation length exponentially if positive shorter translations are preferred if negative longer translations are preferred if zero no penalty is applied float hypotheses that score below this factor of the best hypothesis in a stack are dropped from consideration value between 0 0 and 1 0 int maximum number of hypotheses to consider in a stack higher values increase the likelihood of a good translation but increases processing time float amount of reordering of source phrases lower values favour monotone translation suitable when word order is similar for both source and target languages value between 0 0 and 1 0 default 0 5 cache log distortion_factor so we don t have to recompute it when scoring hypotheses 1e 9 is almost zero param src_sentence sentence to be translated type src_sentence list str return translated sentence rtype list str prevent accidental modification instead of returning empty output perhaps a partial translation could be returned finds all subsequences in src_sentence that have a phrase translation in the translation table type src_sentence tuple str return subsequences that have a phrase translation represented as a table of lists of end positions for example if result 2 is 5 6 9 then there are three phrases starting from position 2 in src_sentence ending at positions 5 6 and 9 exclusive the list of ending positions are in ascending order rtype list list int determines the approximate scores for translating every subsequence in src_sentence future scores can be used a look ahead to determine the difficulty of translating the remaining parts of a src_sentence type src_sentence tuple str return scores of subsequences referenced by their start and end positions for example result 2 5 is the score of the subsequence covering positions 2 3 and 4 rtype dict int dict int float pick best first translation warning api of language_model is subject to change check if a better score can be obtained by combining two child subsequences determines the approximate score for translating the untranslated words in hypothesis calculate the score of expanding hypothesis with translation_option param hypothesis hypothesis being expanded type hypothesis _hypothesis param translation_option information about the proposed expansion type translation_option phrasetableentry param src_phrase_span word position span of the source phrase type src_phrase_span tuple int int the api of language_model is subject to change it could accept a string a list of words and or some other type extract phrases from all_phrases_from that contains words that have not been translated by hypothesis param all_phrases_from phrases represented by their spans in the same format as the return value of find_all_src_phrases type 
all_phrases_from list list int type hypothesis _hypothesis return a list of phrases represented by their spans that cover untranslated positions rtype list tuple int int subsequent elements in all_phrases_from start will also be available_end since the elements are in ascending order partial solution to a translation records the word positions of the phrase being translated its translation raw score and the cost of the untranslated parts of the sentence when the next phrase is selected to build upon the partial solution a new _hypothesis object is created with a back pointer to the previous hypothesis to find out which words have been translated so far look at the src_phrase_span in the hypothesis chain similarly the translation output can be found by traversing up the chain param raw_score likelihood of hypothesis so far higher is better does not account for untranslated words type raw_score float param src_phrase_span span of word positions covered by the source phrase in this hypothesis expansion for example 2 5 means that the phrase is from the second word up to but not including the fifth word in the source sentence type src_phrase_span tuple int param trg_phrase translation of the source phrase in this hypothesis expansion type trg_phrase tuple str param previous previous hypothesis before expansion to this one type previous _hypothesis param future_score approximate score for translating the remaining words not covered by this hypothesis higher means that the remaining words are easier to translate type future_score float overall score of hypothesis after accounting for local and global features starting from each untranslated word find the longest continuous span of untranslated positions param sentence_length length of source sentence being translated by the hypothesis type sentence_length int rtype list tuple int int add sentinel position each untranslated span must end in one of the translated_positions list of positions in the source sentence of words already translated the list is not sorted rtype list int collection of _hypothesis objects param beam_threshold hypotheses that score less than this factor of the best hypothesis are discarded from the stack value must be between 0 0 and 1 0 type beam_threshold float add hypothesis to the stack removes lowest scoring hypothesis if the stack is full after insertion hypotheses that score less than beam_threshold times the score of the best hypothesis are removed log score beam_threshold log score log beam_threshold return hypothesis with the highest score in the stack rtype _hypothesis
import warnings from collections import defaultdict from math import log class StackDecoder: def __init__(self, phrase_table, language_model): self.phrase_table = phrase_table self.language_model = language_model self.word_penalty = 0.0 self.beam_threshold = 0.0 self.stack_size = 100 self.__distortion_factor = 0.5 self.__compute_log_distortion() @property def distortion_factor(self): return self.__distortion_factor @distortion_factor.setter def distortion_factor(self, d): self.__distortion_factor = d self.__compute_log_distortion() def __compute_log_distortion(self): if self.__distortion_factor == 0.0: self.__log_distortion_factor = log(1e-9) else: self.__log_distortion_factor = log(self.__distortion_factor) def translate(self, src_sentence): sentence = tuple(src_sentence) sentence_length = len(sentence) stacks = [ _Stack(self.stack_size, self.beam_threshold) for _ in range(0, sentence_length + 1) ] empty_hypothesis = _Hypothesis() stacks[0].push(empty_hypothesis) all_phrases = self.find_all_src_phrases(sentence) future_score_table = self.compute_future_scores(sentence) for stack in stacks: for hypothesis in stack: possible_expansions = StackDecoder.valid_phrases( all_phrases, hypothesis ) for src_phrase_span in possible_expansions: src_phrase = sentence[src_phrase_span[0] : src_phrase_span[1]] for translation_option in self.phrase_table.translations_for( src_phrase ): raw_score = self.expansion_score( hypothesis, translation_option, src_phrase_span ) new_hypothesis = _Hypothesis( raw_score=raw_score, src_phrase_span=src_phrase_span, trg_phrase=translation_option.trg_phrase, previous=hypothesis, ) new_hypothesis.future_score = self.future_score( new_hypothesis, future_score_table, sentence_length ) total_words = new_hypothesis.total_translated_words() stacks[total_words].push(new_hypothesis) if not stacks[sentence_length]: warnings.warn( "Unable to translate all words. 
" "The source sentence contains words not in " "the phrase table" ) return [] best_hypothesis = stacks[sentence_length].best() return best_hypothesis.translation_so_far() def find_all_src_phrases(self, src_sentence): sentence_length = len(src_sentence) phrase_indices = [[] for _ in src_sentence] for start in range(0, sentence_length): for end in range(start + 1, sentence_length + 1): potential_phrase = src_sentence[start:end] if potential_phrase in self.phrase_table: phrase_indices[start].append(end) return phrase_indices def compute_future_scores(self, src_sentence): scores = defaultdict(lambda: defaultdict(lambda: float("-inf"))) for seq_length in range(1, len(src_sentence) + 1): for start in range(0, len(src_sentence) - seq_length + 1): end = start + seq_length phrase = src_sentence[start:end] if phrase in self.phrase_table: score = self.phrase_table.translations_for(phrase)[ 0 ].log_prob score += self.language_model.probability(phrase) scores[start][end] = score for mid in range(start + 1, end): combined_score = scores[start][mid] + scores[mid][end] if combined_score > scores[start][end]: scores[start][end] = combined_score return scores def future_score(self, hypothesis, future_score_table, sentence_length): score = 0.0 for span in hypothesis.untranslated_spans(sentence_length): score += future_score_table[span[0]][span[1]] return score def expansion_score(self, hypothesis, translation_option, src_phrase_span): score = hypothesis.raw_score score += translation_option.log_prob score += self.language_model.probability_change( hypothesis, translation_option.trg_phrase ) score += self.distortion_score(hypothesis, src_phrase_span) score -= self.word_penalty * len(translation_option.trg_phrase) return score def distortion_score(self, hypothesis, next_src_phrase_span): if not hypothesis.src_phrase_span: return 0.0 next_src_phrase_start = next_src_phrase_span[0] prev_src_phrase_end = hypothesis.src_phrase_span[1] distortion_distance = next_src_phrase_start - prev_src_phrase_end return abs(distortion_distance) * self.__log_distortion_factor @staticmethod def valid_phrases(all_phrases_from, hypothesis): untranslated_spans = hypothesis.untranslated_spans(len(all_phrases_from)) valid_phrases = [] for available_span in untranslated_spans: start = available_span[0] available_end = available_span[1] while start < available_end: for phrase_end in all_phrases_from[start]: if phrase_end > available_end: break valid_phrases.append((start, phrase_end)) start += 1 return valid_phrases class _Hypothesis: def __init__( self, raw_score=0.0, src_phrase_span=(), trg_phrase=(), previous=None, future_score=0.0, ): self.raw_score = raw_score self.src_phrase_span = src_phrase_span self.trg_phrase = trg_phrase self.previous = previous self.future_score = future_score def score(self): return self.raw_score + self.future_score def untranslated_spans(self, sentence_length): translated_positions = self.translated_positions() translated_positions.sort() translated_positions.append(sentence_length) untranslated_spans = [] start = 0 for end in translated_positions: if start < end: untranslated_spans.append((start, end)) start = end + 1 return untranslated_spans def translated_positions(self): translated_positions = [] current_hypothesis = self while current_hypothesis.previous is not None: translated_span = current_hypothesis.src_phrase_span translated_positions.extend(range(translated_span[0], translated_span[1])) current_hypothesis = current_hypothesis.previous return translated_positions def 
total_translated_words(self): return len(self.translated_positions()) def translation_so_far(self): translation = [] self.__build_translation(self, translation) return translation def __build_translation(self, hypothesis, output): if hypothesis.previous is None: return self.__build_translation(hypothesis.previous, output) output.extend(hypothesis.trg_phrase) class _Stack: def __init__(self, max_size=100, beam_threshold=0.0): self.max_size = max_size self.items = [] if beam_threshold == 0.0: self.__log_beam_threshold = float("-inf") else: self.__log_beam_threshold = log(beam_threshold) def push(self, hypothesis): self.items.append(hypothesis) self.items.sort(key=lambda h: h.score(), reverse=True) while len(self.items) > self.max_size: self.items.pop() self.threshold_prune() def threshold_prune(self): if not self.items: return threshold = self.items[0].score() + self.__log_beam_threshold for hypothesis in reversed(self.items): if hypothesis.score() < threshold: self.items.pop() else: break def best(self): if self.items: return self.items[0] return None def __iter__(self): return iter(self.items) def __contains__(self, hypothesis): return hypothesis in self.items def __bool__(self): return len(self.items) != 0 __nonzero__ = __bool__
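The decoder duck-types its language model: expansion_score calls probability_change(hypothesis, trg_phrase) and compute_future_scores calls probability(phrase). Below is a minimal sketch of a stub satisfying that interface as an ordinary class, instead of the type(...) construction used in the doctest above; the unigram log-probabilities and phrases are illustrative, not a real model.

from collections import defaultdict
from math import log

from nltk.translate import PhraseTable, StackDecoder


class UnigramLanguageModel:
    """Toy stand-in for a real language model (the interface is duck-typed)."""

    def __init__(self, log_probs, unknown_log_prob=-999.0):
        self._log_probs = defaultdict(lambda: unknown_log_prob, log_probs)

    def probability(self, phrase):
        # Score of a phrase on its own, used for future-cost estimation.
        return self._log_probs[phrase]

    def probability_change(self, context, phrase):
        # Change in log probability when `phrase` is appended; the context
        # (partial hypothesis) is ignored in this unigram sketch.
        return self._log_probs[phrase]


phrase_table = PhraseTable()
phrase_table.add(("niemand",), ("nobody",), log(0.8))
phrase_table.add(("erwartet",), ("expects",), log(0.8))
phrase_table.add(
    ("die", "spanische", "inquisition"), ("the", "spanish", "inquisition"), log(0.8)
)

language_model = UnigramLanguageModel(
    {
        ("nobody",): log(0.5),
        ("expects",): log(0.4),
        ("the", "spanish", "inquisition"): log(0.2),
    }
)

decoder = StackDecoder(phrase_table, language_model)
print(decoder.translate(["niemand", "erwartet", "die", "spanische", "inquisition"]))
# expected: ['nobody', 'expects', 'the', 'spanish', 'inquisition']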
natural language toolkit tree package c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt nltk tree package this package may be used for representing hierarchical language structures such as syntax trees and morphological trees todo add labelledtree can be used for dependency trees natural language toolkit tree package c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com peter ljunglöf peter ljunglof gu se tom aarsen url https www nltk org for license information see license txt nltk tree package this package may be used for representing hierarchical language structures such as syntax trees and morphological trees todo add labelledtree can be used for dependency trees
from nltk.tree.immutable import (
    ImmutableMultiParentedTree,
    ImmutableParentedTree,
    ImmutableProbabilisticTree,
    ImmutableTree,
)
from nltk.tree.parented import MultiParentedTree, ParentedTree
from nltk.tree.parsing import bracket_parse, sinica_parse
from nltk.tree.prettyprinter import TreePrettyPrinter
from nltk.tree.probabilistic import ProbabilisticTree
from nltk.tree.transforms import (
    chomsky_normal_form,
    collapse_unary,
    un_chomsky_normal_form,
)
from nltk.tree.tree import Tree

__all__ = [
    "ImmutableMultiParentedTree",
    "ImmutableParentedTree",
    "ImmutableProbabilisticTree",
    "ImmutableTree",
    "MultiParentedTree",
    "ParentedTree",
    "bracket_parse",
    "sinica_parse",
    "TreePrettyPrinter",
    "ProbabilisticTree",
    "chomsky_normal_form",
    "collapse_unary",
    "un_chomsky_normal_form",
    "Tree",
]
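A brief, hedged example of the package's central Tree class in use; the bracketed string and tag names are invented for illustration.

from nltk.tree import Tree

# Parse a bracketed (Penn-style) string into a Tree.
t = Tree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBD barked)))")

print(t.label())   # 'S'
print(t.leaves())  # ['the', 'dog', 'barked']
print(t[0])        # the NP subtree
t.pretty_print()   # ASCII rendering via TreePrettyPrinter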
natural language toolkit text trees c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt precompute our hash value this ensures that we re really immutable it also means we only have to calculate it once set the node label this will only succeed the first time the node label is set which should occur in immutabletree init we have to patch up these methods to make them work right natural language toolkit text trees c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com peter ljunglöf peter ljunglof gu se tom aarsen url https www nltk org for license information see license txt precompute our hash value this ensures that we re really immutable it also means we only have to calculate it once set the node label this will only succeed the first time the node label is set which should occur in immutabletree __init__ we have to patch up these methods to make them work right
from nltk.probability import ProbabilisticMixIn from nltk.tree.parented import MultiParentedTree, ParentedTree from nltk.tree.tree import Tree class ImmutableTree(Tree): def __init__(self, node, children=None): super().__init__(node, children) try: self._hash = hash((self._label, tuple(self))) except (TypeError, ValueError) as e: raise ValueError( "%s: node value and children " "must be immutable" % type(self).__name__ ) from e def __setitem__(self, index, value): raise ValueError("%s may not be modified" % type(self).__name__) def __setslice__(self, i, j, value): raise ValueError("%s may not be modified" % type(self).__name__) def __delitem__(self, index): raise ValueError("%s may not be modified" % type(self).__name__) def __delslice__(self, i, j): raise ValueError("%s may not be modified" % type(self).__name__) def __iadd__(self, other): raise ValueError("%s may not be modified" % type(self).__name__) def __imul__(self, other): raise ValueError("%s may not be modified" % type(self).__name__) def append(self, v): raise ValueError("%s may not be modified" % type(self).__name__) def extend(self, v): raise ValueError("%s may not be modified" % type(self).__name__) def pop(self, v=None): raise ValueError("%s may not be modified" % type(self).__name__) def remove(self, v): raise ValueError("%s may not be modified" % type(self).__name__) def reverse(self): raise ValueError("%s may not be modified" % type(self).__name__) def sort(self): raise ValueError("%s may not be modified" % type(self).__name__) def __hash__(self): return self._hash def set_label(self, value): if hasattr(self, "_label"): raise ValueError("%s may not be modified" % type(self).__name__) self._label = value class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn): def __init__(self, node, children=None, **prob_kwargs): ImmutableTree.__init__(self, node, children) ProbabilisticMixIn.__init__(self, **prob_kwargs) self._hash = hash((self._label, tuple(self), self.prob())) def _frozen_class(self): return ImmutableProbabilisticTree def __repr__(self): return f"{Tree.__repr__(self)} [{self.prob()}]" def __str__(self): return f"{self.pformat(margin=60)} [{self.prob()}]" def copy(self, deep=False): if not deep: return type(self)(self._label, self, prob=self.prob()) else: return type(self).convert(self) @classmethod def convert(cls, val): if isinstance(val, Tree): children = [cls.convert(child) for child in val] if isinstance(val, ProbabilisticMixIn): return cls(val._label, children, prob=val.prob()) else: return cls(val._label, children, prob=1.0) else: return val class ImmutableParentedTree(ImmutableTree, ParentedTree): pass class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree): pass __all__ = [ "ImmutableProbabilisticTree", "ImmutableTree", "ImmutableParentedTree", "ImmutableMultiParentedTree", ]
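A small sketch of how the immutable variants behave, assuming the classes above are exposed via nltk.tree as in the package __init__; the tree literal is invented.

from nltk.tree import ImmutableTree, Tree

mutable = Tree.fromstring("(NP (DT the) (NN dog))")
frozen = ImmutableTree.convert(mutable)  # deep-convert to the immutable class

# Immutable trees are hashable, with the hash precomputed at construction.
print(hash(frozen) == hash(ImmutableTree.convert(mutable)))  # True

try:
    frozen.append(Tree("JJ", ["big"]))
except ValueError as err:
    print(err)  # "ImmutableTree may not be modified"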
natural language toolkit text trees c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt parsing use tree reads removeemptytopbracketingtrue instead parse a sinica treebank string and return a tree trees are represented as nested brackettings as shown in the following example x represents a chinese character sgoal nphead nep xxtheme nphead nhaa xquantity dab xhead vl2 x0periodcategory return a tree corresponding to the string representation rtype tree param s the string to be converted type s str s re subr ss s remove leading identifier s re subr w s remove role tags return s natural language toolkit text trees c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com peter ljunglöf peter ljunglof gu se tom aarsen url https www nltk org for license information see license txt parsing use tree read s remove_empty_top_bracketing true instead parse a sinica treebank string and return a tree trees are represented as nested brackettings as shown in the following example x represents a chinese character s goal np head nep xx theme np head nhaa x quantity dab x head vl2 x 0 periodcategory return a tree corresponding to the string representation rtype tree param s the string to be converted type s str pull nonterminal inside parens non terminal s re sub r s s s remove leading identifier s re sub r w s remove role tags return s
import re

from nltk.tree.tree import Tree


def bracket_parse(s):
    raise NameError("Use Tree.read(s, remove_empty_top_bracketing=True) instead.")


def sinica_parse(s):
    tokens = re.split(r"([()| ])", s)
    for i in range(len(tokens)):
        if tokens[i] == "(":
            tokens[i - 1], tokens[i] = (
                tokens[i],
                tokens[i - 1],
            )
        elif ":" in tokens[i]:
            fields = tokens[i].split(":")
            if len(fields) == 2:
                tokens[i] = fields[1]
            else:
                tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")"
        elif tokens[i] == "|":
            tokens[i] = ""
    treebank_string = " ".join(tokens)
    return Tree.fromstring(treebank_string, remove_empty_top_bracketing=True)


__all__ = [
    "bracket_parse",
    "sinica_parse",
]
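Since bracket_parse above only raises a NameError pointing at the Tree reader, here is a hedged sketch of the suggested replacement; the bracketed string is invented, and the extra empty top-level bracketing is there to show what remove_empty_top_bracketing strips.

from nltk.tree import Tree

# Treebank files often wrap each sentence in an extra, label-less pair of
# parentheses; remove_empty_top_bracketing=True drops that empty top node.
s = "( (S (NP I) (VP (V saw) (NP him))) )"
t = Tree.fromstring(s, remove_empty_top_bracketing=True)
print(t.label())  # 'S'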
natural language toolkit ascii visualization of nltk trees c 20012023 nltk project andreas van cranenburgh a w vancranenburghuva nl peter ljunglf peter ljunglofgu se url https www nltk org for license information see license txt prettyprinting of discontinuous trees adapted from the discodop project by andreas van cranenburgh https github comandreasvcdiscodop interesting reference not used for this code t eschbach et al orth hypergraph drawing journal of graph algorithms and applications 102 141157 2006149 https jgaa infoaccepted2006eschbachguentherbecker2006 10 2 pdf prettyprint a tree in text format either as ascii or unicode the tree can be a normal tree or discontinuous treeprettyprintertree sentencenone highlight creates an object from which different visualizations can be created param tree a tree object param sentence a list of words strings if sentence is given tree must contain integers as leaves which are taken as indices in sentence using this you can display a discontinuous tree param highlight optionally a sequence of tree objects in tree which should be highlighted has the effect of only applying colors to nodes in this sequence nodes should be given as tree objects terminals as indices from nltk tree import tree tree tree fromstring s np mary vp walks printtreeprettyprintertree text doctest normalizewhitespace s np vp mary walks this deals with empty nodes frontier nonterminals and multiplemixed terminals under nonterminals produce coordinates of nodes on a grid objective produce coordinates for a nonoverlapping placement of nodes and horizontal lines order edges so that crossing edges cross a minimal number of previous horizontal lines never vertical lines approach bottom up level order traversal start at terminals at each level identify nodes which cannot be on the same row identify nodes which cannot be in the same column place nodes into a grid at row column order childparent edges with crossing edges last coordinates are row column the origin 0 0 is at the top left the root node is on row 0 coordinates do not consider the size of a node which depends on font c so the width of a column of the grid should be automatically determined by the element with the greatest width in that column alternatively the integer coordinates could be converted to coordinates in which the distances between adjacent nodes are nonuniform produces tuple nodes coords edges highlighted where nodesid tree object for the node with this integer id coordsid n m coordinate where to draw node with id in the grid edgesid parent id of node with this id ordered dictionary highlighted set of ids that should be highlighted find vacant row column index for node m iterate over current rows for this level try lowest first and look for cell between first and last child of this node add new row to level if no free row available find free column dump matrix contents for debugging purposes return n join 2d s n join2r i 2 for i in row for n row in enumeratematrix leaves tree leaves if not allisinstancen int for n in leaves raise valueerrorall leaves must be integer indices if lenleaves lensetleaves raise valueerrorindices must occur at most once if not all0 n lensentence for n in leaves raise valueerror all leaves must be in the interval 0 n with nlensentencentokens d indices rnsentence s lensentence tree leaves sentence vertline corner 1 2 constants tree tree copytrue for a in tree subtrees a sortkeylambda n minn leaves if isinstancen tree else n scale 2 crossed set internal nodes and lexical nodes no frontiers 
positions tree treepositions maxdepth maxmaplen positions 1 childcols defaultdictset matrix none lensentence scale nodes ids a n for n a in enumeratepositions highlightednodes n for a n in ids items if not highlight or treea in highlight levels n for n in rangemaxdepth 1 terminals for a in positions node treea if isinstancenode tree levelsmaxdepth node height appenda else terminals appenda for n in levels levelsn sortkeylambda n maxtreen leaves mintreen leaves terminals sort positions setpositions for m in terminals i inttreem scale assert matrix0i is none matrix0i m i matrix0i idsm nodesidsm sentencetreem if nodesidsm is none nodesidsm highlightednodes discardidsm positions removem childcolsm 1 add0 i add other nodes centered on their children if the center is already taken back off to the left and right alternately until an empty cell is found for n in sortedlevels reversetrue nodesatdepth levelsn startoflevel lenmatrix matrix append vertline if a not in corner none else none for a in matrix1 for m in nodesatdepth 1 if n maxdepth 1 and childcolsm pivot minchildcolsm keyitemgetter1 if a 1 for row in matrix 1 for a in row pivot if isinstancea tuple a 1 for row in matrix 1 for a in rowpivot if isinstancea tuple crossed addm rowidx i findcellm matrix startoflevel childcols positions removem block positions where children of this node branch out for x in childcolsm matrixrowidxx corner assert m or matrixrowidxi in none corner matrixrowidxi m strtree joinsentence node itself matrixrowidxi idsm nodesidsm treem add column to the set of children for its parent if lenm 0 childcolsm 1 addrowidx i assert lenpositions 0 remove unused columns right to left for m in rangescale lensentence 1 1 1 if not anyisinstancerowm tree int for row in matrix for row in matrix del rowm remove unused rows reverse matrix row for row in reversedmatrix if not alla is none or a vertline for a in row collect coordinates of nodes coords for n in enumeratematrix for m i in enumeratematrixn if isinstancei int and i 0 coordsi n m move crossed edges last positions sorted a for level in levels values for a in level keylambda a a 1 in crossed collect edges from node to node edges ordereddict for i in reversedpositions for j in enumeratetreei edgesidsi j idsi return nodes coords edges highlightednodes def text self nodedist1 unicodelinesfalse htmlfalse ansifalse nodecolorblue leafcolorred funccolorgreen abbreviatenone maxwidth16 if abbreviate true abbreviate 5 if unicodelines horzline u2500 leftcorner u250c rightcorner u2510 vertline u2502 tee horzline u252c horzline bottom horzline u2534 horzline cross horzline u253c horzline ellipsis u2026 else horzline leftcorner rightcorner vertline tee 3 horzline cross bottom ellipsis def crosscellcur xvertline collect labels and coordinates bottom up level order traversal draw horizontal branch towards children for this node draw vertical lines in partially filled multiline node labels but only if it s not a frontier node for each column if there is a node below us which has a parent above us draw a vertical branch in that column return svg representation of a tree horizontal branches from nodes to children vertical branches from children to parents write nodes with coordinates do some tree drawing tests def printtreen tree sentencenone ansitrue xargs print print formatn joinsentence or tree leaves printtree print drawtree treeprettyprintertree sentence try printdrawtree textunicodelinesansi ansiansi xargs except unicodedecodeerror unicodeencodeerror printdrawtree textunicodelinesfalse 
ansifalse xargs from nltk corpus import treebank for n in 0 1440 1591 2771 2170 tree treebank parsedsentsn printtreen tree nodedist2 maxwidth8 print printascii version printtreeprettyprintertree textnodedist2 tree tree fromstring top punct 8 smain noun 0 verb 1 inf verb 5 inf verb 6 conj inf pp prep 2 np det 3 noun 4 verb 7 inf verb 9 vg 10 inf verb 11 punct 12 readleafint sentence ze had met haar moeder kunnen gaan winkelen zwemmen of terrassen split printtreediscontinuous tree tree sentence nodedist2 all treeprettyprinter if name main test natural language toolkit ascii visualization of nltk trees c 2001 2023 nltk project andreas van cranenburgh a w vancranenburgh uva nl peter ljunglöf peter ljunglof gu se url https www nltk org for license information see license txt pretty printing of discontinuous trees adapted from the disco dop project by andreas van cranenburgh https github com andreasvc disco dop interesting reference not used for this code t eschbach et al orth hypergraph drawing journal of graph algorithms and applications 10 2 141 157 2006 149 https jgaa info accepted 2006 eschbachguentherbecker2006 10 2 pdf pretty print a tree in text format either as ascii or unicode the tree can be a normal tree or discontinuous treeprettyprinter tree sentence none highlight creates an object from which different visualizations can be created param tree a tree object param sentence a list of words strings if sentence is given tree must contain integers as leaves which are taken as indices in sentence using this you can display a discontinuous tree param highlight optionally a sequence of tree objects in tree which should be highlighted has the effect of only applying colors to nodes in this sequence nodes should be given as tree objects terminals as indices from nltk tree import tree tree tree fromstring s np mary vp walks print treeprettyprinter tree text doctest normalize_whitespace s ____ ____ np vp mary walks this deals with empty nodes frontier non terminals and multiple mixed terminals under non terminals produce coordinates of nodes on a grid objective produce coordinates for a non overlapping placement of nodes and horizontal lines order edges so that crossing edges cross a minimal number of previous horizontal lines never vertical lines approach bottom up level order traversal start at terminals at each level identify nodes which cannot be on the same row identify nodes which cannot be in the same column place nodes into a grid at row column order child parent edges with crossing edges last coordinates are row column the origin 0 0 is at the top left the root node is on row 0 coordinates do not consider the size of a node which depends on font c so the width of a column of the grid should be automatically determined by the element with the greatest width in that column alternatively the integer coordinates could be converted to coordinates in which the distances between adjacent nodes are non uniform produces tuple nodes coords edges highlighted where nodes id tree object for the node with this integer id coords id n m coordinate where to draw node with id in the grid edges id parent id of node with this id ordered dictionary highlighted set of ids that should be highlighted find vacant row column index for node m iterate over current rows for this level try lowest first and look for cell between first and last child of this node add new row to level if no free row available center of gravity round to unscaled coordinate need to add a new row place unaries directly above child find 
free column dump matrix contents for debugging purposes constants internal nodes and lexical nodes no frontiers add other nodes centered on their children if the center is already taken back off to the left and right alternately until an empty cell is found 1 block positions where children of this node branch out assert m or matrix rowidx i in none corner matrix rowidx i m str tree join sentence node itself add column to the set of children for its parent remove unused columns right to left remove unused rows reverse collect coordinates of nodes move crossed edges last collect edges from node to node return ascii art for a discontinuous tree param unicodelines whether to use unicode line drawing characters instead of plain 7 bit ascii param html whether to wrap output in html code default plain text param ansi whether to produce colors with ansi escape sequences only effective when html false param leafcolor nodecolor specify colors of leaves and phrasal nodes effective when either html or ansi is true param abbreviate if true abbreviate labels longer than 5 characters if integer abbreviate labels longer than abbr characters param maxwidth maximum number of characters before a label starts to wrap pass none to disable overwrite center of this cell with a vertical branch collect labels and coordinates e g root bottom up level order traversal draw horizontal branch towards children for this node if n and n in minchildcol draw vertical lines in partially filled multiline node labels but only if it s not a frontier node for each column if there is a node below us which has a parent above us draw a vertical branch in that column return svg representation of a tree horizontal branches from nodes to children vertical branches from children to parents write nodes with coordinates do some tree drawing tests
import re try: from html import escape except ImportError: from cgi import escape from collections import defaultdict from operator import itemgetter from nltk.tree.tree import Tree from nltk.util import OrderedDict ANSICOLOR = { "black": 30, "red": 31, "green": 32, "yellow": 33, "blue": 34, "magenta": 35, "cyan": 36, "white": 37, } class TreePrettyPrinter: def __init__(self, tree, sentence=None, highlight=()): if sentence is None: leaves = tree.leaves() if ( leaves and all(len(a) > 0 for a in tree.subtrees()) and all(isinstance(a, int) for a in leaves) ): sentence = [str(a) for a in leaves] else: tree = tree.copy(True) sentence = [] for a in tree.subtrees(): if len(a) == 0: a.append(len(sentence)) sentence.append(None) elif any(not isinstance(b, Tree) for b in a): for n, b in enumerate(a): if not isinstance(b, Tree): a[n] = len(sentence) if type(b) == tuple: b = "/".join(b) sentence.append("%s" % b) self.nodes, self.coords, self.edges, self.highlight = self.nodecoords( tree, sentence, highlight ) def __str__(self): return self.text() def __repr__(self): return "<TreePrettyPrinter with %d nodes>" % len(self.nodes) @staticmethod def nodecoords(tree, sentence, highlight): def findcell(m, matrix, startoflevel, children): candidates = [a for _, a in children[m]] minidx, maxidx = min(candidates), max(candidates) leaves = tree[m].leaves() center = scale * sum(leaves) // len(leaves) if minidx < maxidx and not minidx < center < maxidx: center = sum(candidates) // len(candidates) if max(candidates) - min(candidates) > 2 * scale: center -= center % scale if minidx < maxidx and not minidx < center < maxidx: center += scale if ids[m] == 0: startoflevel = len(matrix) for rowidx in range(startoflevel, len(matrix) + 1): if rowidx == len(matrix): matrix.append( [ vertline if a not in (corner, None) else None for a in matrix[-1] ] ) row = matrix[rowidx] if len(children[m]) == 1: return rowidx, next(iter(children[m]))[1] elif all( a is None or a == vertline for a in row[min(candidates) : max(candidates) + 1] ): for n in range(scale): i = j = center + n while j > minidx or i < maxidx: if i < maxidx and ( matrix[rowidx][i] is None or i in candidates ): return rowidx, i elif j > minidx and ( matrix[rowidx][j] is None or j in candidates ): return rowidx, j i += scale j -= scale raise ValueError( "could not find a free cell for:\n%s\n%s" "min=%d; max=%d" % (tree[m], minidx, maxidx, dumpmatrix()) ) def dumpmatrix(): return "\n".join( "%2d: %s" % (n, " ".join(("%2r" % i)[:2] for i in row)) for n, row in enumerate(matrix) ) leaves = tree.leaves() if not all(isinstance(n, int) for n in leaves): raise ValueError("All leaves must be integer indices.") if len(leaves) != len(set(leaves)): raise ValueError("Indices must occur at most once.") if not all(0 <= n < len(sentence) for n in leaves): raise ValueError( "All leaves must be in the interval 0..n " "with n=len(sentence)\ntokens: %d indices: " "%r\nsentence: %s" % (len(sentence), tree.leaves(), sentence) ) vertline, corner = -1, -2 tree = tree.copy(True) for a in tree.subtrees(): a.sort(key=lambda n: min(n.leaves()) if isinstance(n, Tree) else n) scale = 2 crossed = set() positions = tree.treepositions() maxdepth = max(map(len, positions)) + 1 childcols = defaultdict(set) matrix = [[None] * (len(sentence) * scale)] nodes = {} ids = {a: n for n, a in enumerate(positions)} highlighted_nodes = { n for a, n in ids.items() if not highlight or tree[a] in highlight } levels = {n: [] for n in range(maxdepth - 1)} terminals = [] for a in positions: node = tree[a] if 
isinstance(node, Tree): levels[maxdepth - node.height()].append(a) else: terminals.append(a) for n in levels: levels[n].sort(key=lambda n: max(tree[n].leaves()) - min(tree[n].leaves())) terminals.sort() positions = set(positions) for m in terminals: i = int(tree[m]) * scale assert matrix[0][i] is None, (matrix[0][i], m, i) matrix[0][i] = ids[m] nodes[ids[m]] = sentence[tree[m]] if nodes[ids[m]] is None: nodes[ids[m]] = "..." highlighted_nodes.discard(ids[m]) positions.remove(m) childcols[m[:-1]].add((0, i)) for n in sorted(levels, reverse=True): nodesatdepth = levels[n] startoflevel = len(matrix) matrix.append( [vertline if a not in (corner, None) else None for a in matrix[-1]] ) for m in nodesatdepth: if n < maxdepth - 1 and childcols[m]: _, pivot = min(childcols[m], key=itemgetter(1)) if { a[:-1] for row in matrix[:-1] for a in row[:pivot] if isinstance(a, tuple) } & { a[:-1] for row in matrix[:-1] for a in row[pivot:] if isinstance(a, tuple) }: crossed.add(m) rowidx, i = findcell(m, matrix, startoflevel, childcols) positions.remove(m) for _, x in childcols[m]: matrix[rowidx][x] = corner matrix[rowidx][i] = ids[m] nodes[ids[m]] = tree[m] if len(m) > 0: childcols[m[:-1]].add((rowidx, i)) assert len(positions) == 0 for m in range(scale * len(sentence) - 1, -1, -1): if not any(isinstance(row[m], (Tree, int)) for row in matrix): for row in matrix: del row[m] matrix = [ row for row in reversed(matrix) if not all(a is None or a == vertline for a in row) ] coords = {} for n, _ in enumerate(matrix): for m, i in enumerate(matrix[n]): if isinstance(i, int) and i >= 0: coords[i] = n, m positions = sorted( (a for level in levels.values() for a in level), key=lambda a: a[:-1] in crossed, ) edges = OrderedDict() for i in reversed(positions): for j, _ in enumerate(tree[i]): edges[ids[i + (j,)]] = ids[i] return nodes, coords, edges, highlighted_nodes def text( self, nodedist=1, unicodelines=False, html=False, ansi=False, nodecolor="blue", leafcolor="red", funccolor="green", abbreviate=None, maxwidth=16, ): if abbreviate == True: abbreviate = 5 if unicodelines: horzline = "\u2500" leftcorner = "\u250c" rightcorner = "\u2510" vertline = " \u2502 " tee = horzline + "\u252C" + horzline bottom = horzline + "\u2534" + horzline cross = horzline + "\u253c" + horzline ellipsis = "\u2026" else: horzline = "_" leftcorner = rightcorner = " " vertline = " | " tee = 3 * horzline cross = bottom = "_|_" ellipsis = "." 
def crosscell(cur, x=vertline): splitl = len(cur) - len(cur) // 2 - len(x) // 2 - 1 lst = list(cur) lst[splitl : splitl + len(x)] = list(x) return "".join(lst) result = [] matrix = defaultdict(dict) maxnodewith = defaultdict(lambda: 3) maxnodeheight = defaultdict(lambda: 1) maxcol = 0 minchildcol = {} maxchildcol = {} childcols = defaultdict(set) labels = {} wrapre = re.compile( "(.{%d,%d}\\b\\W*|.{%d})" % (maxwidth - 4, maxwidth, maxwidth) ) for a in self.nodes: row, column = self.coords[a] matrix[row][column] = a maxcol = max(maxcol, column) label = ( self.nodes[a].label() if isinstance(self.nodes[a], Tree) else self.nodes[a] ) if abbreviate and len(label) > abbreviate: label = label[:abbreviate] + ellipsis if maxwidth and len(label) > maxwidth: label = wrapre.sub(r"\1\n", label).strip() label = label.split("\n") maxnodeheight[row] = max(maxnodeheight[row], len(label)) maxnodewith[column] = max(maxnodewith[column], max(map(len, label))) labels[a] = label if a not in self.edges: continue parent = self.edges[a] childcols[parent].add((row, column)) minchildcol[parent] = min(minchildcol.get(parent, column), column) maxchildcol[parent] = max(maxchildcol.get(parent, column), column) for row in sorted(matrix, reverse=True): noderows = [ ["".center(maxnodewith[col]) for col in range(maxcol + 1)] for _ in range(maxnodeheight[row]) ] branchrow = ["".center(maxnodewith[col]) for col in range(maxcol + 1)] for col in matrix[row]: n = matrix[row][col] node = self.nodes[n] text = labels[n] if isinstance(node, Tree): if n in minchildcol and minchildcol[n] < maxchildcol[n]: i, j = minchildcol[n], maxchildcol[n] a, b = (maxnodewith[i] + 1) // 2 - 1, maxnodewith[j] // 2 branchrow[i] = ((" " * a) + leftcorner).ljust( maxnodewith[i], horzline ) branchrow[j] = (rightcorner + (" " * b)).rjust( maxnodewith[j], horzline ) for i in range(minchildcol[n] + 1, maxchildcol[n]): if i == col and any(a == i for _, a in childcols[n]): line = cross elif i == col: line = bottom elif any(a == i for _, a in childcols[n]): line = tee else: line = horzline branchrow[i] = line.center(maxnodewith[i], horzline) else: branchrow[col] = crosscell(branchrow[col]) text = [a.center(maxnodewith[col]) for a in text] color = nodecolor if isinstance(node, Tree) else leafcolor if isinstance(node, Tree) and node.label().startswith("-"): color = funccolor if html: text = [escape(a, quote=False) for a in text] if n in self.highlight: text = [f"<font color={color}>{a}</font>" for a in text] elif ansi and n in self.highlight: text = ["\x1b[%d;1m%s\x1b[0m" % (ANSICOLOR[color], a) for a in text] for x in range(maxnodeheight[row]): noderows[x][col] = ( text[x] if x < len(text) else (vertline if childcols[n] else " ").center( maxnodewith[col], " " ) ) if row != max(matrix): for n, (childrow, col) in self.coords.items(): if n > 0 and self.coords[self.edges[n]][0] < row < childrow: branchrow[col] = crosscell(branchrow[col]) if col not in matrix[row]: for noderow in noderows: noderow[col] = crosscell(noderow[col]) branchrow = [ a + ((a[-1] if a[-1] != " " else b[0]) * nodedist) for a, b in zip(branchrow, branchrow[1:] + [" "]) ] result.append("".join(branchrow)) result.extend( (" " * nodedist).join(noderow) for noderow in reversed(noderows) ) return "\n".join(reversed(result)) + "\n" def svg(self, nodecolor="blue", leafcolor="red", funccolor="green"): fontsize = 12 hscale = 40 vscale = 25 hstart = vstart = 20 width = max(col for _, col in self.coords.values()) height = max(row for row, _ in self.coords.values()) result = [ '<svg version="1.1" 
xmlns="http://www.w3.org/2000/svg" ' 'width="%dem" height="%dem" viewBox="%d %d %d %d">' % ( width * 3, height * 2.5, -hstart, -vstart, width * hscale + 3 * hstart, height * vscale + 3 * vstart, ) ] children = defaultdict(set) for n in self.nodes: if n: children[self.edges[n]].add(n) for node in self.nodes: if not children[node]: continue y, x = self.coords[node] x *= hscale y *= vscale x += hstart y += vstart + fontsize // 2 childx = [self.coords[c][1] for c in children[node]] xmin = hstart + hscale * min(childx) xmax = hstart + hscale * max(childx) result.append( '\t<polyline style="stroke:black; stroke-width:1; fill:none;" ' 'points="%g,%g %g,%g" />' % (xmin, y, xmax, y) ) result.append( '\t<polyline style="stroke:black; stroke-width:1; fill:none;" ' 'points="%g,%g %g,%g" />' % (x, y, x, y - fontsize // 3) ) for child, parent in self.edges.items(): y, _ = self.coords[parent] y *= vscale y += vstart + fontsize // 2 childy, childx = self.coords[child] childx *= hscale childy *= vscale childx += hstart childy += vstart - fontsize result += [ '\t<polyline style="stroke:white; stroke-width:10; fill:none;"' ' points="%g,%g %g,%g" />' % (childx, childy, childx, y + 5), '\t<polyline style="stroke:black; stroke-width:1; fill:none;"' ' points="%g,%g %g,%g" />' % (childx, childy, childx, y), ] for n, (row, column) in self.coords.items(): node = self.nodes[n] x = column * hscale + hstart y = row * vscale + vstart if n in self.highlight: color = nodecolor if isinstance(node, Tree) else leafcolor if isinstance(node, Tree) and node.label().startswith("-"): color = funccolor else: color = "black" result += [ '\t<text style="text-anchor: middle; fill: %s; ' 'font-size: %dpx;" x="%g" y="%g">%s</text>' % ( color, fontsize, x, y, escape( node.label() if isinstance(node, Tree) else node, quote=False ), ) ] result += ["</svg>"] return "\n".join(result) def test(): def print_tree(n, tree, sentence=None, ansi=True, **xargs): print() print('{}: "{}"'.format(n, " ".join(sentence or tree.leaves()))) print(tree) print() drawtree = TreePrettyPrinter(tree, sentence) try: print(drawtree.text(unicodelines=ansi, ansi=ansi, **xargs)) except (UnicodeDecodeError, UnicodeEncodeError): print(drawtree.text(unicodelines=False, ansi=False, **xargs)) from nltk.corpus import treebank for n in [0, 1440, 1591, 2771, 2170]: tree = treebank.parsed_sents()[n] print_tree(n, tree, nodedist=2, maxwidth=8) print() print("ASCII version:") print(TreePrettyPrinter(tree).text(nodedist=2)) tree = Tree.fromstring( "(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) " "(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) " "(vg 10) (inf (verb 11)))))) (punct 12))", read_leaf=int, ) sentence = ( "Ze had met haar moeder kunnen gaan winkelen ," " zwemmen of terrassen .".split() ) print_tree("Discontinuous tree", tree, sentence, nodedist=2) __all__ = ["TreePrettyPrinter"] if __name__ == "__main__": test()
natural language toolkit text trees c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com peter ljunglf peter ljunglofgu se tom aarsen url https www nltk org for license information see license txt probabilistic trees we have to patch up these methods to make them work right natural language toolkit text trees c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com peter ljunglöf peter ljunglof gu se tom aarsen url https www nltk org for license information see license txt probabilistic trees we have to patch up these methods to make them work right
from nltk.internals import raise_unorderable_types
from nltk.probability import ProbabilisticMixIn
from nltk.tree.immutable import ImmutableProbabilisticTree
from nltk.tree.tree import Tree


class ProbabilisticTree(Tree, ProbabilisticMixIn):
    def __init__(self, node, children=None, **prob_kwargs):
        Tree.__init__(self, node, children)
        ProbabilisticMixIn.__init__(self, **prob_kwargs)

    def _frozen_class(self):
        return ImmutableProbabilisticTree

    def __repr__(self):
        return f"{Tree.__repr__(self)} (p={self.prob()!r})"

    def __str__(self):
        return f"{self.pformat(margin=60)} (p={self.prob():.6g})"

    def copy(self, deep=False):
        if not deep:
            return type(self)(self._label, self, prob=self.prob())
        else:
            return type(self).convert(self)

    @classmethod
    def convert(cls, val):
        if isinstance(val, Tree):
            children = [cls.convert(child) for child in val]
            if isinstance(val, ProbabilisticMixIn):
                return cls(val._label, children, prob=val.prob())
            else:
                return cls(val._label, children, prob=1.0)
        else:
            return val

    def __eq__(self, other):
        return self.__class__ is other.__class__ and (
            self._label,
            list(self),
            self.prob(),
        ) == (other._label, list(other), other.prob())

    def __lt__(self, other):
        if not isinstance(other, Tree):
            raise_unorderable_types("<", self, other)
        if self.__class__ is other.__class__:
            return (self._label, list(self), self.prob()) < (
                other._label,
                list(other),
                other.prob(),
            )
        else:
            return self.__class__.__name__ < other.__class__.__name__


__all__ = ["ProbabilisticTree"]
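A short sketch of attaching probabilities to trees with the class above; the labels and probability are invented, and the freeze() call assumes the standard Tree.freeze helper, which routes through _frozen_class.

from nltk.tree import ProbabilisticTree, Tree

np_tree = ProbabilisticTree("NP", [Tree("DT", ["the"]), Tree("NN", ["dog"])], prob=0.25)

print(np_tree.prob())  # 0.25
print(np_tree)         # e.g. (NP (DT the) (NN dog)) (p=0.25)

# freeze() should yield an ImmutableProbabilisticTree via _frozen_class.
frozen = np_tree.freeze()
print(type(frozen).__name__)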
natural language toolkit tree transformations c 20052007 oregon graduate institute nathan bodenstab bodenstabcslu ogi edu url https www nltk org for license information see license txt from nltk internals import deprecated from nltk tree transforms import chomskynormalform as cnf from nltk tree transforms import collapseunary as cu from nltk tree transforms import unchomskynormalform as ucnf chomskynormalform deprecated import using from nltk tree import chomskynormalform instead cnf unchomskynormalform deprecated import using from nltk tree import unchomskynormalform instead ucnf collapseunary deprecated import using from nltk tree import collapseunary instead cu all chomskynormalform unchomskynormalform collapseunary natural language toolkit tree transformations c 2005 2007 oregon graduate institute nathan bodenstab bodenstab cslu ogi edu url https www nltk org for license information see license txt a collection of methods for tree grammar transformations used in parsing natural language although many of these methods are technically grammar transformations ie chomsky norm form when working with treebanks it is much more natural to visualize these modifications in a tree structure hence we will do all transformation directly to the tree itself transforming the tree directly also allows us to do parent annotation a grammar can then be simply induced from the modified tree the following is a short tutorial on the available transformations 1 chomsky normal form binarization it is well known that any grammar has a chomsky normal form cnf equivalent grammar where cnf is defined by every production having either two non terminals or one terminal on its right hand side when we have hierarchically structured data ie a treebank it is natural to view this in terms of productions where the root of every subtree is the head left hand side of the production and all of its children are the right hand side constituents in order to convert a tree into cnf we simply need to ensure that every subtree has either two subtrees as children binarization or one leaf node non terminal in order to binarize a subtree with more than two children we must introduce artificial nodes there are two popular methods to convert a tree into cnf left factoring and right factoring the following example demonstrates the difference between them example original right factored left factored a a a b c d b a c d or a b c d c d b c 2 parent annotation in addition to binarizing the tree there are two standard modifications to node labels we can do in the same traversal parent annotation and markov order n smoothing or sibling smoothing the purpose of parent annotation is to refine the probabilities of productions by adding a small amount of context with this simple addition a cyk inside outside dynamic programming chart parse can improve from 74 to 79 accuracy a natural generalization from parent annotation is to grandparent annotation and beyond the tradeoff becomes accuracy gain vs computational complexity we must also keep in mind data sparcity issues example original parent annotation a a b c d b a a c d where is the parent of a c a d a 3 markov order n smoothing markov smoothing combats data sparcity issues as well as decreasing computational requirements by limiting the number of children included in artificial nodes in practice most people use an order 2 grammar example original no smoothing markov order 1 markov order 2 etc __a__ a a a b c d e f b a c d e f b a c b a c d c c c annotation decisions can be thought about in 
the vertical direction parent grandparent etc and the horizontal direction number of siblings to keep parameters to the following functions specify these values for more information see dan klein and chris manning 2003 accurate unlexicalized parsing acl 03 https www aclweb org anthology p03 1054 4 unary collapsing collapse unary productions ie subtrees with a single child into a new non terminal tree node this is useful when working with algorithms that do not allow unary productions yet you do not wish to lose the parent information example a b a b c d c d
from nltk.internals import deprecated
from nltk.tree.transforms import chomsky_normal_form as cnf
from nltk.tree.transforms import collapse_unary as cu
from nltk.tree.transforms import un_chomsky_normal_form as ucnf

chomsky_normal_form = deprecated(
    "Import using `from nltk.tree import chomsky_normal_form` instead."
)(cnf)
un_chomsky_normal_form = deprecated(
    "Import using `from nltk.tree import un_chomsky_normal_form` instead."
)(ucnf)
collapse_unary = deprecated(
    "Import using `from nltk.tree import collapse_unary` instead."
)(cu)

__all__ = ["chomsky_normal_form", "un_chomsky_normal_form", "collapse_unary"]
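A short illustration of the non-deprecated entry points this shim forwards to, assuming the current nltk.tree API (the import path is the one named in the deprecation messages above); the toy parse is only example data:

from nltk.tree import Tree, chomsky_normal_form, un_chomsky_normal_form, collapse_unary

t = Tree.fromstring(
    "(S (NP (D the) (ADJ big) (N dog)) (VP (V chased) (NP (D the) (N cat))))"
)
collapse_unary(t)           # fold unary chains in place (none in this toy tree)
chomsky_normal_form(t)      # binarize in place; the ternary NP gains an artificial node
print(t)
un_chomsky_normal_form(t)   # undo the binarization, restoring the original shape
print(t)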
natural language toolkit twitter c 20012023 nltk project ewan klein ewaninf ed ac uk url https www nltk org for license information see license txt nltk twitter package this package contains classes for retrieving tweet documents using the twitter api natural language toolkit twitter c 2001 2023 nltk project ewan klein ewan inf ed ac uk url https www nltk org for license information see license txt nltk twitter package this package contains classes for retrieving tweet documents using the twitter api
try:
    import twython
except ImportError:
    import warnings

    warnings.warn(
        "The twython library has not been installed. "
        "Some functionality from the twitter package will not be available."
    )
else:
    from nltk.twitter.util import Authenticate, credsfromfile
    from nltk.twitter.twitterclient import (
        Streamer,
        Query,
        Twitter,
        TweetViewer,
        TweetWriter,
    )

from nltk.twitter.common import json2csv
natural language toolkit twitter client c 20012023 nltk project ewan klein ewaninf ed ac uk lorenzo rubio lrnzciggmail com url https www nltk org for license information see license txt nltk twitter client this module offers methods for collecting and processing tweets most of the functionality depends on access to the twitter apis and this is handled via the third party twython library if one of the methods below returns an integer it is probably a twitter error code https dev twitter comoverviewapiresponsecodes for example the response of 420 means that you have reached the limit of the requests you can currently make to the twitter api currently rate limits for the search api https dev twitter comrestpublicratelimiting are divided into 15 minute windows retrieve data from the twitter streaming api the streaming api requires oauth 1 0 https en wikipedia orgwikioauth authentication register a method for handling tweets param tweethandleri handler method for viewing param data response from twitter api param statuscode the status code returned by the twitter api param data the response from twitter api wrapper for statuses sample api call stream in an endless loop until limit is reached see twython issue 288 https github comryanmcgrathtwythonissues288 colditzjb commented on 9 dec 2014 wrapper for statuses filter api call stream in an endless loop until limit is reached retrieve data from the twitter rest api param appkey optional your applications key param appsecret optional your applications secret key param oauthtoken optional when using oauth 1 combined with oauthtokensecret to make authenticated calls param oauthtokensecret optional when using oauth 1 combined with oauthtoken to make authenticated calls register a method for handling tweets param tweethandleri handler method for viewing or writing tweets to a file given a file object containing a list of tweet ids fetch the corresponding full tweets from the twitter api the api call statuseslookup will fail to retrieve a tweet if the user has deleted it this call to the twitter api is ratelimited see https dev twitter comrestreferencegetstatuseslookup for details param idsf input file object consisting of tweet ids one to a line return iterable of tweet objects in json format the twitter endpoint takes lists of up to 100 ids so we chunk the ids assumes that the handler has been informed fetches tweets from searchtweets generator output and passses them to handler param str keywords a list of query terms to search for written as a commaseparated string param int limit number of tweets to process param str lang language call the rest api searchtweets endpoint with some plausible defaults see the twitter search documentation https dev twitter comrestpublicsearch for more information about admissible search parameters param str keywords a list of query terms to search for written as a commaseparated string param int limit number of tweets to process param str lang language param int maxid id of the last tweet fetched param int retriesaftertwythonexception number of retries when searching tweets before raising an exception rtype python generator if no handler is provided basictweethandler provides minimum functionality for limiting the number of tweets retrieved pagination loop keep fetching tweets until the desired count is reached while dealing with twitter rate limits the maxid is also present in the tweet metadata results searchmetadata nextresults but as part of a query and difficult to fetch this is doing the equivalent last tweet id 
minus one convert a list of userids into a variety of information about the users see https dev twitter comrestreferencegetusersshow param list userids a list of integer strings corresponding to twitter userids rtype listjson return a collection of the most recent tweets posted by the user param str user the user s screen name the initial symbol should be omitted param int limit the number of tweets to recover 200 is the maximum allowed param str includerts whether to include statuses which have been retweeted by the user possible values are true and false wrapper class with restricted functionality and fewer options process some tweets in a simple manner param str keywords keywords to use for searching or filtering param list follow userids to use for filtering tweets from the public stream param bool toscreen if true display the tweet texts on the screen otherwise print to a file param bool stream if true use the live public stream otherwise search past public tweets param int limit the number of data items to process in the current round of processing param tuple datelimit the date at which to stop collecting new data this should be entered as a tuple which can serve as the argument to datetime datetime e g datelimit2015 4 1 12 40 for 12 30 pm on april 1 2015 note that in the case of streaming this is the maximum date i e a date in the future if not it is the minimum date i e a date in the past param str lang language param bool repeat a flag to determine whether multiple files should be written if true the length of each file will be set by the value of limit use only if toscreen is false see also py func handle param gzipcompress if true output files are compressed with gzip handle data by sending it to the terminal direct data to sys stdout return return false if processing should cease otherwise return true rtype bool param data tweet object returned by twitter api handle data by writing it to a file the difference between the upper and lower date limits depends on whether tweets are coming in an ascending date order i e when streaming or descending date order i e when searching past tweets param int limit number of data items to process in the current round of processing param tuple upperdatelimit the date at which to stop collecting new data this should be entered as a tuple which can serve as the argument to datetime datetime e g upperdatelimit2015 4 1 12 40 for 12 30 pm on april 1 2015 param tuple lowerdatelimit the date at which to stop collecting new data see upperdatalimit for formatting param str fprefix the prefix to use in creating file names for tweet collections param str subdir the name of the directory where tweet collection files should be stored param bool repeat flag to determine whether multiple files should be written if true the length of each file will be set by the value of limit see also py func handle param gzipcompress if true output files are compressed with gzip return timestamped file name rtype str write twitter data as linedelimited json into one or more files return return false if processing should cease otherwise return true param data tweet object returned by twitter api stop for a functional cause e g date limit repeat is true thus close output file and create a new one natural language toolkit twitter client c 2001 2023 nltk project ewan klein ewan inf ed ac uk lorenzo rubio lrnzcig gmail com url https www nltk org for license information see license txt nltk twitter client this module offers methods for collecting and processing tweets most of 
the functionality depends on access to the twitter apis and this is handled via the third party twython library if one of the methods below returns an integer it is probably a twitter error code https dev twitter com overview api response codes _ for example the response of 420 means that you have reached the limit of the requests you can currently make to the twitter api currently rate limits for the search api https dev twitter com rest public rate limiting _ are divided into 15 minute windows retrieve data from the twitter streaming api the streaming api requires oauth 1 0 https en wikipedia org wiki oauth _ authentication register a method for handling tweets param tweethandleri handler method for viewing param data response from twitter api param status_code the status code returned by the twitter api param data the response from twitter api wrapper for statuses sample api call stream in an endless loop until limit is reached see twython issue 288 https github com ryanmcgrath twython issues 288 colditzjb commented on 9 dec 2014 wrapper for statuses filter api call stream in an endless loop until limit is reached retrieve data from the twitter rest api param app_key optional your applications key param app_secret optional your applications secret key param oauth_token optional when using oauth 1 combined with oauth_token_secret to make authenticated calls param oauth_token_secret optional when using oauth 1 combined with oauth_token to make authenticated calls register a method for handling tweets param tweethandleri handler method for viewing or writing tweets to a file given a file object containing a list of tweet ids fetch the corresponding full tweets from the twitter api the api call statuses lookup will fail to retrieve a tweet if the user has deleted it this call to the twitter api is rate limited see https dev twitter com rest reference get statuses lookup for details param ids_f input file object consisting of tweet ids one to a line return iterable of tweet objects in json format the twitter endpoint takes lists of up to 100 ids so we chunk the ids assumes that the handler has been informed fetches tweets from search_tweets generator output and passses them to handler param str keywords a list of query terms to search for written as a comma separated string param int limit number of tweets to process param str lang language call the rest api search tweets endpoint with some plausible defaults see the twitter search documentation https dev twitter com rest public search _ for more information about admissible search parameters param str keywords a list of query terms to search for written as a comma separated string param int limit number of tweets to process param str lang language param int max_id id of the last tweet fetched param int retries_after_twython_exception number of retries when searching tweets before raising an exception rtype python generator if no handler is provided basictweethandler provides minimum functionality for limiting the number of tweets retrieved pagination loop keep fetching tweets until the desired count is reached while dealing with twitter rate limits wait 15 minutes the max_id is also present in the tweet metadata results search_metadata next_results but as part of a query and difficult to fetch this is doing the equivalent last tweet id minus one convert a list of userids into a variety of information about the users see https dev twitter com rest reference get users show param list userids a list of integer strings corresponding to twitter 
userids rtype list json return a collection of the most recent tweets posted by the user param str user the user s screen name the initial symbol should be omitted param int limit the number of tweets to recover 200 is the maximum allowed param str include_rts whether to include statuses which have been retweeted by the user possible values are true and false wrapper class with restricted functionality and fewer options process some tweets in a simple manner param str keywords keywords to use for searching or filtering param list follow userids to use for filtering tweets from the public stream param bool to_screen if true display the tweet texts on the screen otherwise print to a file param bool stream if true use the live public stream otherwise search past public tweets param int limit the number of data items to process in the current round of processing param tuple date_limit the date at which to stop collecting new data this should be entered as a tuple which can serve as the argument to datetime datetime e g date_limit 2015 4 1 12 40 for 12 30 pm on april 1 2015 note that in the case of streaming this is the maximum date i e a date in the future if not it is the minimum date i e a date in the past param str lang language param bool repeat a flag to determine whether multiple files should be written if true the length of each file will be set by the value of limit use only if to_screen is false see also py func handle param gzip_compress if true output files are compressed with gzip handle data by sending it to the terminal direct data to sys stdout return return false if processing should cease otherwise return true rtype bool param data tweet object returned by twitter api handle data by writing it to a file the difference between the upper and lower date limits depends on whether tweets are coming in an ascending date order i e when streaming or descending date order i e when searching past tweets param int limit number of data items to process in the current round of processing param tuple upper_date_limit the date at which to stop collecting new data this should be entered as a tuple which can serve as the argument to datetime datetime e g upper_date_limit 2015 4 1 12 40 for 12 30 pm on april 1 2015 param tuple lower_date_limit the date at which to stop collecting new data see upper_data_limit for formatting param str fprefix the prefix to use in creating file names for tweet collections param str subdir the name of the directory where tweet collection files should be stored param bool repeat flag to determine whether multiple files should be written if true the length of each file will be set by the value of limit see also py func handle param gzip_compress if true output files are compressed with gzip return timestamped file name rtype str write twitter data as line delimited json into one or more files return return false if processing should cease otherwise return true param data tweet object returned by twitter api stop for a functional cause e g date limit repeat is true thus close output file and create a new one
import datetime import gzip import itertools import json import os import time import requests from twython import Twython, TwythonStreamer from twython.exceptions import TwythonError, TwythonRateLimitError from nltk.twitter.api import BasicTweetHandler, TweetHandlerI from nltk.twitter.util import credsfromfile, guess_path class Streamer(TwythonStreamer): def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): self.handler = None self.do_continue = True TwythonStreamer.__init__( self, app_key, app_secret, oauth_token, oauth_token_secret ) def register(self, handler): self.handler = handler def on_success(self, data): if self.do_continue: if self.handler is not None: if "text" in data: self.handler.counter += 1 self.handler.handle(data) self.do_continue = self.handler.do_continue() else: raise ValueError("No data handler has been registered.") else: self.disconnect() self.handler.on_finish() def on_error(self, status_code, data): print(status_code) def sample(self): while self.do_continue: try: self.statuses.sample() except requests.exceptions.ChunkedEncodingError as e: if e is not None: print(f"Error (stream will continue): {e}") continue def filter(self, track="", follow="", lang="en"): while self.do_continue: try: if track == "" and follow == "": msg = "Please supply a value for 'track', 'follow'" raise ValueError(msg) self.statuses.filter(track=track, follow=follow, lang=lang) except requests.exceptions.ChunkedEncodingError as e: if e is not None: print(f"Error (stream will continue): {e}") continue class Query(Twython): def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): self.handler = None self.do_continue = True Twython.__init__(self, app_key, app_secret, oauth_token, oauth_token_secret) def register(self, handler): self.handler = handler def expand_tweetids(self, ids_f, verbose=True): ids = [line.strip() for line in ids_f if line] if verbose: print(f"Counted {len(ids)} Tweet IDs in {ids_f}.") id_chunks = [ids[i : i + 100] for i in range(0, len(ids), 100)] chunked_tweets = (self.lookup_status(id=chunk) for chunk in id_chunks) return itertools.chain.from_iterable(chunked_tweets) def _search_tweets(self, keywords, limit=100, lang="en"): while True: tweets = self.search_tweets( keywords=keywords, limit=limit, lang=lang, max_id=self.handler.max_id ) for tweet in tweets: self.handler.handle(tweet) if not (self.handler.do_continue() and self.handler.repeat): break self.handler.on_finish() def search_tweets( self, keywords, limit=100, lang="en", max_id=None, retries_after_twython_exception=0, ): if not self.handler: self.handler = BasicTweetHandler(limit=limit) count_from_query = 0 if max_id: self.handler.max_id = max_id else: results = self.search( q=keywords, count=min(100, limit), lang=lang, result_type="recent" ) count = len(results["statuses"]) if count == 0: print("No Tweets available through REST API for those keywords") return count_from_query = count self.handler.max_id = results["statuses"][count - 1]["id"] - 1 for result in results["statuses"]: yield result self.handler.counter += 1 if self.handler.do_continue() == False: return retries = 0 while count_from_query < limit: try: mcount = min(100, limit - count_from_query) results = self.search( q=keywords, count=mcount, lang=lang, max_id=self.handler.max_id, result_type="recent", ) except TwythonRateLimitError as e: print(f"Waiting for 15 minutes -{e}") time.sleep(15 * 60) continue except TwythonError as e: print(f"Fatal error in Twython request -{e}") if retries_after_twython_exception == 
retries: raise e retries += 1 count = len(results["statuses"]) if count == 0: print("No more Tweets available through rest api") return count_from_query += count self.handler.max_id = results["statuses"][count - 1]["id"] - 1 for result in results["statuses"]: yield result self.handler.counter += 1 if self.handler.do_continue() == False: return def user_info_from_id(self, userids): return [self.show_user(user_id=userid) for userid in userids] def user_tweets(self, screen_name, limit, include_rts="false"): data = self.get_user_timeline( screen_name=screen_name, count=limit, include_rts=include_rts ) for item in data: self.handler.handle(item) class Twitter: def __init__(self): self._oauth = credsfromfile() self.streamer = Streamer(**self._oauth) self.query = Query(**self._oauth) def tweets( self, keywords="", follow="", to_screen=True, stream=True, limit=100, date_limit=None, lang="en", repeat=False, gzip_compress=False, ): if stream: upper_date_limit = date_limit lower_date_limit = None else: upper_date_limit = None lower_date_limit = date_limit if to_screen: handler = TweetViewer( limit=limit, upper_date_limit=upper_date_limit, lower_date_limit=lower_date_limit, ) else: handler = TweetWriter( limit=limit, upper_date_limit=upper_date_limit, lower_date_limit=lower_date_limit, repeat=repeat, gzip_compress=gzip_compress, ) if to_screen: handler = TweetViewer(limit=limit) else: if stream: upper_date_limit = date_limit lower_date_limit = None else: upper_date_limit = None lower_date_limit = date_limit handler = TweetWriter( limit=limit, upper_date_limit=upper_date_limit, lower_date_limit=lower_date_limit, repeat=repeat, gzip_compress=gzip_compress, ) if stream: self.streamer.register(handler) if keywords == "" and follow == "": self.streamer.sample() else: self.streamer.filter(track=keywords, follow=follow, lang=lang) else: self.query.register(handler) if keywords == "": raise ValueError("Please supply at least one keyword to search for.") else: self.query._search_tweets(keywords, limit=limit, lang=lang) class TweetViewer(TweetHandlerI): def handle(self, data): text = data["text"] print(text) self.check_date_limit(data) if self.do_stop: return def on_finish(self): print(f"Written {self.counter} Tweets") class TweetWriter(TweetHandlerI): def __init__( self, limit=2000, upper_date_limit=None, lower_date_limit=None, fprefix="tweets", subdir="twitter-files", repeat=False, gzip_compress=False, ): self.fprefix = fprefix self.subdir = guess_path(subdir) self.gzip_compress = gzip_compress self.fname = self.timestamped_file() self.repeat = repeat self.output = None TweetHandlerI.__init__(self, limit, upper_date_limit, lower_date_limit) def timestamped_file(self): subdir = self.subdir fprefix = self.fprefix if subdir: if not os.path.exists(subdir): os.mkdir(subdir) fname = os.path.join(subdir, fprefix) fmt = "%Y%m%d-%H%M%S" timestamp = datetime.datetime.now().strftime(fmt) if self.gzip_compress: suffix = ".gz" else: suffix = "" outfile = f"{fname}.{timestamp}.json{suffix}" return outfile def handle(self, data): if self.startingup: if self.gzip_compress: self.output = gzip.open(self.fname, "w") else: self.output = open(self.fname, "w") print(f"Writing to {self.fname}") json_data = json.dumps(data) if self.gzip_compress: self.output.write((json_data + "\n").encode("utf-8")) else: self.output.write(json_data + "\n") self.check_date_limit(data) if self.do_stop: return self.startingup = False def on_finish(self): print(f"Written {self.counter} Tweets") if self.output: self.output.close() def do_continue(self): 
if self.repeat == False: return TweetHandlerI.do_continue(self) if self.do_stop: return False if self.counter == self.limit: self._restart_file() return True def _restart_file(self): self.on_finish() self.fname = self.timestamped_file() self.startingup = True self.counter = 0
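A hedged sketch of typical use of the classes above. It assumes twython is installed, that credsfromfile() can locate a valid credentials file (see the util module below), and that network access and Twitter API keys are available, so it is not runnable offline; the keywords and screen name are only examples:

from nltk.twitter import Query, Twitter, TweetWriter, credsfromfile

# High-level wrapper: sample 10 live tweets matching either keyword to the screen.
tw = Twitter()
tw.tweets(keywords="love, hate", limit=10)

# Lower-level REST client: fetch a user's recent tweets and write them to a
# timestamped JSON file via the registered TweetWriter handler.
oauth = credsfromfile()
client = Query(**oauth)
client.register(TweetWriter(limit=100, subdir="twitter-files"))
client.user_tweets("nltk_org", 10)   # screen name is a hypothetical example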
natural language toolkit twitter client c 20012023 nltk project ewan klein ewaninf ed ac uk lorenzo rubio lrnzciggmail com url https www nltk org for license information see license txt authentication utilities to accompany twitterclient convenience function for authentication methods for authenticating with twitter read oauth credentials from a text file file format for oauth 1 appkeyyourappkey appsecretyourappsecret oauthtokenoauthtoken oauthtokensecretoauthtokensecret file format for oauth 2 appkeyyourappkey appsecretyourappsecret accesstokenaccesstoken param str filename file containing credentials none default reads data from twitter credentials txt check validity of a credentials file oauth1 false oauth1keys appkey appsecret oauthtoken oauthtokensecret oauth2 false oauth2keys appkey appsecret accesstoken if allk in self oauth for k in oauth1keys oauth1 true elif allk in self oauth for k in oauth2keys oauth2 true if not oauth1 or oauth2 msg fmissing or incorrect entries in self credsfilen msg pprint pformatself oauth raise valueerrormsg elif verbose printf credentials file self credsfile looks good def addaccesstokencredsfilenone if credsfile is none path os path dirnamefile credsfile os path joinpath credentials2 txt oauth2 credsfromfilecredsfilecredsfile appkey oauth2appkey appsecret oauth2appsecret twitter twythonappkey appsecret oauthversion2 accesstoken twitter obtainaccesstoken tok faccesstokenaccesstokenn with opencredsfile a as infile printtok fileinfile def guesspathpth if os path isabspth return pth else return os path expanduseros path join pth natural language toolkit twitter client c 2001 2023 nltk project ewan klein ewan inf ed ac uk lorenzo rubio lrnzcig gmail com url https www nltk org for license information see license txt authentication utilities to accompany twitterclient convenience function for authentication methods for authenticating with twitter read oauth credentials from a text file file format for oauth 1 app_key your_app_key app_secret your_app_secret oauth_token oauth_token oauth_token_secret oauth_token_secret file format for oauth 2 app_key your_app_key app_secret your_app_secret access_token access_token param str file_name file containing credentials none default reads data from twitter credentials txt check validity of a credentials file for oauth 2 retrieve an access token for an app and append it to a credentials file if the path is not absolute guess that it is a subdirectory of the user s home directory param str pth the pathname of the directory where files of tweets should be written
import os
import pprint

from twython import Twython


def credsfromfile(creds_file=None, subdir=None, verbose=False):
    return Authenticate().load_creds(
        creds_file=creds_file, subdir=subdir, verbose=verbose
    )


class Authenticate:
    def __init__(self):
        self.creds_file = "credentials.txt"
        self.creds_fullpath = None

        self.oauth = {}
        try:
            self.twitter_dir = os.environ["TWITTER"]
            self.creds_subdir = self.twitter_dir
        except KeyError:
            self.twitter_dir = None
            self.creds_subdir = None

    def load_creds(self, creds_file=None, subdir=None, verbose=False):
        if creds_file is not None:
            self.creds_file = creds_file

        if subdir is None:
            if self.creds_subdir is None:
                msg = (
                    "Supply a value to the 'subdir' parameter or"
                    + " set the TWITTER environment variable."
                )
                raise ValueError(msg)
        else:
            self.creds_subdir = subdir

        self.creds_fullpath = os.path.normpath(
            os.path.join(self.creds_subdir, self.creds_file)
        )

        if not os.path.isfile(self.creds_fullpath):
            raise OSError(f"Cannot find file {self.creds_fullpath}")

        with open(self.creds_fullpath) as infile:
            if verbose:
                print(f"Reading credentials file {self.creds_fullpath}")

            for line in infile:
                if "=" in line:
                    name, value = line.split("=", 1)
                    self.oauth[name.strip()] = value.strip()

        self._validate_creds_file(verbose=verbose)

        return self.oauth

    def _validate_creds_file(self, verbose=False):
        oauth1 = False
        oauth1_keys = ["app_key", "app_secret", "oauth_token", "oauth_token_secret"]
        oauth2 = False
        oauth2_keys = ["app_key", "app_secret", "access_token"]
        if all(k in self.oauth for k in oauth1_keys):
            oauth1 = True
        elif all(k in self.oauth for k in oauth2_keys):
            oauth2 = True

        if not (oauth1 or oauth2):
            msg = f"Missing or incorrect entries in {self.creds_file}\n"
            msg += pprint.pformat(self.oauth)
            raise ValueError(msg)
        elif verbose:
            print(f'Credentials file "{self.creds_file}" looks good')


def add_access_token(creds_file=None):
    if creds_file is None:
        path = os.path.dirname(__file__)
        creds_file = os.path.join(path, "credentials2.txt")
    oauth2 = credsfromfile(creds_file=creds_file)
    app_key = oauth2["app_key"]
    app_secret = oauth2["app_secret"]
    twitter = Twython(app_key, app_secret, oauth_version=2)
    access_token = twitter.obtain_access_token()
    tok = f"access_token={access_token}\n"
    with open(creds_file, "a") as infile:
        print(tok, file=infile)


def guess_path(pth):
    if os.path.isabs(pth):
        return pth
    else:
        return os.path.expanduser(os.path.join("~", pth))
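A sketch of how the helpers above are usually wired together. The credentials file contents and the /path/to/twitter-files location are hypothetical examples; OAuth 1 keys are shown, whereas an OAuth 2 file would instead carry app_key, app_secret and access_token:

# Hypothetical credentials.txt placed in the directory named by $TWITTER:
#
#     app_key=YOUR_APP_KEY
#     app_secret=YOUR_APP_SECRET
#     oauth_token=YOUR_OAUTH_TOKEN
#     oauth_token_secret=YOUR_OAUTH_TOKEN_SECRET

import os

os.environ["TWITTER"] = "/path/to/twitter-files"   # hypothetical location

from nltk.twitter.util import credsfromfile, guess_path

oauth = credsfromfile(verbose=True)   # returns the key/value pairs as a dict
print(sorted(oauth))                  # ['app_key', 'app_secret', 'oauth_token', 'oauth_token_secret']

print(guess_path("twitter-files"))    # relative names are resolved under the home directory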
natural language toolkit utility functions c 20012023 nltk project steven bird stevenbird1gmail com eric kafe kafe ericgmail com acyclic closures url https www nltk org for license information see license txt short usage message builtins sometimes don t support introspection idle return true if this function is run within idle tkinter programs that are run in idle should never call tk mainloop so this function should be used to gate all calls to tk mainloop warning this function works by checking sys stdin if the user has modified sys stdin then it may return incorrect results rtype bool pretty printing pretty print a sequence of data items param data the data stream to print type data sequence or iter param start the start position type start int param end the end position type end int pretty print a string breaking lines on whitespace param s the string to print consisting of words and spaces type s str param width the display width type width int pretty print a list of text tokens breaking lines on whitespace param tokens the tokens to print type tokens list param separator the string to use to separate tokens type separator str param width the display width default70 type width int indexing regexp display thanks to david mertz return a string with markers surrounding the matched substrings search str for substrings matching regexp and wrap the matches with braces this is convenient for learning about regular expressions param regexp the regular expression type regexp str param string the string being matched type string str param left the left delimiter printed before the matched substring type left str param right the right delimiter printed after the matched substring type right str rtype str read from file or string recipe from david mertz breadthfirst search traverse the nodes of a tree in breadthfirst order no check for cycles the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children graph drawing yield the edges of a graph in breadthfirst order discarding eventual cycles the first argument should be the start node children should be a function taking as argument a graph node and returning an iterator of the node s children from nltk util import edgeclosure printlistedgeclosure a lambda node a b c b c c b node a b a c b c c b param edges the set or list of edges of a directed graph return dotstring a representation of edges as a string in the dot graph language which can be converted to an image by the dot program from the graphviz package or nltk parse dependencygraph dot2imgdotstring param shapes dictionary of strings that trigger a specified shape param attr dictionary with global graph attributes import nltk from nltk util import edges2dot printedges2dot a b a c b c c b digraph g a b a c b c c b blankline build a minimum spanning tree mst of an unweighted graph by traversing the nodes of a tree in breadthfirst order discarding eventual cycles return a representation of this mst as a string in the dot graph language which can be converted to an image by the dot program from the graphviz package or nltk parse dependencygraph dot2imgdotstring the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children import nltk wnnltk corpus wordnet from nltk util import unweightedminimumspanningdigraph as umsd printumsdwn synset bound a 01 lambda s s alsosees digraph g synset bound a 01 synset unfree a 02 
synset unfree a 02 synset confined a 02 synset unfree a 02 synset dependent a 01 synset unfree a 02 synset restricted a 01 synset restricted a 01 synset classified a 02 blankline breadthfirst depthfirst searches with cycle detection traverse the nodes of a tree in breadthfirst order discarding eventual cycles the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children traverse the nodes of a tree in depthfirst order discarding eventual cycles within any branch adding cutmark when specified if cycles were truncated the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children catches all cycles import nltk from nltk util import acyclicdepthfirst as acyclictree wnnltk corpus wordnet from pprint import pprint pprintacyclictreewn synset dog n 01 lambda s s hypernyms cutmark synset dog n 01 synset canine n 02 synset carnivore n 01 synset placental n 01 synset mammal n 01 synset vertebrate n 01 synset chordate n 01 synset animal n 01 synset organism n 01 synset livingthing n 01 synset whole n 02 synset object n 01 synset physicalentity n 01 synset entity n 01 synset domesticanimal n 01 cyclesynset animal n 01 3 recurse with a common traversed set for all children traverse the nodes of a tree in depthfirst order discarding eventual cycles within the same branch but keep duplicate paths in different branches add cutmark when defined if cycles were truncated the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children catches only only cycles within the same branch but keeping cycles from different branches import nltk from nltk util import acyclicbranchesdepthfirst as tree wnnltk corpus wordnet from pprint import pprint pprinttreewn synset certified a 01 lambda s s alsosees cutmark depth4 synset certified a 01 synset ized a 01 synset lawful a 01 synset legal a 01 cyclesynset lawful a 01 0 synset legitimate a 01 synset straight a 06 synset honest a 01 cyclesynset lawful a 01 0 synset legitimate a 01 cyclesynset ized a 01 1 synset legal a 01 synset lawful a 01 cyclesynset legitimate a 01 0 synset valid a 01 cyclesynset legitimate a 01 0 synset reasonable a 01 synset official a 01 cyclesynset ized a 01 1 synset documented a 01 recurse with a different traversed set for each child convert acyclic dictionary dic where the keys are nodes and the values are lists of children to output tree suitable for pprint starting at root node with subtrees as nested lists return node acyclicdic2treechild dic for child in dicnode def unweightedminimumspanningdicttree childreniter traversed set empty set of traversed nodes queue dequetree initialize queue agenda tree set of all nodes ever queued mstdic empty mst dictionary while queue node queue popleft node is not yet in the mst dictionary mstdicnode so add it with an empty list of children if node not in traversed avoid cycles traversed addnode for child in childrennode if child not in agenda queue nodes only once mstdicnode appendchild add child to the mst queue appendchild add child to queue agenda addchild return mstdic def unweightedminimumspanningtreetree childreniter return acyclicdic2treetree unweightedminimumspanningdicttree children guess character encoding adapted from io py in the docutils extension module https docutils sourceforge io http www pyzine 
comissue008sectionarticlesarticleencodings html def guessencodingdata successfulencoding none we make utf8 the first encoding encodings utf8 next we add anything we can learn from the locale try encodings appendlocale nllanginfolocale codeset except attributeerror pass try encodings appendlocale getlocale1 except attributeerror indexerror pass try encodings appendlocale getdefaultlocale1 except attributeerror indexerror pass we try latin1 last encodings appendlatin1 for enc in encodings some of the locale calls may have returned none if not enc continue try decoded strdata enc successfulencoding enc except unicodeerror lookuperror pass else break if not successfulencoding raise unicodeerror unable to decode input data tried the following encodings s joinreprenc for enc in encodings if enc else return decoded successfulencoding remove repeated elements from a list deterministcally def uniquelistxs seen set not seen addx here acts to make the code shorter without using if statements seen addx always returns none return x for x in xs if x not in seen and not seen addx invert a dictionary def invertdictd inverteddict defaultdictlist for key in d if hasattrdkey iter for term in dkey inverteddictterm appendkey else inverteddictdkey key return inverteddict utilities for directed graphs transitive closure and inversion the graph is represented as a dictionary of sets def transitiveclosuregraph reflexivefalse if reflexive baseset lambda k k else baseset lambda k set the graph ui in the article agendagraph k graphk copy for k in graph the graph mi in the article closuregraph k basesetk for k in graph for i in graph agenda agendagraphi closure closuregraphi while agenda j agenda pop closure addj closure closuregraph setdefaultj basesetj agenda agendagraph getj basesetj agenda closure return closuregraph def invertgraphgraph inverted for key in graph for value in graphkey inverted setdefaultvalue set addkey return inverted html cleaning def cleanhtmlhtml raise notimplementederror to remove html markup use beautifulsoup s gettext function def cleanurlurl raise notimplementederror to remove html markup use beautifulsoup s gettext function flatten lists def flattenargs x for l in args if not isinstancel list tuple l l for item in l if isinstanceitem list tuple x extendflattenitem else x appenditem return x ngram iteration def padsequence sequence n padleftfalse padrightfalse leftpadsymbolnone rightpadsymbolnone sequence itersequence if padleft sequence chainleftpadsymbol n 1 sequence if padright sequence chainsequence rightpadsymbol n 1 return sequence add a flag to pad the sequence so we get peripheral ngrams def ngramssequence n kwargs sequence padsequencesequence n kwargs creates the sliding window of n no of items iterables is a tuple of iterables where each iterable is a window of n items iterables teesequence n for i subiterable in enumerateiterables for each window for in rangei iterate through every order of ngrams nextsubiterable none generate the ngrams within the window return zipiterables unpack and flattens the iterables def bigramssequence kwargs yield from ngramssequence 2 kwargs def trigramssequence kwargs yield from ngramssequence 3 kwargs def everygrams sequence minlen1 maxlen1 padleftfalse padrightfalse kwargs get maxlen for padding if maxlen 1 try maxlen lensequence except typeerror sequence listsequence maxlen lensequence pad if indicated using maxlen sequence padsequencesequence maxlen padleft padright kwargs sliding window to store grams history listislicesequence maxlen yield 
ngrams from sequence while history for ngramlen in rangeminlen lenhistory 1 yield tuplehistory ngramlen append element to history if sequence has more items try history appendnextsequence except stopiteration pass del history0 def skipgramssequence n k kwargs pads the sequence as desired by kwargs if padleft in kwargs or padright in kwargs sequence padsequencesequence n kwargs note when iterating through the ngrams the padright here is not the kwargs padding it s for the algorithm to detect the sentinel object on the right pad to stop inner loop sentinel object for ngram in ngramssequence n k padrighttrue rightpadsymbolsentinel head ngram 1 tail ngram1 for skiptail in combinationstail n 1 if skiptail1 is sentinel continue yield head skiptail binary search in a file inherited from pywordnet by oliver steele def binarysearchfilefile key cachenone cachedepth1 key key keylen lenkey start 0 currentdepth 0 if hasattrfile name end os statfile name stsize 1 else file seek0 2 end file tell 1 file seek0 if cache is none cache while start end laststate start end middle start end 2 if cache getmiddle offset line cachemiddle else line while true file seekmax0 middle 1 if middle 0 file discardline offset file tell line file readline if line break at eof try to find start of the last line middle start middle 2 if middle end 1 return none if currentdepth cachedepth cachemiddle offset line if offset end assert end middle 1 infinite loop end middle 1 elif line keylen key return line elif line key assert end middle 1 infinite loop end middle 1 elif line key start offset lenline 1 currentdepth 1 thisstate start end if laststate thisstate detects the condition where we re searching past the end of the file which is otherwise difficult to detect return none return none proxy configuration def setproxyproxy usernone password if proxy is none try and find the system proxy settings try proxy getproxieshttp except keyerror as e raise valueerrorcould not detect default proxy settings from e set up the proxy handler proxyhandler proxyhandlerhttps proxy http proxy opener buildopenerproxyhandler if user is not none set up basic proxy authentication if provided passwordmanager httppasswordmgrwithdefaultrealm passwordmanager addpasswordrealmnone uriproxy useruser passwdpassword opener addhandlerproxybasicauthhandlerpasswordmanager opener addhandlerproxydigestauthhandlerpasswordmanager override the existing url opener installopeneropener elementtree pretty printing from https www effbot orgzoneelementlib htm def elementtreeindentelem level0 i n level if lenelem if not elem text or not elem text strip elem text i for elem in elem elementtreeindentelem level 1 if not elem tail or not elem tail strip elem tail i else if level and not elem tail or not elem tail strip elem tail i mathematical approximations def choosen k if 0 k n ntok ktok 1 1 for t in range1 mink n k 1 ntok n ktok t n 1 return ntok ktok else return 0 iteration utilities def pairwiseiterable parallelization natural language toolkit utility functions c 2001 2023 nltk project steven bird stevenbird1 gmail com eric kafe kafe eric gmail com acyclic closures url https www nltk org for license information see license txt short usage message in case it s lazy this will load it builtins sometimes don t support introspection idle return true if this function is run within idle tkinter programs that are run in idle should never call tk mainloop so this function should be used to gate all calls to tk mainloop warning this function works by checking sys stdin if the user 
has modified sys stdin then it may return incorrect results rtype bool pretty printing pretty print a sequence of data items param data the data stream to print type data sequence or iter param start the start position type start int param end the end position type end int pretty print a string breaking lines on whitespace param s the string to print consisting of words and spaces type s str param width the display width type width int pretty print a list of text tokens breaking lines on whitespace param tokens the tokens to print type tokens list param separator the string to use to separate tokens type separator str param width the display width default 70 type width int indexing regexp display thanks to david mertz return a string with markers surrounding the matched substrings search str for substrings matching regexp and wrap the matches with braces this is convenient for learning about regular expressions param regexp the regular expression type regexp str param string the string being matched type string str param left the left delimiter printed before the matched substring type left str param right the right delimiter printed after the matched substring type right str rtype str read from file or string recipe from david mertz breadth first search traverse the nodes of a tree in breadth first order no check for cycles the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children graph drawing yield the edges of a graph in breadth first order discarding eventual cycles the first argument should be the start node children should be a function taking as argument a graph node and returning an iterator of the node s children from nltk util import edge_closure print list edge_closure a lambda node a b c b c c b node a b a c b c c b param edges the set or list of edges of a directed graph return dot_string a representation of edges as a string in the dot graph language which can be converted to an image by the dot program from the graphviz package or nltk parse dependencygraph dot2img dot_string param shapes dictionary of strings that trigger a specified shape param attr dictionary with global graph attributes import nltk from nltk util import edges2dot print edges2dot a b a c b c c b digraph g a b a c b c c b blankline build a minimum spanning tree mst of an unweighted graph by traversing the nodes of a tree in breadth first order discarding eventual cycles return a representation of this mst as a string in the dot graph language which can be converted to an image by the dot program from the graphviz package or nltk parse dependencygraph dot2img dot_string the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children import nltk wn nltk corpus wordnet from nltk util import unweighted_minimum_spanning_digraph as umsd print umsd wn synset bound a 01 lambda s s also_sees digraph g synset bound a 01 synset unfree a 02 synset unfree a 02 synset confined a 02 synset unfree a 02 synset dependent a 01 synset unfree a 02 synset restricted a 01 synset restricted a 01 synset classified a 02 blankline breadth first depth first searches with cycle detection traverse the nodes of a tree in breadth first order discarding eventual cycles the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children traverse the nodes of a tree in depth 
first order discarding eventual cycles within any branch adding cut_mark when specified if cycles were truncated the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children catches all cycles import nltk from nltk util import acyclic_depth_first as acyclic_tree wn nltk corpus wordnet from pprint import pprint pprint acyclic_tree wn synset dog n 01 lambda s s hypernyms cut_mark synset dog n 01 synset canine n 02 synset carnivore n 01 synset placental n 01 synset mammal n 01 synset vertebrate n 01 synset chordate n 01 synset animal n 01 synset organism n 01 synset living_thing n 01 synset whole n 02 synset object n 01 synset physical_entity n 01 synset entity n 01 synset domestic_animal n 01 cycle synset animal n 01 3 recurse with a common traversed set for all children traverse the nodes of a tree in depth first order discarding eventual cycles within the same branch but keep duplicate paths in different branches add cut_mark when defined if cycles were truncated the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children catches only only cycles within the same branch but keeping cycles from different branches import nltk from nltk util import acyclic_branches_depth_first as tree wn nltk corpus wordnet from pprint import pprint pprint tree wn synset certified a 01 lambda s s also_sees cut_mark depth 4 synset certified a 01 synset ized a 01 synset lawful a 01 synset legal a 01 cycle synset lawful a 01 0 synset legitimate a 01 synset straight a 06 synset honest a 01 cycle synset lawful a 01 0 synset legitimate a 01 cycle synset ized a 01 1 synset legal a 01 synset lawful a 01 cycle synset legitimate a 01 0 synset valid a 01 cycle synset legitimate a 01 0 synset reasonable a 01 synset official a 01 cycle synset ized a 01 1 synset documented a 01 recurse with a different traversed set for each child convert acyclic dictionary dic where the keys are nodes and the values are lists of children to output tree suitable for pprint starting at root node with subtrees as nested lists output a dictionary representing a minimum spanning tree mst of an unweighted graph by traversing the nodes of a tree in breadth first order discarding eventual cycles the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children import nltk from nltk corpus import wordnet as wn from nltk util import unweighted_minimum_spanning_dict as umsd from pprint import pprint pprint umsd wn synset bound a 01 lambda s s also_sees synset bound a 01 synset unfree a 02 synset classified a 02 synset confined a 02 synset dependent a 01 synset restricted a 01 synset classified a 02 synset unfree a 02 synset confined a 02 synset dependent a 01 synset restricted a 01 empty set of traversed nodes initialize queue set of all nodes ever queued empty mst dictionary node is not yet in the mst dictionary so add it with an empty list of children avoid cycles queue nodes only once add child to the mst add child to queue output a minimum spanning tree mst of an unweighted graph by traversing the nodes of a tree in breadth first order discarding eventual cycles the first argument should be the tree root children should be a function taking as argument a tree node and returning an iterator of the node s children import nltk from nltk util import unweighted_minimum_spanning_tree 
as mst wn nltk corpus wordnet from pprint import pprint pprint mst wn synset bound a 01 lambda s s also_sees synset bound a 01 synset unfree a 02 synset confined a 02 synset dependent a 01 synset restricted a 01 synset classified a 02 guess character encoding adapted from io py in the docutils extension module https docutils sourceforge io http www pyzine com issue008 section_articles article_encodings html given a byte string attempt to decode it tries the standard utf8 and latin 1 encodings plus several gathered from locale information the calling program must first call locale setlocale locale lc_all if successful it returns decoded_unicode successful_encoding if unsuccessful it raises a unicodeerror we make utf 8 the first encoding next we add anything we can learn from the locale we try latin 1 last some of the locale calls may have returned none remove repeated elements from a list deterministcally not seen add x here acts to make the code shorter without using if statements seen add x always returns none invert a dictionary utilities for directed graphs transitive closure and inversion the graph is represented as a dictionary of sets calculate the transitive closure of a directed graph optionally the reflexive transitive closure the algorithm is a slight modification of the marking algorithm of ioannidis ramakrishnan 1998 efficient transitive closure algorithms param graph the initial graph represented as a dictionary of sets type graph dict set param reflexive if set also make the closure reflexive type reflexive bool rtype dict set the graph u_i in the article the graph m_i in the article inverts a directed graph param graph the graph represented as a dictionary of sets type graph dict set return the inverted graph rtype dict set html cleaning flatten lists flatten a list from nltk util import flatten flatten 1 2 b a c d 3 1 2 b a c d 3 param args items and lists to be combined into a single list rtype list ngram iteration returns a padded sequence of items before ngram extraction list pad_sequence 1 2 3 4 5 2 pad_left true pad_right true left_pad_symbol s right_pad_symbol s s 1 2 3 4 5 s list pad_sequence 1 2 3 4 5 2 pad_left true left_pad_symbol s s 1 2 3 4 5 list pad_sequence 1 2 3 4 5 2 pad_right true right_pad_symbol s 1 2 3 4 5 s param sequence the source data to be padded type sequence sequence or iter param n the degree of the ngrams type n int param pad_left whether the ngrams should be left padded type pad_left bool param pad_right whether the ngrams should be right padded type pad_right bool param left_pad_symbol the symbol to use for left padding default is none type left_pad_symbol any param right_pad_symbol the symbol to use for right padding default is none type right_pad_symbol any rtype sequence or iter add a flag to pad the sequence so we get peripheral ngrams return the ngrams generated from a sequence of items as an iterator for example from nltk util import ngrams list ngrams 1 2 3 4 5 3 1 2 3 2 3 4 3 4 5 wrap with list for a list version of this function set pad_left or pad_right to true in order to get additional ngrams list ngrams 1 2 3 4 5 2 pad_right true 1 2 2 3 3 4 4 5 5 none list ngrams 1 2 3 4 5 2 pad_right true right_pad_symbol s 1 2 2 3 3 4 4 5 5 s list ngrams 1 2 3 4 5 2 pad_left true left_pad_symbol s s 1 1 2 2 3 3 4 4 5 list ngrams 1 2 3 4 5 2 pad_left true pad_right true left_pad_symbol s right_pad_symbol s s 1 1 2 2 3 3 4 4 5 5 s param sequence the source data to be converted into ngrams type sequence sequence or iter param n the degree of the 
ngrams type n int param pad_left whether the ngrams should be left padded type pad_left bool param pad_right whether the ngrams should be right padded type pad_right bool param left_pad_symbol the symbol to use for left padding default is none type left_pad_symbol any param right_pad_symbol the symbol to use for right padding default is none type right_pad_symbol any rtype sequence or iter creates the sliding window of n no of items iterables is a tuple of iterables where each iterable is a window of n items for each window iterate through every order of ngrams generate the ngrams within the window unpack and flattens the iterables return the bigrams generated from a sequence of items as an iterator for example from nltk util import bigrams list bigrams 1 2 3 4 5 1 2 2 3 3 4 4 5 use bigrams for a list version of this function param sequence the source data to be converted into bigrams type sequence sequence or iter rtype iter tuple return the trigrams generated from a sequence of items as an iterator for example from nltk util import trigrams list trigrams 1 2 3 4 5 1 2 3 2 3 4 3 4 5 use trigrams for a list version of this function param sequence the source data to be converted into trigrams type sequence sequence or iter rtype iter tuple returns all possible ngrams generated from a sequence of items as an iterator sent a b c split new version outputs for everygrams list everygrams sent a a b a b c b b c c old version outputs for everygrams sorted everygrams sent key len a b c a b b c a b c list everygrams sent max_len 2 a a b b b c c param sequence the source data to be converted into ngrams if max_len is not provided this sequence will be loaded into memory type sequence sequence or iter param min_len minimum length of the ngrams aka n gram order degree of ngram type min_len int param max_len maximum length of the ngrams set to length of sequence by default type max_len int param pad_left whether the ngrams should be left padded type pad_left bool param pad_right whether the ngrams should be right padded type pad_right bool rtype iter tuple get max_len for padding pad if indicated using max_len sliding window to store grams yield ngrams from sequence append element to history if sequence has more items returns all possible skipgrams generated from a sequence of items as an iterator skipgrams are ngrams that allows tokens to be skipped refer to http homepages inf ed ac uk ballison pdf lrec_skipgrams pdf sent insurgents killed in ongoing fighting split list skipgrams sent 2 2 insurgents killed insurgents in insurgents ongoing killed in killed ongoing killed fighting in ongoing in fighting ongoing fighting list skipgrams sent 3 2 insurgents killed in insurgents killed ongoing insurgents killed fighting insurgents in ongoing insurgents in fighting insurgents ongoing fighting killed in ongoing killed in fighting killed ongoing fighting in ongoing fighting param sequence the source data to be converted into trigrams type sequence sequence or iter param n the degree of the ngrams type n int param k the skip distance type k int rtype iter tuple pads the sequence as desired by kwargs note when iterating through the ngrams the pad_right here is not the kwargs padding it s for the algorithm to detect the sentinel object on the right pad to stop inner loop binary search in a file inherited from pywordnet by oliver steele return the line from the file with first word key searches through a sorted file using the binary search algorithm type file file param file the file to be searched through type key 
str param key the identifier we are searching for at eof try to find start of the last line detects the condition where we re searching past the end of the file which is otherwise difficult to detect proxy configuration set the http proxy for python to download through if proxy is none then tries to set proxy from environment or system settings param proxy the http proxy server to use for example http proxy example com 3128 param user the username to authenticate with use none to disable authentication param password the password to authenticate with try and find the system proxy settings set up the proxy handler set up basic proxy authentication if provided override the existing url opener elementtree pretty printing from https www effbot org zone element lib htm recursive function to indent an elementtree _elementinterface used for pretty printing run indent on elem and then output in the normal way param elem element to be indented will be modified type elem elementtree _elementinterface param level level of indentation for this element type level nonnegative integer rtype elementtree _elementinterface return contents of elem indented to reflect its structure mathematical approximations this function is a fast way to calculate binomial coefficients commonly known as nck i e the number of combinations of n things taken k at a time https en wikipedia org wiki binomial_coefficient this is the scipy special comb with long integer computation but this approximation is faster see https github com nltk nltk issues 1181 choose 4 2 6 choose 6 2 15 param n the number of things type n int param r the number of times a thing is taken type r int iteration utilities s s0 s1 s1 s2 s2 s3 parallelization
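A compact worked illustration of the padding and n-gram helpers documented above, using the public nltk.util functions (standard API; the sentence is only sample data):

from nltk.util import bigrams, everygrams, ngrams, pad_sequence, skipgrams

sent = "insurgents killed in ongoing fighting".split()

print(list(ngrams(sent, 3))[:2])               # first two trigrams
print(list(bigrams(sent))[:2])                 # first two bigrams
print(list(everygrams(sent, max_len=2))[:4])   # unigrams and bigrams, interleaved per position
print(list(skipgrams(sent, 2, 1))[:3])         # bigrams allowing up to one skipped token

# Padding adds sentence-boundary symbols so peripheral n-grams are produced too.
print(list(pad_sequence(sent, 2, pad_left=True, pad_right=True,
                        left_pad_symbol="<s>", right_pad_symbol="</s>")))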
import inspect import locale import os import pydoc import re import textwrap import warnings from collections import defaultdict, deque from itertools import chain, combinations, islice, tee from pprint import pprint from urllib.request import ( HTTPPasswordMgrWithDefaultRealm, ProxyBasicAuthHandler, ProxyDigestAuthHandler, ProxyHandler, build_opener, getproxies, install_opener, ) from nltk.collections import * from nltk.internals import deprecated, raise_unorderable_types, slice_bounds @deprecated("Use help(obj) instead.") def usage(obj): str(obj) if not isinstance(obj, type): obj = obj.__class__ print(f"{obj.__name__} supports the following operations:") for (name, method) in sorted(pydoc.allmethods(obj).items()): if name.startswith("_"): continue if getattr(method, "__deprecated__", False): continue try: sig = str(inspect.signature(method)) except ValueError as e: if "builtin" in str(e): continue else: raise args = sig.lstrip("(").rstrip(")").split(", ") meth = inspect.getattr_static(obj, name) if isinstance(meth, (classmethod, staticmethod)): name = f"cls.{name}" elif args and args[0] == "self": name = f"self.{name}" args.pop(0) print( textwrap.fill( f"{name}({', '.join(args)})", initial_indent=" - ", subsequent_indent=" " * (len(name) + 5), ) ) def in_idle(): import sys return sys.stdin.__class__.__name__ in ("PyShell", "RPCProxy") def pr(data, start=0, end=None): pprint(list(islice(data, start, end))) def print_string(s, width=70): print("\n".join(textwrap.wrap(s, width=width))) def tokenwrap(tokens, separator=" ", width=70): return "\n".join(textwrap.wrap(separator.join(tokens), width=width)) class Index(defaultdict): def __init__(self, pairs): defaultdict.__init__(self, list) for key, value in pairs: self[key].append(value) def re_show(regexp, string, left="{", right="}"): print(re.compile(regexp, re.M).sub(left + r"\g<0>" + right, string.rstrip())) def filestring(f): if hasattr(f, "read"): return f.read() elif isinstance(f, str): with open(f) as infile: return infile.read() else: raise ValueError("Must be called with a filename or file-like object") def breadth_first(tree, children=iter, maxdepth=-1): queue = deque([(tree, 0)]) while queue: node, depth = queue.popleft() yield node if depth != maxdepth: try: queue.extend((c, depth + 1) for c in children(node)) except TypeError: pass def edge_closure(tree, children=iter, maxdepth=-1, verbose=False): traversed = set() edges = set() queue = deque([(tree, 0)]) while queue: node, depth = queue.popleft() traversed.add(node) if depth != maxdepth: try: for child in children(node): if child not in traversed: queue.append((child, depth + 1)) else: if verbose: warnings.warn( f"Discarded redundant search for {child} at depth {depth + 1}", stacklevel=2, ) edge = (node, child) if edge not in edges: yield edge edges.add(edge) except TypeError: pass def edges2dot(edges, shapes=None, attr=None): if not shapes: shapes = dict() if not attr: attr = dict() dot_string = "digraph G {\n" for pair in attr.items(): dot_string += f"{pair[0]} = {pair[1]};\n" for edge in edges: for shape in shapes.items(): for node in range(2): if shape[0] in repr(edge[node]): dot_string += f'"{edge[node]}" [shape = {shape[1]}];\n' dot_string += f'"{edge[0]}" -> "{edge[1]}";\n' dot_string += "}\n" return dot_string def unweighted_minimum_spanning_digraph(tree, children=iter, shapes=None, attr=None): return edges2dot( edge_closure( tree, lambda node: unweighted_minimum_spanning_dict(tree, children)[node] ), shapes, attr, ) def acyclic_breadth_first(tree, children=iter, 
maxdepth=-1): traversed = set() queue = deque([(tree, 0)]) while queue: node, depth = queue.popleft() yield node traversed.add(node) if depth != maxdepth: try: for child in children(node): if child not in traversed: queue.append((child, depth + 1)) else: warnings.warn( "Discarded redundant search for {} at depth {}".format( child, depth + 1 ), stacklevel=2, ) except TypeError: pass def acyclic_depth_first(tree, children=iter, depth=-1, cut_mark=None, traversed=None): if traversed is None: traversed = {tree} out_tree = [tree] if depth != 0: try: for child in children(tree): if child not in traversed: traversed.add(child) out_tree += [ acyclic_depth_first( child, children, depth - 1, cut_mark, traversed ) ] else: warnings.warn( "Discarded redundant search for {} at depth {}".format( child, depth - 1 ), stacklevel=3, ) if cut_mark: out_tree += [f"Cycle({child},{depth - 1},{cut_mark})"] except TypeError: pass elif cut_mark: out_tree += [cut_mark] return out_tree def acyclic_branches_depth_first( tree, children=iter, depth=-1, cut_mark=None, traversed=None ): if traversed is None: traversed = {tree} out_tree = [tree] if depth != 0: try: for child in children(tree): if child not in traversed: out_tree += [ acyclic_branches_depth_first( child, children, depth - 1, cut_mark, traversed.union({child}), ) ] else: warnings.warn( "Discarded redundant search for {} at depth {}".format( child, depth - 1 ), stacklevel=3, ) if cut_mark: out_tree += [f"Cycle({child},{depth - 1},{cut_mark})"] except TypeError: pass elif cut_mark: out_tree += [cut_mark] return out_tree def acyclic_dic2tree(node, dic): return [node] + [acyclic_dic2tree(child, dic) for child in dic[node]] def unweighted_minimum_spanning_dict(tree, children=iter): traversed = set() queue = deque([tree]) agenda = {tree} mstdic = {} while queue: node = queue.popleft() mstdic[node] = [] if node not in traversed: traversed.add(node) for child in children(node): if child not in agenda: mstdic[node].append(child) queue.append(child) agenda.add(child) return mstdic def unweighted_minimum_spanning_tree(tree, children=iter): return acyclic_dic2tree(tree, unweighted_minimum_spanning_dict(tree, children)) def guess_encoding(data): successful_encoding = None encodings = ["utf-8"] try: encodings.append(locale.nl_langinfo(locale.CODESET)) except AttributeError: pass try: encodings.append(locale.getlocale()[1]) except (AttributeError, IndexError): pass try: encodings.append(locale.getdefaultlocale()[1]) except (AttributeError, IndexError): pass encodings.append("latin-1") for enc in encodings: if not enc: continue try: decoded = str(data, enc) successful_encoding = enc except (UnicodeError, LookupError): pass else: break if not successful_encoding: raise UnicodeError( "Unable to decode input data. " "Tried the following encodings: %s." 
% ", ".join([repr(enc) for enc in encodings if enc]) ) else: return (decoded, successful_encoding) def unique_list(xs): seen = set() return [x for x in xs if x not in seen and not seen.add(x)] def invert_dict(d): inverted_dict = defaultdict(list) for key in d: if hasattr(d[key], "__iter__"): for term in d[key]: inverted_dict[term].append(key) else: inverted_dict[d[key]] = key return inverted_dict def transitive_closure(graph, reflexive=False): if reflexive: base_set = lambda k: {k} else: base_set = lambda k: set() agenda_graph = {k: graph[k].copy() for k in graph} closure_graph = {k: base_set(k) for k in graph} for i in graph: agenda = agenda_graph[i] closure = closure_graph[i] while agenda: j = agenda.pop() closure.add(j) closure |= closure_graph.setdefault(j, base_set(j)) agenda |= agenda_graph.get(j, base_set(j)) agenda -= closure return closure_graph def invert_graph(graph): inverted = {} for key in graph: for value in graph[key]: inverted.setdefault(value, set()).add(key) return inverted def clean_html(html): raise NotImplementedError( "To remove HTML markup, use BeautifulSoup's get_text() function" ) def clean_url(url): raise NotImplementedError( "To remove HTML markup, use BeautifulSoup's get_text() function" ) def flatten(*args): x = [] for l in args: if not isinstance(l, (list, tuple)): l = [l] for item in l: if isinstance(item, (list, tuple)): x.extend(flatten(item)) else: x.append(item) return x def pad_sequence( sequence, n, pad_left=False, pad_right=False, left_pad_symbol=None, right_pad_symbol=None, ): sequence = iter(sequence) if pad_left: sequence = chain((left_pad_symbol,) * (n - 1), sequence) if pad_right: sequence = chain(sequence, (right_pad_symbol,) * (n - 1)) return sequence def ngrams(sequence, n, **kwargs): sequence = pad_sequence(sequence, n, **kwargs) iterables = tee(sequence, n) for i, sub_iterable in enumerate(iterables): for _ in range(i): next(sub_iterable, None) return zip(*iterables) def bigrams(sequence, **kwargs): yield from ngrams(sequence, 2, **kwargs) def trigrams(sequence, **kwargs): yield from ngrams(sequence, 3, **kwargs) def everygrams( sequence, min_len=1, max_len=-1, pad_left=False, pad_right=False, **kwargs ): if max_len == -1: try: max_len = len(sequence) except TypeError: sequence = list(sequence) max_len = len(sequence) sequence = pad_sequence(sequence, max_len, pad_left, pad_right, **kwargs) history = list(islice(sequence, max_len)) while history: for ngram_len in range(min_len, len(history) + 1): yield tuple(history[:ngram_len]) try: history.append(next(sequence)) except StopIteration: pass del history[0] def skipgrams(sequence, n, k, **kwargs): if "pad_left" in kwargs or "pad_right" in kwargs: sequence = pad_sequence(sequence, n, **kwargs) SENTINEL = object() for ngram in ngrams(sequence, n + k, pad_right=True, right_pad_symbol=SENTINEL): head = ngram[:1] tail = ngram[1:] for skip_tail in combinations(tail, n - 1): if skip_tail[-1] is SENTINEL: continue yield head + skip_tail def binary_search_file(file, key, cache=None, cacheDepth=-1): key = key + " " keylen = len(key) start = 0 currentDepth = 0 if hasattr(file, "name"): end = os.stat(file.name).st_size - 1 else: file.seek(0, 2) end = file.tell() - 1 file.seek(0) if cache is None: cache = {} while start < end: lastState = start, end middle = (start + end) // 2 if cache.get(middle): offset, line = cache[middle] else: line = "" while True: file.seek(max(0, middle - 1)) if middle > 0: file.discard_line() offset = file.tell() line = file.readline() if line != "": break middle = (start + 
middle) // 2 if middle == end - 1: return None if currentDepth < cacheDepth: cache[middle] = (offset, line) if offset > end: assert end != middle - 1, "infinite loop" end = middle - 1 elif line[:keylen] == key: return line elif line > key: assert end != middle - 1, "infinite loop" end = middle - 1 elif line < key: start = offset + len(line) - 1 currentDepth += 1 thisState = start, end if lastState == thisState: return None return None def set_proxy(proxy, user=None, password=""): if proxy is None: try: proxy = getproxies()["http"] except KeyError as e: raise ValueError("Could not detect default proxy settings") from e proxy_handler = ProxyHandler({"https": proxy, "http": proxy}) opener = build_opener(proxy_handler) if user is not None: password_manager = HTTPPasswordMgrWithDefaultRealm() password_manager.add_password(realm=None, uri=proxy, user=user, passwd=password) opener.add_handler(ProxyBasicAuthHandler(password_manager)) opener.add_handler(ProxyDigestAuthHandler(password_manager)) install_opener(opener) def elementtree_indent(elem, level=0): i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " for elem in elem: elementtree_indent(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i def choose(n, k): if 0 <= k <= n: ntok, ktok = 1, 1 for t in range(1, min(k, n - k) + 1): ntok *= n ktok *= t n -= 1 return ntok // ktok else: return 0 def pairwise(iterable): a, b = tee(iterable) next(b, None) return zip(a, b) def parallelize_preprocess(func, iterator, processes, progress_bar=False): from joblib import Parallel, delayed from tqdm import tqdm iterator = tqdm(iterator) if progress_bar else iterator if processes <= 1: return map(func, iterator) return Parallel(n_jobs=processes)(delayed(func)(line) for line in iterator)
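A short sketch exercising a few of the smaller helpers defined above (choose, pairwise and elementtree_indent); it assumes a standard nltk install and needs no data downloads.

import xml.etree.ElementTree as ET

from nltk.util import choose, elementtree_indent, pairwise

# choose(n, k) is the binomial coefficient, computed with integer arithmetic.
assert choose(4, 2) == 6
assert choose(6, 2) == 15
assert choose(3, 5) == 0  # k > n returns 0 rather than raising

# pairwise turns s0, s1, s2, ... into (s0, s1), (s1, s2), ...
assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]

# elementtree_indent mutates the element in place so it pretty-prints.
root = ET.fromstring("<doc><sent>hello</sent></doc>")
elementtree_indent(root)
print(ET.tostring(root, encoding="unicode"))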
natural language toolkit word sense disambiguation algorithms s liling tan alvations gmail com dmitrijs milajevs dimazest gmail com c 2001 2023 nltk project url https www nltk org for license information see license txt return a synset for an ambiguous word in a context param iter context_sentence the context sentence where the ambiguous word occurs passed as an iterable of words param str ambiguous_word the ambiguous word that requires wsd param str pos a specified part of speech pos param iter synsets possible synsets of the ambiguous word param str lang wordnet language return lesk_sense the synset object with the highest signature overlaps this function is an implementation of the original lesk algorithm 1986 1 usage example lesk i went to the bank to deposit money bank n synset savings_bank n 02 1 lesk michael automatic sense disambiguation using machine readable dictionaries how to tell a pine cone from an ice cream cone proceedings of the 5th annual international conference on systems documentation acm 1986 https dl acm org citation cfm id 318728
from nltk.corpus import wordnet def lesk(context_sentence, ambiguous_word, pos=None, synsets=None, lang="eng"): context = set(context_sentence) if synsets is None: synsets = wordnet.synsets(ambiguous_word, lang=lang) if pos: synsets = [ss for ss in synsets if str(ss.pos()) == pos] if not synsets: return None _, sense = max( (len(context.intersection(ss.definition().split())), ss) for ss in synsets ) return sense
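An illustrative use of lesk() as defined above, following the usage example in the docstring. It assumes the WordNet data has already been fetched, e.g. with nltk.download("wordnet").

from nltk.corpus import wordnet as wn
from nltk.wsd import lesk

context = "I went to the bank to deposit money .".split()

# Restrict the candidates to nouns; the sense whose definition overlaps the
# context most wins (Synset('savings_bank.n.02') in the docstring example).
sense = lesk(context, "bank", pos="n")
print(sense, "-", sense.definition())

# Candidate synsets can also be passed in explicitly.
candidates = wn.synsets("bank", pos="n")
print(lesk(context, "bank", synsets=candidates))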
usr bin env python natural language toolkit deprecated function class finder c 2001 2023 nltk project edward loper edloper gmail com url https www nltk org for license information see license txt this command line tool takes a list of python files or directories and searches them for calls to deprecated nltk functions or uses of deprecated nltk classes for each use of a deprecated object it finds it will print out a warning containing the offending line as well as its line number and containing file name if the terminal has color support and if epydoc is installed then the offending identifier will be highlighted in red imports regexps a little over simplified but it ll do define a regexp to search for deprecated definitions globals yes it s bad programming practice but this is a little hack script these get initialized by find_deprecated_defs code return a list of all functions marked with the deprecated decorator and classes with an immediate deprecated base class in all python files in the given directory walk through the directory finding python files search the file for any deprecated definitions remember the previous line it might contain the deprecated decorator ignore all tokens except deprecated names hack only complain about read if it s used after a corpus ignore deprecated definitions print a header for the first use in a file mark the offending token print the offending line
import os
import re
import sys
import textwrap
import tokenize
from doctest import DocTestParser, register_optionflag
from io import StringIO

import nltk.corpus
from nltk import defaultdict

# Matches a single string literal (single- or triple-quoted, optionally
# prefixed with u/r).
STRING_PAT = (
    r"\s*[ur]{0,2}(?:"
    r'"""[\s\S]*?"""|'
    '"[^"\n]+?"|'
    r"'''[\s\S]*?'''|"
    "'[^'\n]+?'"
    r")\s*"
)
STRING_RE = re.compile(STRING_PAT)

STRINGS_PAT = f"{STRING_PAT}(?:[+]?{STRING_PAT})*"
STRINGS_RE = re.compile(STRINGS_PAT)

DEPRECATED_DEF_PAT = (
    rf"^\s*@deprecated\s*\(\s*({STRINGS_PAT})\s*\)\s*\n+"
    + r"\s*def\s*(\w+).*"
    + r"|"
    + r"^\s*class\s+(\w+)\s*\(.*Deprecated.*\):\s*"
)
DEPRECATED_DEF_RE = re.compile(DEPRECATED_DEF_PAT, re.MULTILINE)

CORPUS_READ_METHOD_RE = re.compile(
    r"({})\.read\(".format("|".join(re.escape(n) for n in dir(nltk.corpus)))
)

CLASS_DEF_RE = re.compile(r"^\s*class\s+(\w+)\s*[:\(]")

deprecated_funcs = defaultdict(set)
deprecated_classes = defaultdict(set)
deprecated_methods = defaultdict(set)

try:
    from epydoc.cli import TerminalController
except ImportError:

    class TerminalController:
        def __getattr__(self, attr):
            return ""


term = TerminalController()


def strip_quotes(s):
    s = s.strip()
    while s and (s[0] in "ur") and (s[-1] in "'\""):
        s = s[1:]
    while s and (s[0] in "'\"" and (s[0] == s[-1])):
        s = s[1:-1]
    s = s.strip()
    return s


def find_class(s, index):
    lines = s[:index].split("\n")
    while lines:
        m = CLASS_DEF_RE.match(lines[-1])
        if m:
            return m.group(1) + "."
        lines.pop()
    return "?."


def find_deprecated_defs(pkg_dir):
    for root, dirs, files in os.walk(pkg_dir):
        for filename in files:
            if filename.endswith(".py"):
                s = open(os.path.join(root, filename)).read()
                for m in DEPRECATED_DEF_RE.finditer(s):
                    if m.group(2):
                        name = m.group(2)
                        msg = " ".join(
                            strip_quotes(s) for s in STRING_RE.findall(m.group(1))
                        )
                        msg = " ".join(msg.split())
                        if m.group()[0] in " \t":
                            cls = find_class(s, m.start())
                            deprecated_methods[name].add((msg, cls, "()"))
                        else:
                            deprecated_funcs[name].add((msg, "", "()"))
                    else:
                        name = m.group(3)
                        m2 = STRING_RE.match(s, m.end())
                        if m2:
                            msg = strip_quotes(m2.group())
                        else:
                            msg = ""
                        msg = " ".join(msg.split())
                        deprecated_classes[name].add((msg, "", ""))


def print_deprecated_uses(paths):
    dep_names = set()
    dep_files = set()
    for path in sorted(paths):
        if os.path.isdir(path):
            dep_names.update(
                print_deprecated_uses([os.path.join(path, f) for f in os.listdir(path)])
            )
        elif path.endswith(".py"):
            print_deprecated_uses_in(open(path).readline, path, dep_files, dep_names, 0)
        elif path.endswith(".doctest") or path.endswith(".txt"):
            for example in DocTestParser().get_examples(open(path).read()):
                ex = StringIO(example.source)
                try:
                    print_deprecated_uses_in(
                        ex.readline, path, dep_files, dep_names, example.lineno
                    )
                except tokenize.TokenError:
                    print(
                        term.RED + "Caught TokenError -- "
                        "malformatted doctest?"
+ term.NORMAL ) return dep_names def print_deprecated_uses_in(readline, path, dep_files, dep_names, lineno_offset): tokiter = tokenize.generate_tokens(readline) context = [""] for (typ, tok, start, end, line) in tokiter: if line is not context[-1]: context.append(line) if len(context) > 10: del context[0] esctok = re.escape(tok) if not ( tok in deprecated_classes or (tok in deprecated_funcs and re.search(rf"\b{esctok}\s*\(", line)) or ( tok in deprecated_methods and re.search(rf"(?!<\bself)[.]\s*{esctok}\s*\(", line) ) ): continue if tok == "read" and not CORPUS_READ_METHOD_RE.search(line): continue if DEPRECATED_DEF_RE.search("".join(context)): continue if path not in dep_files: print("\n" + term.BOLD + path + term.NORMAL) print(f" {term.YELLOW}linenum{term.NORMAL}") dep_files.add(path) dep_names.add(tok) if term.RED: sub = term.RED + tok + term.NORMAL elif term.BOLD: sub = term.BOLD + tok + term.NORMAL else: sub = "<<" + tok + ">>" line = re.sub(rf"\b{esctok}\b", sub, line) print( " {}[{:5d}]{} {}".format( term.YELLOW, start[0] + lineno_offset, term.NORMAL, line.rstrip() ) ) def main(): paths = sys.argv[1:] or ["."] print("Importing nltk...") try: import nltk except ImportError: print("Unable to import nltk -- check your PYTHONPATH.") sys.exit(-1) print("Finding definitions of deprecated functions & classes in nltk...") find_deprecated_defs(nltk.__path__[0]) print("Looking for possible uses of deprecated funcs & classes...") dep_names = print_deprecated_uses(paths) if not dep_names: print("No deprecated funcs or classes found!") else: print("\n" + term.BOLD + "What you should use instead:" + term.NORMAL) for name in sorted(dep_names): msgs = ( deprecated_funcs[name] .union(deprecated_classes[name]) .union(deprecated_methods[name]) ) for msg, prefix, suffix in msgs: print( textwrap.fill( term.RED + prefix + name + suffix + term.NORMAL + ": " + msg, width=75, initial_indent=" " * 2, subsequent_indent=" " * 6, ) ) if __name__ == "__main__": main()
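A hypothetical snippet showing the kind of source DEPRECATED_DEF_RE above is written to catch; the function and class names are invented for illustration, and the loop assumes it runs where the tool's regexes are already defined (for example, pasted into this module).

sample = '''
@deprecated("Use new_tokenize() instead.")
def old_tokenize(text):
    return text.split()

class OldReader(Deprecated, object):
    pass
'''

for m in DEPRECATED_DEF_RE.finditer(sample):
    # group(2) is set for decorated functions, group(3) for Deprecated subclasses.
    print(m.group(2) or m.group(3))  # -> old_tokenize, OldReader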
usr bin env python natural language toolkit substitute a pattern with a replacement in every file c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt nb should work on all platforms http www python org doc 2 5 2 lib os file dir html make sure we can write the file write the file restore permissions
import os import stat import sys def update(file, pattern, replacement): try: old_perm = os.stat(file)[0] if not os.access(file, os.W_OK): os.chmod(file, old_perm | stat.S_IWRITE) s = open(file, "rb").read().decode("utf-8") t = s.replace(pattern, replacement) out = open(file, "wb") out.write(t.encode("utf-8")) out.close() os.chmod(file, old_perm) return s != t except Exception: exc_type, exc_obj, exc_tb = sys.exc_info() print(f"Unable to check {file:s} {str(exc_type):s}") return 0 if __name__ == "__main__": if len(sys.argv) != 3: exit("Usage: %s <pattern> <replacement>" % sys.argv[0]) pattern = sys.argv[1] replacement = sys.argv[2] count = 0 for root, dirs, files in os.walk("."): if not ("/.git" in root or "/.tox" in root): for file in files: path = os.path.join(root, file) if update(path, pattern, replacement): print("Updated:", path) count += 1 print(f"Updated {count} files")
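A quick sketch of update() above applied to a throwaway temporary file, assuming update() from the code above is in scope; the file contents and the pattern/replacement strings are made up for illustration. On the command line the script instead walks the current directory, taking the pattern and replacement as its two arguments.

import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".txt")
with os.fdopen(fd, "w", encoding="utf-8") as f:
    f.write("http://nltk.org is the old URL\n")

# update() rewrites the file in place and reports whether anything changed.
changed = update(path, "http://nltk.org", "https://www.nltk.org")
print(changed)  # True, since the pattern occurred
print(open(path, encoding="utf-8").read())
os.remove(path)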
nltk documentation build configuration file sphinxquickstart on wed nov 2 17 02 59 2011 this file is execfiled with the current directory set to its containing dir note that not all possible configuration values are present in this autogenerated file all configuration values have a default values that are commented out serve to show the default if extensions or modules to document with autodoc are in another directory add these directories to sys path here if the directory is relative to the documentation root use os path abspath to make it absolute like shown here build docs using nltk from the upper dir not the installed version general configuration if your documentation needs a minimal sphinx version state it here needssphinx 2 2 add any sphinx extension module names here as strings they can be extensions coming with sphinx named sphinx ext or your custom ones generating contents in the howto folder based on the ntlktest doctest files as well as contents in the team folder based on team json load jinja template iterate over doctest files and find the modulename ignore index doctest we already have an index i e howto rst write rst files based on the doctesttemplate load the team json data load the team jinja template build the team howto page before creating the sphinx build add any paths that contain templates here relative to this directory the suffix of source filenames the encoding of source files sourceencoding utf8sig the master toctree document general information about the project the version info for the project you re documenting acts as replacement for version and release also used in various other places throughout the built documents the short x y version the full version including alphabetarc tags the language for content autogenerated by sphinx refer to documentation for a list of supported languages language none there are two options for replacing today either you set today to some nonfalse value then it is used today else todayfmt is used as the format for a strftime call todayfmt b d y list of patterns relative to source directory that match files and directories to ignore when looking for source files the rest default role used for this markup text to use for all documents defaultrole none if true will be appended to func etc crossreference text addfunctionparentheses true if true the current module name will be prepended to all description unit titles such as function addmodulenames true if true section and module directives will be shown in the output they are ignored by default shows false the name of the pygments syntax highlighting style to use a list of ignored prefixes for module index sorting options for html output the theme to use for html and html help pages see the documentation for a list of builtin themes theme options are themespecific and customize the look and feel of a theme further for a list of options available for each theme see the documentation required for the theme used for linking to a specific tag in the website footer add any paths that contain custom themes here relative to this directory htmlthemepath the name for this set of sphinx documents if none it defaults to project vrelease documentation htmltitle none a shorter title for the navigation bar default is the same as htmltitle htmlshorttitle none the name of an image file relative to this directory to place at the top of the sidebar htmllogo none the name of an image file within the static path to use as favicon of the docs this file should be a windows icon file ico being 16x16 or 
32x32 pixels large htmlfavicon none add any paths that contain custom static files such as style sheets here relative to this directory they are copied after the builtin static files so a file named default css will overwrite the builtin default css if not a last updated on timestamp is inserted at every page bottom using the given strftime format htmllastupdatedfmt d b y if true smartypants will be used to convert quotes and dashes to typographically correct entities custom sidebar templates maps document names to template names htmlsidebars additional templates that should be rendered to pages maps page names to template names htmladditionalpages if false no module index is generated if false no index is generated we don t use the genindex if true the index is split into individual pages for each letter htmlsplitindex false if true links to the rest sources are added to the pages htmlshowsourcelink true if true created using sphinx is shown in the html footer default is true htmlshowsphinx true if true c is shown in the html footer default is true htmlshow true if true an opensearch description file will be output and all pages will contain a link tag referring to it the value of this option must be the base url from which the finished html is served htmluseopensearch this is the file name suffix for html files e g xhtml htmlfilesuffix none output file base name for html help builder options for latex output the paper size letterpaper or a4paper papersize letterpaper the font size 10pt 11pt or 12pt pointsize 10pt additional stuff for the latex preamble preamble grouping the document tree into latex files list of tuples source start file target name title documentclass howtomanual the name of an image file relative to this directory to place at the top of the title page latexlogo none for manual documents if this is true then toplevel headings are parts not chapters latexuseparts false if true show page references after internal links latexshowpagerefs false if true show url addresses after external links latexshowurls false documents to append as an appendix to all manuals latexappendices if false no module index is generated latexdomainindices true options for manual page output one entry per manual page list of tuples source start file name description s manual section if true show url addresses after external links manshowurls false options for texinfo output grouping the document tree into texinfo files list of tuples source start file target name title dir menu entry description category documents to append as an appendix to all manuals texinfoappendices if false no module index is generated texinfodomainindices true how to display url addresses footnote no or inline texinfoshowurls footnote options for autodoc output if it s mixed then the documentation for each parameter isn t listed e g nltk tokenize casual tweettokenizerpreservecasetrue reducelenfalse striphandlesfalse matchphonenumberstrue and that s it with seperated nltk tokenize casual tweettokenizer initpreservecasetrue reducelenfalse striphandlesfalse matchphonenumberstrue create a tweettokenizer instance with settings for use in the tokenize method parameters preservecase bool flag indicating whether to preserve the casing capitalisation of text used in the tokenize method defaults to true reducelen bool flag indicating whether to replace repeated character sequences of length 3 or greater with sequences of length 3 defaults to false striphandles bool flag indicating whether to remove twitter handles of text used in the 
tokenize method defaults to false matchphonenumbers bool flag indicating whether the tokenize method should look for phone numbers defaults to true put the python 3 5 type hint in the signature and also at the parameters list
import os import sys sys.path.insert(0, os.path.abspath("..")) extensions = [ "sphinx.ext.autodoc", "sphinx.ext.coverage", "sphinx.ext.imgmath", "sphinx.ext.viewcode", "sphinxcontrib.apidoc", ] apidoc_module_dir = "../nltk" apidoc_output_dir = "api" apidoc_separate_modules = True apidoc_extra_args = ["--templatedir=_templates", "--force"] apidoc_excluded_paths = ["test"] def generate_custom_files(): import glob import json import re from jinja2 import Template modules = [] web_folder = os.path.dirname(os.path.abspath(__file__)) howto_folder = os.path.join(web_folder, "howto") if not os.path.exists(howto_folder): os.makedirs(howto_folder) with open( os.path.join(web_folder, "_templates", "doctest.rst"), encoding="utf8" ) as f: doctest_template = Template(f.read()) print("Generating HOWTO pages...") pattern = re.compile(r"(\w+)\.doctest$") for path in glob.glob(os.path.join(web_folder, "..", "nltk", "test", "*.doctest")): match = pattern.search(path) module_name = match.group(1) if module_name == "index": continue doctest_template.stream(module_name=module_name).dump( os.path.join(howto_folder, f"{module_name}.rst") ) modules.append(module_name) print(f"Generated {len(modules)} HOWTO pages.") with open(os.path.join(web_folder, "team", "team.json"), encoding="utf8") as f: full_data = json.load(f) print("Team data loaded!") with open( os.path.join(web_folder, "_templates", "team.html"), encoding="utf8" ) as f: team_template = Template(f.read()) for members_type, members_data in full_data.items(): team_template.stream(members=members_data).dump( os.path.join(web_folder, "team", f"{members_type}_team.html") ) print(f"{members_type.title()} team HTML page written!") generate_custom_files() templates_path = ["_templates"] source_suffix = ".rst" master_doc = "index" project = "NLTK" copyright = "2023, NLTK Project" version = "3.8.1" release = "3.8.1" exclude_patterns = ["_build", "api/modules.rst", "dev/*.rst"] pygments_style = "sphinx" modindex_common_prefix = ["nltk."] html_theme = "nltk_theme" html_theme_options = {"navigation_depth": 1} html_context = {"github_user": "nltk", "github_repo": "nltk"} html_static_path = ["_static"] html_last_updated_fmt = "%b %d, %Y" html_use_smartypants = True html_domain_indices = True html_use_index = False htmlhelp_basename = "NLTKdoc" latex_elements = { } latex_documents = [("index", "NLTK.tex", "NLTK Documentation", "Steven Bird", "manual")] man_pages = [("index", "nltk", "NLTK Documentation", ["Steven Bird"], 1)] texinfo_documents = [ ( "index", "NLTK", "NLTK Documentation", "Steven Bird", "NLTK", "One line description of project.", "Miscellaneous", ) ] autodoc_class_signature = "separated" autodoc_typehints = "both"
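generate_custom_files() above leans on jinja2's Template.stream(...).dump(...) to write each rendered page straight to disk. Below is a minimal standalone sketch of that pattern; the template text is a stand-in, not the real _templates/doctest.rst, and the output path is arbitrary.

from jinja2 import Template

doctest_template = Template(
    "{{ module_name }} HOWTO\n"
    "=============================\n\n"
    ".. include:: ../../nltk/test/{{ module_name }}.doctest\n"
)

# stream() renders lazily; dump() accepts a path and writes the result to it.
doctest_template.stream(module_name="probability").dump("probability.rst")
print(open("probability.rst").read())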